merge fix
@@ -94,6 +94,7 @@ tonu@volk.internalnet
tonu@x153.internalnet
tonu@x3.internalnet
venu@work.mysql.com
vva@eagle.mysql.r18.ru
vva@genie.(none)
walrus@kishkin.ru
walrus@mysql.com
@@ -37,4 +37,5 @@ enum options { OPT_CHARSETS_DIR=256, OPT_DEFAULT_CHARSET,
OPT_SELECT_LIMIT, OPT_MAX_JOIN_SIZE, OPT_SSL_SSL,
OPT_SSL_KEY, OPT_SSL_CERT, OPT_SSL_CA, OPT_SSL_CAPATH,
OPT_SSL_CIPHER, OPT_SHUTDOWN_TIMEOUT, OPT_LOCAL_INFILE,
OPT_DELETE_MASTER_LOGS,
OPT_PROMPT, OPT_IGN_LINES,OPT_TRANSACTION, OPT_FRM };
@@ -2405,31 +2405,32 @@ select_limit, max_join_size);
static int
put_info(const char *str,INFO_TYPE info_type,uint error)
{
FILE *file= (info_type == INFO_ERROR ? stderr : stdout);
static int inited=0;

if (status.batch)
{
if (info_type == INFO_ERROR)
{
(void) fflush(stdout);
fprintf(stderr,"ERROR");
(void) fflush(file);
fprintf(file,"ERROR");
if (error)
(void) fprintf(stderr," %d",error);
(void) fprintf(file," %d",error);
if (status.query_start_line && line_numbers)
{
(void) fprintf(stderr," at line %lu",status.query_start_line);
(void) fprintf(file," at line %lu",status.query_start_line);
if (status.file_name)
(void) fprintf(stderr," in file: '%s'", status.file_name);
(void) fprintf(file," in file: '%s'", status.file_name);
}
(void) fprintf(stderr,": %s\n",str);
(void) fflush(stderr);
(void) fprintf(file,": %s\n",str);
(void) fflush(file);
if (!ignore_errors)
return 1;
}
else if (info_type == INFO_RESULT && verbose > 1)
tee_puts(str, stdout);
tee_puts(str, file);
if (unbuffered)
fflush(stdout);
fflush(file);
return info_type == INFO_ERROR ? -1 : 0;
}
if (!opt_silent || info_type == INFO_ERROR)
@@ -2447,17 +2448,17 @@ put_info(const char *str,INFO_TYPE info_type,uint error)
putchar('\007'); /* This should make a bell */
vidattr(A_STANDOUT);
if (error)
(void) tee_fprintf(stderr, "ERROR %d: ", error);
(void) tee_fprintf(file, "ERROR %d: ", error);
else
tee_puts("ERROR: ", stdout);
tee_puts("ERROR: ", file);
}
else
vidattr(A_BOLD);
(void) tee_puts(str, stdout);
(void) tee_puts(str, file);
vidattr(A_NORMAL);
}
if (unbuffered)
fflush(stdout);
fflush(file);
return info_type == INFO_ERROR ? -1 : 0;
}
@@ -36,7 +36,7 @@
** Added --single-transaction option 06/06/2002 by Peter Zaitsev
*/

#define DUMP_VERSION "9.07"
#define DUMP_VERSION "9.08"

#include <my_global.h>
#include <my_sys.h>
@@ -77,7 +77,8 @@ static my_bool verbose=0,tFlag=0,cFlag=0,dFlag=0,quick=0, extended_insert = 0,
opt_delayed=0,create_options=0,opt_quoted=0,opt_databases=0,
opt_alldbs=0,opt_create_db=0,opt_first_slave=0,
opt_autocommit=0,opt_master_data,opt_disable_keys=0,opt_xml=0,
tty_password=0,opt_single_transaction=0;
opt_delete_master_logs=0, tty_password=0,
opt_single_transaction=0;
static MYSQL mysql_connection,*sock=0;
static char insert_pat[12 * 1024],*opt_password=0,*current_user=0,
*current_host=0,*path=0,*fields_terminated=0,
@@ -129,6 +130,9 @@ static struct my_option my_long_options[] =
{"delayed-insert", OPT_DELAYED, "Insert rows with INSERT DELAYED.",
(gptr*) &opt_delayed, (gptr*) &opt_delayed, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0,
0, 0},
{"delete-master-logs", OPT_DELETE_MASTER_LOGS,
"Delete logs on master after backup. This will automagically enable --first-slave.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"disable-keys", 'K',
"'/*!40000 ALTER TABLE tb_name DISABLE KEYS */; and '/*!40000 ALTER TABLE tb_name ENABLE KEYS */; will be put in the output.", (gptr*) &opt_disable_keys,
(gptr*) &opt_disable_keys, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
@@ -318,15 +322,20 @@ static void write_footer(FILE *sql_file)
fputs("\n", sql_file);
} /* write_footer */


static my_bool
get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
char *argument)
{
switch(optid) {
switch (optid) {
case OPT_MASTER_DATA:
opt_master_data=1;
opt_first_slave=1;
break;
case OPT_DELETE_MASTER_LOGS:
opt_delete_master_logs=1;
opt_first_slave=1;
break;
case 'p':
if (argument)
{
@@ -1431,6 +1440,11 @@ int main(int argc, char **argv)

if (opt_first_slave)
{
if (opt_delete_master_logs && mysql_query(sock, "FLUSH MASTER"))
{
my_printf_error(0, "Error: Couldn't execute 'FLUSH MASTER': %s",
MYF(0), mysql_error(sock));
}
if (opt_master_data)
{
if (mysql_query(sock, "SHOW MASTER STATUS") ||
@@ -1452,9 +1466,6 @@ int main(int argc, char **argv)
mysql_free_result(master);
}
}
if (mysql_query(sock, "FLUSH MASTER"))
my_printf_error(0, "Error: Couldn't execute 'FLUSH MASTER': %s",
MYF(0), mysql_error(sock));
if (mysql_query(sock, "UNLOCK TABLES"))
my_printf_error(0, "Error: Couldn't execute 'UNLOCK TABLES': %s",
MYF(0), mysql_error(sock));
@@ -62,7 +62,7 @@ int _mi_write_blob_record(MI_INFO *info, const byte *record)

extra= (ALIGN_SIZE(MI_MAX_DYN_BLOCK_HEADER)+MI_SPLIT_LENGTH+
MI_DYN_DELETE_BLOCK_HEADER+1);
reclength= (info->s->base.pack_reclength+ info->s->base.pack_bits+
reclength= (info->s->base.pack_reclength + info->s->base.pack_bits +
_my_calc_total_blob_length(info,record)+ extra);
#ifdef NOT_USED /* We now support big rows */
if (reclength > MI_DYN_MAX_ROW_LENGTH)
@@ -237,13 +237,18 @@ a
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
select sql_cache * from t1;
select sql_cache * from t1 union select * from t1;
a
1
2
3
set query_cache_type=2;
select sql_cache * from t1;
select sql_cache * from t1 union select * from t1;
a
1
2
3
select * from t1 union select sql_cache * from t1;
a
1
2
@@ -253,7 +258,7 @@ Variable_name Value
Qcache_hits 4
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 1
Qcache_queries_in_cache 2
set query_cache_type=on;
reset query cache;
show status like "Qcache_queries_in_cache";
@@ -576,3 +581,21 @@ show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
drop table t1;
create table t1 (a int);
insert into t1 values (1),(2);
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
select * from t1;
a
1
2
SET OPTION SQL_SELECT_LIMIT=1;
select * from t1;
a
1
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 2
SET OPTION SQL_SELECT_LIMIT=DEFAULT;
drop table t1;
@@ -6,8 +6,19 @@ drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
slave start;
create table t1(a int not null auto_increment, b int, primary key(a) );
load data infile '../../std_data/rpl_loaddata.dat' into table t1;
create temporary table t2 (day date,id int(9),category enum('a','b','c'),name varchar(60));
load data infile '../../std_data/rpl_loaddata2.dat' into table t2 fields terminated by ',' optionally enclosed by '%' escaped by '@' lines terminated by '\n##\n' ignore 1 lines;
create table t3 (day date,id int(9),category enum('a','b','c'),name varchar(60));
insert into t3 select * from t2;
select * from t1;
a b
1 10
2 15
select * from t3;
day id category name
2003-02-22 2461 b a a a @ % ' " a
2003-03-22 2161 c asdf
2003-04-22 2416 a bbbbb
drop table t1;
drop table t2;
drop table t3;
mysql-test/std_data/rpl_loaddata2.dat (new file)
@@ -0,0 +1,8 @@
2003-01-21,6328,%a%,%aaaaa%
##
2003-02-22,2461,b,%a a a @@ @% @b ' " a%
##
2003-03-22,2161,%c%,%asdf%
##
2003-04-22,2416,%a%,%bbbbb%
##
@@ -144,9 +144,10 @@ create table t1 (a int not null);
insert into t1 values (1),(2),(3);
select * from t1;
show status like "Qcache_queries_in_cache";
select sql_cache * from t1;
select sql_cache * from t1 union select * from t1;
set query_cache_type=2;
select sql_cache * from t1;
select sql_cache * from t1 union select * from t1;
select * from t1 union select sql_cache * from t1;
show status like "Qcache_hits";
show status like "Qcache_queries_in_cache";
set query_cache_type=on;
@@ -400,7 +401,6 @@ select * from t1 where id=2;
#
# Load data invalidation test
#

create table t1 (word char(20) not null);
select * from t1;
show status like "Qcache_queries_in_cache";
@@ -411,7 +411,6 @@ drop table t1;
#
# INTO OUTFILE/DUMPFILE test
#

drop table if exists t1;
create table t1 (a int);
insert into t1 values (1),(2),(3);
@@ -419,4 +418,17 @@ show status like "Qcache_queries_in_cache";
select * from t1 into outfile "query_caceh.out.file";
select * from t1 limit 1 into dumpfile "query_cache.dump.file";
show status like "Qcache_queries_in_cache";
drop table t1;
drop table t1;

#
# test of SQL_SELECT_LIMIT
#
create table t1 (a int);
insert into t1 values (1),(2);
show status like "Qcache_queries_in_cache";
select * from t1;
SET OPTION SQL_SELECT_LIMIT=1;
select * from t1;
show status like "Qcache_queries_in_cache";
SET OPTION SQL_SELECT_LIMIT=DEFAULT;
drop table t1;
@@ -1,16 +1,34 @@
# See if replication of a "LOAD DATA in an autoincrement column"
# Honours autoincrement values
# i.e. if the master and slave have the same sequence
#
# check replication of load data for temporary tables with additional parameters
#
source include/master-slave.inc;

create table t1(a int not null auto_increment, b int, primary key(a) );
load data infile '../../std_data/rpl_loaddata.dat' into table t1;

create temporary table t2 (day date,id int(9),category enum('a','b','c'),name varchar(60));
#load data infile '../../std_data/rpl_loaddata2.dat' into table t2 fields terminated by ',' optionaly enclosed by '%' escaped by '@' lines terminated by '\n%%\n' ignore 1 lines;
load data infile '../../std_data/rpl_loaddata2.dat' into table t2 fields terminated by ',' optionally enclosed by '%' escaped by '@' lines terminated by '\n##\n' ignore 1 lines;

create table t3 (day date,id int(9),category enum('a','b','c'),name varchar(60));
insert into t3 select * from t2;

save_master_pos;
connection slave;
sync_with_master;
select * from t1;

select * from t1;
select * from t3;

connection master;

drop table t1;
drop table t2;
drop table t3;

save_master_pos;
connection slave;
sync_with_master;
@@ -23,11 +23,15 @@

int my_snprintf(char* to, size_t n, const char* fmt, ...)
{
int result;
va_list args;
va_start(args,fmt);
return my_vsnprintf(to, n, fmt, args);
result= my_vsnprintf(to, n, fmt, args);
va_end(args);
return result;
}


int my_vsnprintf(char *to, size_t n, const char* fmt, va_list ap)
{
char *start=to, *end=to+n-1;
@@ -79,6 +83,7 @@ int my_vsnprintf(char *to, size_t n, const char* fmt, va_list ap)
return (uint) (to - start);
}


#ifdef MAIN
#define OVERRUN_SENTRY 250
static void my_printf(const char * fmt, ...)
@@ -99,6 +104,7 @@ static void my_printf(const char * fmt, ...)
va_end(ar);
}


int main()
{
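The my_snprintf() hunk above stores the return value of my_vsnprintf() so that va_end() can run before the function returns; the old code returned directly and never closed the va_list. A minimal standalone sketch of the same pattern, using the C library's vsnprintf() instead of MySQL's my_vsnprintf() and a hypothetical wrapper name:

    #include <stdarg.h>
    #include <stdio.h>

    /* Hypothetical wrapper: every va_start() is paired with va_end()
       before the function returns. */
    static int format_message(char *to, size_t n, const char *fmt, ...)
    {
      int result;
      va_list args;
      va_start(args, fmt);
      result= vsnprintf(to, n, fmt, args);  /* do the work while args is live */
      va_end(args);                         /* clean up before returning */
      return result;
    }

    int main(void)
    {
      char buf[64];
      int len= format_message(buf, sizeof buf, "error %d at line %lu", 13, 42UL);
      printf("%s (%d chars)\n", buf, len);
      return 0;
    }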
@@ -2734,6 +2734,51 @@ assert("drop table crash_me_n $drop_attr");


$key = 'sorted_group_by';
$prompt = 'Group by always sorted';
if (!defined($limits{$key}))
{
save_incomplete($key,$prompt);
print "$prompt=";
safe_query_l($key,[
"create table crash_me_t1 (a int not null, b int not null)",
"insert into crash_me_t1 values (1,1)",
"insert into crash_me_t1 values (1,2)",
"insert into crash_me_t1 values (3,1)",
"insert into crash_me_t1 values (3,2)",
"insert into crash_me_t1 values (2,2)",
"insert into crash_me_t1 values (2,1)",
"create table crash_me_t2 (a int not null, b int not null)",
"create index crash_me_t2_ind on crash_me_t2 (a)",
"insert into crash_me_t2 values (1,3)",
"insert into crash_me_t2 values (3,1)",
"insert into crash_me_t2 values (2,2)",
"insert into crash_me_t2 values (1,1)"]);

my $bigqry = "select crash_me_t1.a,crash_me_t2.b from ".
"crash_me_t1,crash_me_t2 where crash_me_t1.a=crash_me_t2.a ".
"group by crash_me_t1.a,crash_me_t2.b";

my $limit='no';
my $rs = get_recordset($key,$bigqry);
print_recordset($key,$rs);
if ( defined ($rs)) {
if (compare_recordset($key,$rs,[[1,1],[1,3],[2,2],[3,1]]) eq 0)
{
$limit='yes'
}
} else {
add_log($key,"error: ".$DBI::errstr);
}

print "$limit\n";
safe_query_l($key,["drop table crash_me_t1",
"drop table crash_me_t2"]);
save_config_data($key,$limit,$prompt);

} else {
print "$prompt=$limits{$key} (cashed)\n";
}


#
@@ -252,7 +252,7 @@ sub test_update

for ($id=0 ; $id < $opt_loop_count ; $id++)
{
do_query($dbh,"update bench1 set updated=1 where idn=$id");
do_query($dbh,"update $table set updated=1 where idn=$id");
}

$dbh->commit if (!$auto_commit);
@@ -1037,7 +1037,7 @@ int ha_myisam::create(const char *name, register TABLE *table_arg,
&keydef, table_arg->keys*sizeof(MI_KEYDEF),
&keyseg,
((table_arg->key_parts + table_arg->keys) * sizeof(MI_KEYSEG)),
0)))
NullS)))
DBUG_RETURN(1);

pos=table_arg->key_info;
sql/log.cc
@@ -310,7 +310,10 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg,
DBUG_RETURN(0);

err:
sql_print_error("Could not use %s for logging (error %d)", log_name, errno);
sql_print_error("Could not use %s for logging (error %d). \
Turning logging off for the whole duration of the MySQL server process. \
To turn it on again: fix the cause, \
shutdown the MySQL server and restart it.", log_name, errno);
if (file >= 0)
my_close(file,MYF(0));
if (index_file_nr >= 0)
@@ -1120,9 +1123,17 @@ bool MYSQL_LOG::write(Log_event* event_info)
the table handler commit here, protected by the LOCK_log mutex,
because otherwise the transactions may end up in a different order
in the table handler log!

Note that we will NOT call ha_report_binlog_offset_and_commit() if
there are binlog events cached in the transaction cache. That is
because then the log event which we write to the binlog here is
not a transactional event. In versions < 4.0.13 before this fix this
caused an InnoDB transaction to be committed if in the middle there
was a MyISAM event!
*/

if (file == &log_file)
if (file == &log_file && opt_using_transactions
&& !my_b_tell(&thd->transaction.trans_log))
{
/*
LOAD DATA INFILE in AUTOCOMMIT=1 mode writes to the binlog
@@ -1560,6 +1571,8 @@ void sql_print_error(const char *format,...)
char buff[1024];
my_vsnprintf(buff,sizeof(buff)-1,format,args);
DBUG_PRINT("error",("%s",buff));
va_end(args);
va_start(args,format);
}
#endif
skr=time(NULL);
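The comment added in MYSQL_LOG::write() explains when the commit position may be reported to the storage engine: only when the event goes to the real binlog, transactions are enabled, and nothing is waiting in the per-thread transaction cache, since otherwise a non-transactional event could commit an unrelated InnoDB transaction. A toy, self-contained model of that guard; the struct and function names are illustrative, not the server's API:

    #include <stdio.h>

    /* Mirrors the shape of the new condition:
       file == &log_file && opt_using_transactions &&
       !my_b_tell(&thd->transaction.trans_log)            */
    struct log_state {
      int writing_to_binlog;    /* stands in for file == &log_file       */
      int using_transactions;   /* stands in for opt_using_transactions  */
      long trans_cache_bytes;   /* stands in for my_b_tell(&trans_log)   */
    };

    static int should_report_commit(const struct log_state *s)
    {
      return s->writing_to_binlog && s->using_transactions &&
             s->trans_cache_bytes == 0;
    }

    int main(void)
    {
      struct log_state mixed = {1, 1, 512}; /* events cached: do not commit here  */
      struct log_state plain = {1, 1, 0};   /* cache empty: safe to report commit */
      printf("mixed=%d plain=%d\n",
             should_report_commit(&mixed), should_report_commit(&plain));
      return 0;
    }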
@@ -282,9 +282,9 @@ void Load_log_event::pack_info(String* packet)
tmp.append("LOAD DATA INFILE '");
tmp.append(fname, fname_len);
tmp.append("' ", 2);
if (sql_ex.opt_flags && REPLACE_FLAG )
if (sql_ex.opt_flags & REPLACE_FLAG)
tmp.append(" REPLACE ");
else if (sql_ex.opt_flags && IGNORE_FLAG )
else if (sql_ex.opt_flags & IGNORE_FLAG)
tmp.append(" IGNORE ");

tmp.append("INTO TABLE ");
@@ -297,7 +297,7 @@ void Load_log_event::pack_info(String* packet)

if (sql_ex.enclosed_len)
{
if (sql_ex.opt_flags && OPT_ENCLOSED_FLAG )
if (sql_ex.opt_flags & OPT_ENCLOSED_FLAG )
tmp.append(" OPTIONALLY ");
tmp.append( " ENCLOSED BY ");
pretty_print_str(&tmp, sql_ex.enclosed, sql_ex.enclosed_len);
@@ -1146,28 +1146,28 @@ Load_log_event::Load_log_event(THD* thd_arg, sql_exchange* ex,
sql_ex.cached_new_format = -1;

if (ex->dumpfile)
sql_ex.opt_flags |= DUMPFILE_FLAG;
sql_ex.opt_flags|= DUMPFILE_FLAG;
if (ex->opt_enclosed)
sql_ex.opt_flags |= OPT_ENCLOSED_FLAG;
sql_ex.opt_flags|= OPT_ENCLOSED_FLAG;

sql_ex.empty_flags = 0;

switch (handle_dup) {
case DUP_IGNORE: sql_ex.opt_flags |= IGNORE_FLAG; break;
case DUP_REPLACE: sql_ex.opt_flags |= REPLACE_FLAG; break;
case DUP_IGNORE: sql_ex.opt_flags|= IGNORE_FLAG; break;
case DUP_REPLACE: sql_ex.opt_flags|= REPLACE_FLAG; break;
case DUP_ERROR: break;
}

if (!ex->field_term->length())
sql_ex.empty_flags |= FIELD_TERM_EMPTY;
sql_ex.empty_flags|= FIELD_TERM_EMPTY;
if (!ex->enclosed->length())
sql_ex.empty_flags |= ENCLOSED_EMPTY;
sql_ex.empty_flags|= ENCLOSED_EMPTY;
if (!ex->line_term->length())
sql_ex.empty_flags |= LINE_TERM_EMPTY;
sql_ex.empty_flags|= LINE_TERM_EMPTY;
if (!ex->line_start->length())
sql_ex.empty_flags |= LINE_START_EMPTY;
sql_ex.empty_flags|= LINE_START_EMPTY;
if (!ex->escaped->length())
sql_ex.empty_flags |= ESCAPED_EMPTY;
sql_ex.empty_flags|= ESCAPED_EMPTY;

skip_lines = ex->skip_lines;

@@ -1273,9 +1273,9 @@ void Load_log_event::print(FILE* file, bool short_form, char* last_db)

fprintf(file, "LOAD DATA INFILE '%-*s' ", fname_len, fname);

if (sql_ex.opt_flags && REPLACE_FLAG )
if (sql_ex.opt_flags & REPLACE_FLAG )
fprintf(file," REPLACE ");
else if (sql_ex.opt_flags && IGNORE_FLAG )
else if (sql_ex.opt_flags & IGNORE_FLAG )
fprintf(file," IGNORE ");

fprintf(file, "INTO TABLE %s ", table_name);
@@ -1287,7 +1287,7 @@ void Load_log_event::print(FILE* file, bool short_form, char* last_db)

if (sql_ex.enclosed)
{
if (sql_ex.opt_flags && OPT_ENCLOSED_FLAG )
if (sql_ex.opt_flags & OPT_ENCLOSED_FLAG )
fprintf(file," OPTIONALLY ");
fprintf(file, " ENCLOSED BY ");
pretty_print_str(file, sql_ex.enclosed, sql_ex.enclosed_len);
@@ -1859,15 +1859,19 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
{
char llbuff[22];
enum enum_duplicates handle_dup = DUP_IGNORE;
if (sql_ex.opt_flags && REPLACE_FLAG)
handle_dup = DUP_REPLACE;
sql_exchange ex((char*)fname, sql_ex.opt_flags &&
DUMPFILE_FLAG );
if (sql_ex.opt_flags & REPLACE_FLAG)
handle_dup= DUP_REPLACE;
sql_exchange ex((char*)fname, sql_ex.opt_flags & DUMPFILE_FLAG);
String field_term(sql_ex.field_term,sql_ex.field_term_len);
String enclosed(sql_ex.enclosed,sql_ex.enclosed_len);
String line_term(sql_ex.line_term,sql_ex.line_term_len);
String line_start(sql_ex.line_start,sql_ex.line_start_len);
String escaped(sql_ex.escaped,sql_ex.escaped_len);
ex.field_term= &field_term;
ex.enclosed= &enclosed;
ex.line_term= &line_term;
ex.line_start= &line_start;
ex.escaped= &escaped;

ex.opt_enclosed = (sql_ex.opt_flags & OPT_ENCLOSED_FLAG);
if (sql_ex.empty_flags & FIELD_TERM_EMPTY)
@@ -1876,7 +1880,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
ex.skip_lines = skip_lines;
List<Item> field_list;
set_fields(field_list);
thd->slave_proxy_id = thd->thread_id;
thd->slave_proxy_id = thread_id;
if (net)
{
// mysql_load will use thd->net to read the file
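Several hunks in this file replace tests of the form `sql_ex.opt_flags && FLAG` with `sql_ex.opt_flags & FLAG`. Logical AND only checks that both operands are nonzero, so the old test was true whenever any flag at all was set; bitwise AND isolates the specific bit. A small self-contained illustration (the flag values are made up for the example, not the server's definitions):

    #include <stdio.h>

    #define REPLACE_FLAG 0x1   /* illustrative bit values */
    #define IGNORE_FLAG  0x2

    int main(void)
    {
      unsigned opt_flags= IGNORE_FLAG;            /* only IGNORE is set */

      /* Old form: nonzero && nonzero evaluates to 1 even though REPLACE is unset. */
      printf("opt_flags && REPLACE_FLAG -> %d\n", opt_flags && REPLACE_FLAG);

      /* Fixed form: tests the REPLACE bit itself, which is 0 here. */
      printf("opt_flags &  REPLACE_FLAG -> %d\n", (opt_flags & REPLACE_FLAG) != 0);
      return 0;
    }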
@@ -2105,7 +2105,7 @@ int main(int argc, char **argv)
size_t stack_size= 0;
pthread_attr_getstacksize(&connection_attrib, &stack_size);
/* We must check if stack_size = 0 as Solaris 2.9 can return 0 here */
if (stack_size && stack_size != thread_stack)
if (stack_size && stack_size < thread_stack)
{
if (global_system_variables.log_warnings)
sql_print_error("Warning: Asked for %ld thread stack, but got %ld",
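This hunk relaxes the stack-size warning: it now fires only when the thread attribute reports less stack than was requested, not whenever the value merely differs, and a reported size of 0 is still ignored because of the Solaris 2.9 behaviour noted in the comment. A minimal sketch of the same check with the standard pthread attribute calls; the 256 KB request is just an example value:

    #include <pthread.h>
    #include <stdio.h>

    int main(void)
    {
      pthread_attr_t attr;
      size_t requested= 256 * 1024;   /* example request, not mysqld's default */
      size_t granted= 0;

      pthread_attr_init(&attr);
      pthread_attr_setstacksize(&attr, requested);
      pthread_attr_getstacksize(&attr, &granted);

      /* Warn only when the platform reports a real value that is too small;
         granted == 0 is treated as "unknown" rather than as an error. */
      if (granted && granted < requested)
        fprintf(stderr, "warning: asked for %zu bytes of stack, got %zu\n",
                requested, granted);

      pthread_attr_destroy(&attr);
      return 0;
    }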
@@ -3321,7 +3321,7 @@ struct my_option my_long_options[] =
(gptr*) &opt_local_infile, 0, GET_BOOL, OPT_ARG,
1, 0, 0, 0, 0, 0},
{"log-bin", OPT_BIN_LOG,
"Log queries in new binary format (for replication)",
"Log update queries in binary format",
(gptr*) &opt_bin_logname, (gptr*) &opt_bin_logname, 0, GET_STR_ALLOC,
OPT_ARG, 0, 0, 0, 0, 0, 0},
{"log-bin-index", OPT_BIN_LOG_INDEX,
@@ -3375,27 +3375,32 @@ struct my_option my_long_options[] =
(gptr*) &master_retry_count, (gptr*) &master_retry_count, 0, GET_ULONG,
REQUIRED_ARG, 3600*24, 0, 0, 0, 0, 0},
{"master-info-file", OPT_MASTER_INFO_FILE,
"The location of the file that remembers where we left off on the master during the replication process. The default is `master.info' in the data directory. You should not need to change this.",
"The location and name of the file that remembers the master and where the I/O replication \
thread is in the master's binlogs.",
(gptr*) &master_info_file, (gptr*) &master_info_file, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"master-ssl", OPT_MASTER_SSL,
"Turn SSL on for replication. Be warned that is this is a relatively new feature.",
"Planned to enable the slave to connect to the master using SSL. Does nothing yet.",
(gptr*) &master_ssl, (gptr*) &master_ssl, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0,
0, 0},
{"master-ssl-key", OPT_MASTER_SSL_KEY,
"Master SSL keyfile name. Only applies if you have enabled master-ssl.",
"Master SSL keyfile name. Only applies if you have enabled master-ssl. Does \
nothing yet.",
(gptr*) &master_ssl_key, (gptr*) &master_ssl_key, 0, GET_STR, OPT_ARG,
0, 0, 0, 0, 0, 0},
{"master-ssl-cert", OPT_MASTER_SSL_CERT,
"Master SSL certificate file name. Only applies if you have enabled master-ssl.",
"Master SSL certificate file name. Only applies if you have enabled \
master-ssl. Does nothing yet.",
(gptr*) &master_ssl_cert, (gptr*) &master_ssl_cert, 0, GET_STR, OPT_ARG,
0, 0, 0, 0, 0, 0},
{"master-ssl-capath", OPT_MASTER_SSL_CAPATH,
"Master SSL CA path. Only applies if you have enabled master-ssl.",
"Master SSL CA path. Only applies if you have enabled master-ssl. \
Does nothing yet.",
(gptr*) &master_ssl_capath, (gptr*) &master_ssl_capath, 0, GET_STR, OPT_ARG,
0, 0, 0, 0, 0, 0},
{"master-ssl-cipher", OPT_MASTER_SSL_CIPHER,
"Master SSL cipher. Only applies if you have enabled master-ssl.",
"Master SSL cipher. Only applies if you have enabled master-ssl. \
Does nothing yet.",
(gptr*) &master_ssl_cipher, (gptr*) &master_ssl_capath, 0, GET_STR, OPT_ARG,
0, 0, 0, 0, 0, 0},
{"myisam-recover", OPT_MYISAM_RECOVER,
@@ -3494,10 +3499,13 @@ struct my_option my_long_options[] =
{"rpl-recovery-rank", OPT_RPL_RECOVERY_RANK, "Undocumented",
(gptr*) &rpl_recovery_rank, (gptr*) &rpl_recovery_rank, 0, GET_ULONG,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"relay-log", OPT_RELAY_LOG, "Undocumented",
{"relay-log", OPT_RELAY_LOG,
"The location and name to use for relay logs",
(gptr*) &opt_relay_logname, (gptr*) &opt_relay_logname, 0,
GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"relay-log-index", OPT_RELAY_LOG_INDEX, "Undocumented",
{"relay-log-index", OPT_RELAY_LOG_INDEX,
"The location and name to use for the file that keeps a list of the last \
relay logs",
(gptr*) &opt_relaylog_index_name, (gptr*) &opt_relaylog_index_name, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"safe-mode", OPT_SAFE, "Skip some optimize stages (for testing).",
@@ -3559,10 +3567,14 @@ struct my_option my_long_options[] =
{"skip-thread-priority", OPT_SKIP_PRIOR,
"Don't give threads different priorities.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0,
0, 0, 0, 0, 0},
{"relay-log-info-file", OPT_RELAY_LOG_INFO_FILE, "Undocumented",
{"relay-log-info-file", OPT_RELAY_LOG_INFO_FILE,
"The location and name of the file that remembers where the SQL replication \
thread is in the relay logs",
(gptr*) &relay_log_info_file, (gptr*) &relay_log_info_file, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"slave-load-tmpdir", OPT_SLAVE_LOAD_TMPDIR, "Undocumented",
{"slave-load-tmpdir", OPT_SLAVE_LOAD_TMPDIR,
"The location where the slave should put its temporary files when \
replicating a LOAD DATA INFILE command",
(gptr*) &slave_load_tmpdir, (gptr*) &slave_load_tmpdir, 0, GET_STR_ALLOC,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"slave-skip-errors", OPT_SLAVE_SKIP_ERRORS,
@@ -3888,7 +3900,7 @@ struct my_option my_long_options[] =
(gptr*) &max_system_variables.read_buff_size,0, GET_ULONG, REQUIRED_ARG,
128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, ~0L, MALLOC_OVERHEAD, IO_SIZE, 0},
{"relay_log_space_limit", OPT_RELAY_LOG_SPACE_LIMIT,
"Max space to use for all relay logs",
"Maximum space to use for all relay logs",
(gptr*) &relay_log_space_limit,
(gptr*) &relay_log_space_limit, 0, GET_ULL, REQUIRED_ARG, 0L, 0L,
(longlong) ULONG_MAX, 0, 1, 0},
@@ -28,6 +28,9 @@
#include "mysql_priv.h"
#include "sql_acl.h"
#include "hash_filo.h"
#ifdef HAVE_REPLICATION
#include "sql_repl.h" //for tables_ok()
#endif
#include <m_ctype.h>
#include <assert.h>
#include <stdarg.h>
@@ -2052,6 +2055,15 @@ int mysql_table_grant (THD *thd, TABLE_LIST *table_list,
tables[0].lock_type=tables[1].lock_type=tables[2].lock_type=TL_WRITE;
tables[0].db=tables[1].db=tables[2].db=(char*) "mysql";

#ifdef HAVE_REPLICATION
/*
GRANT and REVOKE are applied the slave in/exclusion rules as they are
some kind of updates to the mysql.% tables.
*/
if (thd->slave_thread && table_rules_on && !tables_ok(0, tables))
DBUG_RETURN(0);
#endif

if (open_and_lock_tables(thd,tables))
{ // Should never happen
close_thread_tables(thd); /* purecov: deadcode */
@@ -2214,6 +2226,16 @@ int mysql_grant (THD *thd, const char *db, List <LEX_USER> &list,
tables[0].lock_type=tables[1].lock_type=TL_WRITE;
tables[0].db=tables[1].db=(char*) "mysql";
tables[0].table=tables[1].table=0;

#ifdef HAVE_REPLICATION
/*
GRANT and REVOKE are applied the slave in/exclusion rules as they are
some kind of updates to the mysql.% tables.
*/
if (thd->slave_thread && table_rules_on && !tables_ok(0, tables))
DBUG_RETURN(0);
#endif

if (open_and_lock_tables(thd,tables))
{ // This should never happen
close_thread_tables(thd); /* purecov: deadcode */
@@ -743,11 +743,11 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used)
if (query_cache_size == 0)
DBUG_VOID_RETURN;

if ((local_tables = is_cacheable(thd, thd->query_length,
if ((local_tables= is_cacheable(thd, thd->query_length,
thd->query, &thd->lex, tables_used)))
{
NET *net = &thd->net;
byte flags = (thd->client_capabilities & CLIENT_LONG_FLAG ? 0x80 : 0);
NET *net= &thd->net;
byte flags= (thd->client_capabilities & CLIENT_LONG_FLAG ? 0x80 : 0);
STRUCT_LOCK(&structure_guard_mutex);

if (query_cache_size == 0)
@@ -775,8 +775,10 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used)
flags|= (byte) thd->variables.convert_set->number();
DBUG_ASSERT(thd->variables.convert_set->number() < 128);
}
tot_length=thd->query_length+thd->db_length+2;
thd->query[tot_length-1] = (char) flags;
tot_length= thd->query_length+thd->db_length+2+sizeof(ha_rows);
thd->query[tot_length-1]= (char) flags;
memcpy((void *)(thd->query + (tot_length-sizeof(ha_rows)-1)),
(const void *)&thd->variables.select_limit, sizeof(ha_rows));

/* Check if another thread is processing the same query? */
Query_cache_block *competitor = (Query_cache_block *)
@@ -910,7 +912,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
}
Query_cache_block *query_block;

tot_length=query_length+thd->db_length+2;
tot_length= query_length+thd->db_length+2+sizeof(ha_rows);
if (thd->db_length)
{
memcpy(sql+query_length+1, thd->db, thd->db_length);
@@ -926,15 +928,18 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
Most significant bit - CLIENT_LONG_FLAG,
Other - charset number (0 no charset convertion)
*/
flags = (thd->client_capabilities & CLIENT_LONG_FLAG ? 0x80 : 0);
flags= (thd->client_capabilities & CLIENT_LONG_FLAG ? 0x80 : 0);
if (thd->variables.convert_set != 0)
{
flags |= (byte) thd->variables.convert_set->number();
flags|= (byte) thd->variables.convert_set->number();
DBUG_ASSERT(thd->variables.convert_set->number() < 128);
}
sql[tot_length-1] = (char) flags;
query_block = (Query_cache_block *) hash_search(&queries, (byte*) sql,
sql[tot_length-1]= (char) flags;
memcpy((void *)(sql + (tot_length-sizeof(ha_rows)-1)),
(const void *)&thd->variables.select_limit, sizeof(ha_rows));
query_block= (Query_cache_block *) hash_search(&queries, (byte*) sql,
tot_length);

/* Quick abort on unlocked data */
if (query_block == 0 ||
query_block->query()->result() == 0 ||
@@ -2439,7 +2444,7 @@ TABLE_COUNTER_TYPE Query_cache::is_cacheable(THD *thd, uint32 query_len,

if (lex->sql_command == SQLCOM_SELECT &&
(thd->variables.query_cache_type == 1 ||
(thd->variables.query_cache_type == 2 && (lex->select->options &
(thd->variables.query_cache_type == 2 && (lex->select_lex.options &
OPTION_TO_QUERY_CACHE))) &&
thd->safe_to_cache_query)
{
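The sql_cache.cc hunks above (together with the matching sql_parse.cc allocation change below) widen the query-cache hash key: after the query text and database name, the current select_limit is now copied in before the packed flags byte, which is why the buffer grows by sizeof(ha_rows) and why the updated query_cache.result shows two cache entries for the same statement run under different SQL_SELECT_LIMIT settings. A rough self-contained model of the key layout; the names and types are illustrative, not the server's structures:

    #include <stdio.h>
    #include <string.h>

    typedef unsigned long long ha_rows_t;   /* stand-in for the server's ha_rows */

    /* Build "query \0 db select_limit flags" into key[] and return its length. */
    static size_t build_cache_key(char *key, const char *query, const char *db,
                                  unsigned char flags, ha_rows_t select_limit)
    {
      size_t qlen= strlen(query), dlen= strlen(db);
      size_t tot_length= qlen + dlen + 2 + sizeof(ha_rows_t);

      memcpy(key, query, qlen);
      key[qlen]= '\0';
      memcpy(key + qlen + 1, db, dlen);
      memcpy(key + tot_length - sizeof(ha_rows_t) - 1,
             &select_limit, sizeof select_limit);
      key[tot_length - 1]= (char) flags;
      return tot_length;
    }

    int main(void)
    {
      char k1[128], k2[128];
      size_t n1= build_cache_key(k1, "select * from t1", "test", 0x80, 1);
      size_t n2= build_cache_key(k2, "select * from t1", "test", 0x80, ~0ULL);

      /* Same text and database, different SQL_SELECT_LIMIT: the keys differ. */
      printf("keys %s\n",
             (n1 == n2 && memcmp(k1, k2, n1) == 0) ? "match" : "differ");
      return 0;
    }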
@@ -1029,7 +1029,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
/* We must allocate some extra memory for query cache */
if (!(thd->query= (char*) thd->memdup_w_gap((gptr) (packet),
packet_length,
thd->db_length+2)))
thd->db_length+2+
sizeof(ha_rows))))
break;
thd->query[packet_length]=0;
thd->packet.shrink(thd->variables.net_buffer_length);// Reclaim some memory
@@ -1532,7 +1532,10 @@ select_option:
Select->options|= OPTION_FOUND_ROWS;
}
| SQL_NO_CACHE_SYM { current_thd->safe_to_cache_query=0; }
| SQL_CACHE_SYM { Select->options|= OPTION_TO_QUERY_CACHE; }
| SQL_CACHE_SYM
{
Lex->select_lex.options|= OPTION_TO_QUERY_CACHE;
}
| ALL {}
;