Mirror of https://github.com/MariaDB/server.git
Merge 11.0 into 11.1
@@ -508,9 +508,9 @@ mini-benchmark:
    - |
      mariadb --skip-column-names -e "SELECT @@version, @@version_comment" | tee /tmp/version
      grep $MARIADB_MAJOR_VERSION /tmp/version || echo "MariaDB didn't install properly"
    - yum install -y sysbench procps-ng perf util-linux || yum install -y https://kojipkgs.fedoraproject.org//packages/luajit/2.0.4/3.el7/x86_64/luajit-2.0.4-3.el7.x86_64.rpm https://kojipkgs.fedoraproject.org//packages/sysbench/1.0.17/2.el7/x86_64/sysbench-1.0.17-2.el7.x86_64.rpm https://kojipkgs.fedoraproject.org//packages/ck/0.5.2/2.el7/x86_64/ck-0.5.2-2.el7.x86_64.rpm
    - yum install -y sysbench procps-ng perf flamegraph flamegraph-stackcollapse-perf util-linux dnf-utils
    - /usr/share/mariadb/mini-benchmark
    - cp -av */sysbench-run-*.log */metrics.txt .. # Move files one level down so they can be saved as artifacts
    - cp -av */sysbench-run-*.log */metrics.txt . # Move files one level down so they can be saved as artifacts
  artifacts:
    when: always
    paths:
@ -1266,8 +1266,9 @@ static int get_options(int *argc, char ***argv)
|
||||
if (opt_slave_data)
|
||||
{
|
||||
opt_lock_all_tables= !opt_single_transaction;
|
||||
opt_master_data= 0;
|
||||
opt_delete_master_logs= 0;
|
||||
if (opt_slave_data != MYSQL_OPT_SLAVE_DATA_COMMENTED_SQL)
|
||||
opt_master_data= 0;
|
||||
}
|
||||
|
||||
/* Ensure consistency of the set of binlog & locking options */
|
||||
@ -1280,10 +1281,7 @@ static int get_options(int *argc, char ***argv)
|
||||
return(EX_USAGE);
|
||||
}
|
||||
if (opt_master_data)
|
||||
{
|
||||
opt_lock_all_tables= !opt_single_transaction;
|
||||
opt_slave_data= 0;
|
||||
}
|
||||
if (opt_single_transaction || opt_lock_all_tables)
|
||||
lock_tables= 0;
|
||||
if (enclosed && opt_enclosed)
|
||||
@ -6220,17 +6218,12 @@ static int do_show_master_status(MYSQL *mysql_con, int consistent_binlog_pos,
|
||||
|
||||
}
|
||||
|
||||
/* SHOW MASTER STATUS reports file and position */
|
||||
print_comment(md_result_file, 0,
|
||||
"\n--\n-- Position to start replication or point-in-time "
|
||||
"recovery from\n--\n\n");
|
||||
fprintf(md_result_file,
|
||||
"%sCHANGE MASTER TO MASTER_LOG_FILE='%s', MASTER_LOG_POS=%s;\n",
|
||||
(use_gtid ? "-- " : comment_prefix), file, offset);
|
||||
/* gtid */
|
||||
if (have_mariadb_gtid)
|
||||
{
|
||||
print_comment(md_result_file, 0,
|
||||
"\n--\n-- GTID to start replication from\n--\n\n");
|
||||
"\n-- Preferably use GTID to start replication from GTID "
|
||||
"position:\n\n");
|
||||
if (use_gtid)
|
||||
fprintf(md_result_file,
|
||||
"%sCHANGE MASTER TO MASTER_USE_GTID=slave_pos;\n",
|
||||
@ -6239,6 +6232,19 @@ static int do_show_master_status(MYSQL *mysql_con, int consistent_binlog_pos,
|
||||
"%sSET GLOBAL gtid_slave_pos='%s';\n",
|
||||
(!use_gtid ? "-- " : comment_prefix), gtid_pos);
|
||||
}
|
||||
|
||||
/* SHOW MASTER STATUS reports file and position */
|
||||
print_comment(md_result_file, 0,
"\n--\n-- Alternatively, the following is the position of the binary "
"logging from SHOW MASTER STATUS at the point of backup."
"\n-- Use this when creating a replica of the primary server "
"where the backup was made."
"\n-- The new server will be connecting to the primary server "
"where the backup was taken."
"\n--\n\n");
fprintf(md_result_file,
"%sCHANGE MASTER TO MASTER_LOG_FILE='%s', MASTER_LOG_POS=%s;\n",
(use_gtid ? "-- " : comment_prefix), file, offset);
|
||||
check_io(md_result_file);
|
||||
|
||||
if (!consistent_binlog_pos)
|
||||
@ -6317,7 +6323,6 @@ static int do_show_slave_status(MYSQL *mysql_con, int use_gtid,
|
||||
(opt_slave_data == MYSQL_OPT_SLAVE_DATA_COMMENTED_SQL) ? "-- " : "";
|
||||
const char *gtid_comment_prefix= (use_gtid ? comment_prefix : "-- ");
|
||||
const char *nogtid_comment_prefix= (!use_gtid ? comment_prefix : "-- ");
|
||||
int set_gtid_done= 0;
|
||||
|
||||
if (mysql_query_with_error_report(mysql_con, &slave,
|
||||
multi_source ?
|
||||
@ -6333,23 +6338,36 @@ static int do_show_slave_status(MYSQL *mysql_con, int use_gtid,
|
||||
return 1;
|
||||
}
|
||||
|
||||
print_comment(md_result_file, 0,
"\n--\n-- The following is the SQL position of the replication "
"taken from SHOW SLAVE STATUS at the time of backup.\n"
"-- Use this position when creating a clone of, or replacement "
"server, from where the backup was taken."
"\n-- This new server will connect to the same primary "
"server%s.\n--\n",
multi_source ? "(s)" : "");
|
||||
|
||||
if (multi_source)
|
||||
{
|
||||
char gtid_pos[MAX_GTID_LENGTH];
|
||||
if (have_mariadb_gtid && get_gtid_pos(gtid_pos, 0))
|
||||
{
|
||||
mysql_free_result(slave);
|
||||
return 1;
|
||||
}
|
||||
print_comment(md_result_file, 0,
|
||||
"-- GTID position to start replication:\n");
|
||||
fprintf(md_result_file, "%sSET GLOBAL gtid_slave_pos='%s';\n",
|
||||
gtid_comment_prefix, gtid_pos);
|
||||
}
|
||||
if (use_gtid)
|
||||
print_comment(md_result_file, 0,
|
||||
"\n-- Use only the MASTER_USE_GTID=slave_pos or "
|
||||
"MASTER_LOG_FILE/MASTER_LOG_POS in the statements below."
|
||||
"\n\n");
|
||||
|
||||
while ((row= mysql_fetch_row(slave)))
|
||||
{
|
||||
if (multi_source && !set_gtid_done)
|
||||
{
|
||||
char gtid_pos[MAX_GTID_LENGTH];
|
||||
if (have_mariadb_gtid && get_gtid_pos(gtid_pos, 0))
|
||||
{
|
||||
mysql_free_result(slave);
|
||||
return 1;
|
||||
}
|
||||
if (opt_comments)
|
||||
fprintf(md_result_file, "\n--\n-- Gtid position to start replication "
|
||||
"from\n--\n\n");
|
||||
fprintf(md_result_file, "%sSET GLOBAL gtid_slave_pos='%s';\n",
|
||||
gtid_comment_prefix, gtid_pos);
|
||||
set_gtid_done= 1;
|
||||
}
|
||||
if (row[9 + multi_source] && row[21 + multi_source])
|
||||
{
|
||||
if (use_gtid)
|
||||
@ -6363,11 +6381,6 @@ static int do_show_slave_status(MYSQL *mysql_con, int use_gtid,
|
||||
}
|
||||
|
||||
/* SHOW MASTER STATUS reports file and position */
|
||||
if (opt_comments)
|
||||
fprintf(md_result_file,
|
||||
"\n--\n-- Position to start replication or point-in-time "
|
||||
"recovery from (the master of this slave)\n--\n\n");
|
||||
|
||||
if (multi_source)
|
||||
fprintf(md_result_file, "%sCHANGE MASTER '%.80s' TO ",
|
||||
nogtid_comment_prefix, row[0]);
|
||||
@ -6388,6 +6401,7 @@ static int do_show_slave_status(MYSQL *mysql_con, int use_gtid,
|
||||
check_io(md_result_file);
|
||||
}
|
||||
}
|
||||
fprintf(md_result_file, "\n");
|
||||
mysql_free_result(slave);
|
||||
return 0;
|
||||
}
|
||||
|
@ -615,7 +615,7 @@ void replace_strings_append(struct st_replace *rep, DYNAMIC_STRING* ds,
|
||||
const char *from);
|
||||
|
||||
ATTRIBUTE_NORETURN
|
||||
static void cleanup_and_exit(int exit_code);
|
||||
static void cleanup_and_exit(int exit_code, bool called_from_die);
|
||||
|
||||
ATTRIBUTE_NORETURN
|
||||
static void really_die(const char *msg);
|
||||
@ -932,6 +932,7 @@ pthread_attr_t cn_thd_attrib;
|
||||
pthread_handler_t connection_thread(void *arg)
|
||||
{
|
||||
struct st_connection *cn= (struct st_connection*)arg;
|
||||
DBUG_ENTER("connection_thread");
|
||||
|
||||
mysql_thread_init();
|
||||
while (cn->command != EMB_END_CONNECTION)
|
||||
@ -943,6 +944,7 @@ pthread_handler_t connection_thread(void *arg)
|
||||
pthread_cond_wait(&cn->query_cond, &cn->query_mutex);
|
||||
pthread_mutex_unlock(&cn->query_mutex);
|
||||
}
|
||||
DBUG_PRINT("info", ("executing command: %d", cn->command));
|
||||
switch (cn->command)
|
||||
{
|
||||
case EMB_END_CONNECTION:
|
||||
@ -963,24 +965,26 @@ pthread_handler_t connection_thread(void *arg)
|
||||
break;
|
||||
case EMB_CLOSE_STMT:
|
||||
cn->result= mysql_stmt_close(cn->stmt);
|
||||
cn->stmt= 0;
|
||||
break;
|
||||
default:
|
||||
DBUG_ASSERT(0);
|
||||
}
|
||||
cn->command= 0;
|
||||
pthread_mutex_lock(&cn->result_mutex);
|
||||
cn->query_done= 1;
|
||||
cn->command= 0;
|
||||
pthread_cond_signal(&cn->result_cond);
|
||||
pthread_mutex_unlock(&cn->result_mutex);
|
||||
}
|
||||
|
||||
end_thread:
|
||||
cn->query_done= 1;
|
||||
DBUG_ASSERT(cn->stmt == 0);
|
||||
mysql_close(cn->mysql);
|
||||
cn->mysql= 0;
|
||||
cn->query_done= 1;
|
||||
mysql_thread_end();
|
||||
pthread_exit(0);
|
||||
return 0;
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
static void wait_query_thread_done(struct st_connection *con)
|
||||
@ -998,12 +1002,16 @@ static void wait_query_thread_done(struct st_connection *con)
|
||||
|
||||
static void signal_connection_thd(struct st_connection *cn, int command)
|
||||
{
|
||||
DBUG_ENTER("signal_connection_thd");
|
||||
DBUG_PRINT("enter", ("command: %d", command));
|
||||
|
||||
DBUG_ASSERT(cn->has_thread);
|
||||
cn->query_done= 0;
|
||||
cn->command= command;
|
||||
pthread_mutex_lock(&cn->query_mutex);
|
||||
cn->command= command;
|
||||
pthread_cond_signal(&cn->query_cond);
|
||||
pthread_mutex_unlock(&cn->query_mutex);
|
||||
DBUG_VOID_RETURN;
|
||||
}
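The change above moves the update of cn->command (and the completion flags in connection_thread) inside the mutex-protected region before pthread_cond_signal() is called, so the connection thread can never be woken up while the shared state is still stale. Below is a minimal standalone sketch of that handshake, re-expressed with standard C++ primitives rather than the mysqltest st_connection structures; all names in it are illustrative, not part of the patch.

```cpp
// Minimal standalone sketch of the signal/wait handshake, re-expressed with
// standard C++ primitives instead of the mysqltest st_connection fields.
// Illustrative only; the names and the int-command protocol are assumptions.
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

struct Worker {
  std::mutex m;
  std::condition_variable cv;
  int command = 0;           // 0 = idle, >0 = work item, <0 = shut down
  bool query_done = true;

  void signal(int cmd) {     // counterpart of signal_connection_thd()
    std::unique_lock<std::mutex> lk(m);
    cv.wait(lk, [this] { return query_done; });   // wait_query_thread_done()
    query_done = false;
    // Set the command while holding the lock, as in the patched code, so the
    // worker can never wake up and read a stale command value.
    command = cmd;
    cv.notify_all();
  }

  void run() {               // counterpart of connection_thread()
    std::unique_lock<std::mutex> lk(m);
    for (;;) {
      cv.wait(lk, [this] { return command != 0; });
      if (command < 0)
        break;
      std::cout << "executing command " << command << "\n";
      command = 0;           // completion is also reported under the lock
      query_done = true;
      cv.notify_all();
    }
  }
};

int main() {
  Worker w;
  std::thread t(&Worker::run, &w);
  w.signal(1);               // run one command...
  w.signal(-1);              // ...then ask the worker to shut down
  t.join();
}
```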
|
||||
|
||||
|
||||
@ -1068,27 +1076,37 @@ static int do_stmt_execute(struct st_connection *cn)
|
||||
static int do_stmt_close(struct st_connection *cn)
|
||||
{
|
||||
DBUG_ENTER("do_stmt_close");
|
||||
/* The cn->stmt is already set. */
|
||||
if (!cn->has_thread)
|
||||
DBUG_RETURN(mysql_stmt_close(cn->stmt));
|
||||
{
|
||||
/* The cn->stmt is already set. */
|
||||
int res= mysql_stmt_close(cn->stmt);
|
||||
cn->stmt= 0;
|
||||
DBUG_RETURN(res);
|
||||
}
|
||||
wait_query_thread_done(cn);
|
||||
signal_connection_thd(cn, EMB_CLOSE_STMT);
|
||||
wait_query_thread_done(cn);
|
||||
DBUG_ASSERT(cn->stmt == 0);
|
||||
DBUG_RETURN(cn->result);
|
||||
}
|
||||
|
||||
|
||||
static void emb_close_connection(struct st_connection *cn)
|
||||
{
|
||||
DBUG_ENTER("emb_close_connection");
|
||||
if (!cn->has_thread)
|
||||
return;
|
||||
DBUG_VOID_RETURN;
|
||||
wait_query_thread_done(cn);
|
||||
signal_connection_thd(cn, EMB_END_CONNECTION);
|
||||
pthread_join(cn->tid, NULL);
|
||||
cn->has_thread= FALSE;
|
||||
DBUG_ASSERT(cn->mysql == 0);
|
||||
DBUG_ASSERT(cn->stmt == 0);
|
||||
pthread_mutex_destroy(&cn->query_mutex);
|
||||
pthread_cond_destroy(&cn->query_cond);
|
||||
pthread_mutex_destroy(&cn->result_mutex);
|
||||
pthread_cond_destroy(&cn->result_cond);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
|
||||
@ -1112,7 +1130,13 @@ static void init_connection_thd(struct st_connection *cn)
|
||||
#define do_read_query_result(cn) mysql_read_query_result(cn->mysql)
|
||||
#define do_stmt_prepare(cn, q, q_len) mysql_stmt_prepare(cn->stmt, q, (ulong)q_len)
|
||||
#define do_stmt_execute(cn) mysql_stmt_execute(cn->stmt)
|
||||
#define do_stmt_close(cn) mysql_stmt_close(cn->stmt)
|
||||
|
||||
static int do_stmt_close(struct st_connection *cn)
|
||||
{
|
||||
int res= mysql_stmt_close(cn->stmt);
|
||||
cn->stmt= 0;
|
||||
return res;
|
||||
}
|
||||
|
||||
#endif /*EMBEDDED_LIBRARY*/
|
||||
|
||||
@ -1440,7 +1464,6 @@ void close_statements()
|
||||
{
|
||||
if (con->stmt)
|
||||
do_stmt_close(con);
|
||||
con->stmt= 0;
|
||||
}
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
@ -1512,7 +1535,8 @@ void ha_pre_shutdown();
|
||||
#endif
|
||||
|
||||
|
||||
ATTRIBUTE_NORETURN static void cleanup_and_exit(int exit_code)
|
||||
ATTRIBUTE_NORETURN static void cleanup_and_exit(int exit_code,
|
||||
bool called_from_die)
|
||||
{
|
||||
#ifdef EMBEDDED_LIBRARY
|
||||
if (server_initialized)
|
||||
@ -1525,16 +1549,6 @@ ATTRIBUTE_NORETURN static void cleanup_and_exit(int exit_code)
|
||||
if (server_initialized)
|
||||
mysql_server_end();
|
||||
|
||||
/*
|
||||
mysqltest is fundamentally written in a way that makes impossible
|
||||
to free all memory before exit (consider memory allocated
|
||||
for frame local DYNAMIC_STRING's and die() invoked down the stack.
|
||||
|
||||
We close stderr here to stop unavoidable safemalloc reports
|
||||
from polluting the output.
|
||||
*/
|
||||
fclose(stderr);
|
||||
|
||||
my_end(my_end_arg);
|
||||
|
||||
if (!silent) {
|
||||
@ -1554,6 +1568,11 @@ ATTRIBUTE_NORETURN static void cleanup_and_exit(int exit_code)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Report memory leaks, if not called from 'die()', as die() will not release
|
||||
all memory.
|
||||
*/
|
||||
sf_leaking_memory= called_from_die;
|
||||
exit(exit_code);
|
||||
}
|
||||
|
||||
@ -1620,7 +1639,7 @@ static void really_die(const char *msg)
|
||||
second time, just exit
|
||||
*/
|
||||
if (dying)
|
||||
cleanup_and_exit(1);
|
||||
cleanup_and_exit(1, 1);
|
||||
dying= 1;
|
||||
|
||||
log_file.show_tail(opt_tail_lines);
|
||||
@ -1632,7 +1651,7 @@ static void really_die(const char *msg)
|
||||
if (cur_con && !cur_con->pending)
|
||||
show_warnings_before_error(cur_con->mysql);
|
||||
|
||||
cleanup_and_exit(1);
|
||||
cleanup_and_exit(1, 1);
|
||||
}
|
||||
|
||||
void report_or_die(const char *fmt, ...)
|
||||
@ -1686,7 +1705,7 @@ void abort_not_supported_test(const char *fmt, ...)
|
||||
}
|
||||
va_end(args);
|
||||
|
||||
cleanup_and_exit(62);
|
||||
cleanup_and_exit(62, 0);
|
||||
}
|
||||
|
||||
|
||||
@ -2233,14 +2252,14 @@ int dyn_string_cmp(DYNAMIC_STRING* ds, const char *fname)
|
||||
check_result
|
||||
|
||||
RETURN VALUES
|
||||
error - the function will not return
|
||||
|
||||
0 ok
|
||||
1 error
|
||||
*/
|
||||
|
||||
void check_result()
|
||||
int check_result()
|
||||
{
|
||||
const char *mess= 0;
|
||||
|
||||
int error= 1;
|
||||
DBUG_ENTER("check_result");
|
||||
DBUG_ASSERT(result_file_name);
|
||||
DBUG_PRINT("enter", ("result_file_name: %s", result_file_name));
|
||||
@ -2248,7 +2267,10 @@ void check_result()
|
||||
switch (compare_files(log_file.file_name(), result_file_name)) {
|
||||
case RESULT_OK:
|
||||
if (!error_count)
|
||||
{
|
||||
error= 0;
|
||||
break; /* ok */
|
||||
}
|
||||
mess= "Got errors while running test";
|
||||
/* Fallthrough */
|
||||
case RESULT_LENGTH_MISMATCH:
|
||||
@ -2287,14 +2309,13 @@ void check_result()
|
||||
log_file.file_name(), reject_file, errno);
|
||||
|
||||
show_diff(NULL, result_file_name, reject_file);
|
||||
die("%s", mess);
|
||||
fprintf(stderr, "%s", mess);
|
||||
break;
|
||||
}
|
||||
default: /* impossible */
|
||||
die("Unknown error code from dyn_string_cmp()");
|
||||
}
|
||||
|
||||
DBUG_VOID_RETURN;
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
|
||||
|
||||
@ -5693,7 +5714,6 @@ void do_close_connection(struct st_command *command)
|
||||
#endif /*!EMBEDDED_LIBRARY*/
|
||||
if (con->stmt)
|
||||
do_stmt_close(con);
|
||||
con->stmt= 0;
|
||||
#ifdef EMBEDDED_LIBRARY
|
||||
/*
|
||||
As query could be still executed in a separate thread
|
||||
@ -7375,17 +7395,17 @@ get_one_option(const struct my_option *opt, const char *argument, const char *)
|
||||
break;
|
||||
case 'V':
|
||||
print_version();
|
||||
exit(0);
|
||||
cleanup_and_exit(0,0);
|
||||
case OPT_MYSQL_PROTOCOL:
|
||||
#ifndef EMBEDDED_LIBRARY
|
||||
if ((opt_protocol= find_type_with_warning(argument, &sql_protocol_typelib,
|
||||
opt->name)) <= 0)
|
||||
exit(1);
|
||||
cleanup_and_exit(1,0);
|
||||
#endif
|
||||
break;
|
||||
case '?':
|
||||
usage();
|
||||
exit(0);
|
||||
cleanup_and_exit(0,0);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@ -7397,12 +7417,12 @@ int parse_args(int argc, char **argv)
|
||||
default_argv= argv;
|
||||
|
||||
if ((handle_options(&argc, &argv, my_long_options, get_one_option)))
|
||||
exit(1);
|
||||
cleanup_and_exit(1, 0);
|
||||
|
||||
if (argc > 1)
|
||||
{
|
||||
usage();
|
||||
exit(1);
|
||||
cleanup_and_exit(1, 0);
|
||||
}
|
||||
if (argc == 1)
|
||||
opt_db= *argv;
|
||||
@ -8523,7 +8543,7 @@ void run_query_stmt(struct st_connection *cn, struct st_command *command,
|
||||
my_bool ds_res_1st_execution_init = FALSE;
|
||||
my_bool compare_2nd_execution = TRUE;
|
||||
int query_match_ps2_re;
|
||||
|
||||
MYSQL_RES *res;
|
||||
DBUG_ENTER("run_query_stmt");
|
||||
DBUG_PRINT("query", ("'%-.60s'", query));
|
||||
DBUG_PRINT("info",
|
||||
@ -8729,10 +8749,13 @@ void run_query_stmt(struct st_connection *cn, struct st_command *command,
|
||||
The --enable_prepare_warnings command can be used to change this so
|
||||
that warnings from both the prepare and execute phase are shown.
|
||||
*/
|
||||
if ((mysql_stmt_result_metadata(stmt) != NULL) &&
|
||||
!disable_warnings &&
|
||||
!prepare_warnings_enabled)
|
||||
dynstr_set(&ds_prepare_warnings, NULL);
|
||||
if ((res= mysql_stmt_result_metadata(stmt)))
|
||||
{
|
||||
if (!disable_warnings &&
|
||||
!prepare_warnings_enabled)
|
||||
dynstr_set(&ds_prepare_warnings, NULL);
|
||||
mysql_free_result(res);
|
||||
}
|
||||
|
||||
/*
|
||||
Fetch info before fetching warnings, since it will be reset
|
||||
@ -9860,6 +9883,7 @@ static sig_handler signal_handler(int sig)
|
||||
fflush(stderr);
|
||||
my_write_core(sig);
|
||||
#ifndef _WIN32
|
||||
sf_leaking_memory= 1;
|
||||
exit(1); // Shouldn't get here but just in case
|
||||
#endif
|
||||
}
|
||||
@ -9933,12 +9957,10 @@ int main(int argc, char **argv)
|
||||
uint command_executed= 0, last_command_executed= 0;
|
||||
char save_file[FN_REFLEN];
|
||||
bool empty_result= FALSE;
|
||||
int error= 0;
|
||||
MY_INIT(argv[0]);
|
||||
DBUG_ENTER("main");
|
||||
|
||||
/* mysqltest has no way to free all its memory correctly */
|
||||
sf_leaking_memory= 1;
|
||||
|
||||
save_file[0]= 0;
|
||||
TMPDIR[0]= 0;
|
||||
|
||||
@ -10631,7 +10653,7 @@ int main(int argc, char **argv)
|
||||
die("Test ended with parsing disabled");
|
||||
|
||||
/*
|
||||
The whole test has been executed _successfully_.
|
||||
The whole test has been executed successfully.
|
||||
Time to compare result or save it to record file.
|
||||
The entire output from test is in the log file
|
||||
*/
|
||||
@ -10654,7 +10676,7 @@ int main(int argc, char **argv)
|
||||
else
|
||||
{
|
||||
/* Check that the output from test is equal to result file */
|
||||
check_result();
|
||||
error= check_result();
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -10664,7 +10686,8 @@ int main(int argc, char **argv)
|
||||
if (! result_file_name || record ||
|
||||
compare_files (log_file.file_name(), result_file_name))
|
||||
{
|
||||
die("The test didn't produce any output");
|
||||
fprintf(stderr, "mysqltest: The test didn't produce any output\n");
|
||||
error= 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -10673,12 +10696,15 @@ int main(int argc, char **argv)
|
||||
}
|
||||
|
||||
if (!command_executed && result_file_name && !empty_result)
|
||||
die("No queries executed but non-empty result file found!");
|
||||
{
|
||||
fprintf(stderr, "mysqltest: No queries executed but non-empty result file found!\n");
|
||||
error= 1;
|
||||
}
|
||||
|
||||
verbose_msg("Test has succeeded!");
|
||||
if (!error)
|
||||
verbose_msg("Test has succeeded!");
|
||||
timer_output();
|
||||
/* Yes, if we got this far the test has succeeded! Sakila smiles */
|
||||
cleanup_and_exit(0);
|
||||
cleanup_and_exit(error, 0);
|
||||
return 0; /* Keep compiler happy too */
|
||||
}
|
||||
|
||||
|
@ -34,8 +34,5 @@ ELSE()
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGE_FILES -maix64 -pthread -mcmodel=large")
|
||||
ENDIF()
|
||||
|
||||
# fcntl(fd, F_SETFL, O_DIRECT) is not supported; O_DIRECT is an open(2) flag
|
||||
SET(HAVE_FCNTL_DIRECT 0 CACHE INTERNAL "")
|
||||
|
||||
# make it WARN by default, not AUTO (that implies -Werror)
|
||||
SET(MYSQL_MAINTAINER_MODE "WARN" CACHE STRING "Enable MariaDB maintainer-specific warnings. One of: NO (warnings are disabled) WARN (warnings are enabled) ERR (warnings are errors) AUTO (warnings are errors in Debug only)")
|
||||
|
@ -17,10 +17,6 @@ INCLUDE(CheckSymbolExists)
|
||||
INCLUDE(CheckCSourceRuns)
|
||||
INCLUDE(CheckCSourceCompiles)
|
||||
|
||||
# fcntl(fd, F_SETFL, O_DIRECT) is not supported,
|
||||
# and directio(3C) would only work on UFS or NFS, not ZFS.
|
||||
SET(HAVE_FCNTL_DIRECT 0 CACHE INTERNAL "")
|
||||
|
||||
# Enable 64 bit file offsets
|
||||
SET(_FILE_OFFSET_BITS 64)
|
||||
|
||||
|
@ -44,7 +44,6 @@ SET(HAVE_EXECINFO_H CACHE INTERNAL "")
|
||||
SET(HAVE_FCHMOD CACHE INTERNAL "")
|
||||
SET(HAVE_FCNTL CACHE INTERNAL "")
|
||||
SET(HAVE_FCNTL_H 1 CACHE INTERNAL "")
|
||||
SET(HAVE_FCNTL_DIRECT 0 CACHE INTERNAL "")
|
||||
SET(HAVE_FCNTL_NONBLOCK CACHE INTERNAL "")
|
||||
SET(HAVE_FDATASYNC CACHE INTERNAL "")
|
||||
SET(HAVE_DECL_FDATASYNC CACHE INTERNAL "")
|
||||
|
@ -4,6 +4,9 @@ SET(WITH_PCRE "auto" CACHE STRING
|
||||
"Which pcre to use (possible values are 'bundled', 'system', or 'auto')")
|
||||
|
||||
MACRO(BUNDLE_PCRE2)
|
||||
SET(WITH_PCRE "bundled" CACHE STRING
|
||||
"Which pcre to use (possible values are 'bundled', 'system', or 'auto')")
|
||||
|
||||
SET(dir "${CMAKE_BINARY_DIR}/extra/pcre2")
|
||||
SET(PCRE_INCLUDE_DIRS ${dir}/src/pcre2-build ${dir}/src/pcre2/src)
|
||||
MESSAGE(STATUS "Will download and bundle pcre2")
|
||||
|
@ -53,7 +53,7 @@ MACRO (MYSQL_USE_BUNDLED_SSL)
|
||||
${CMAKE_SOURCE_DIR}/extra/wolfssl/wolfssl
|
||||
${CMAKE_SOURCE_DIR}/extra/wolfssl/wolfssl/wolfssl
|
||||
)
|
||||
SET(SSL_LIBRARIES wolfssl wolfcrypt)
|
||||
SET(SSL_LIBRARIES wolfssl)
|
||||
SET(SSL_INCLUDE_DIRS ${INC_DIRS})
|
||||
SET(SSL_DEFINES "-DHAVE_OPENSSL -DHAVE_WOLFSSL -DWOLFSSL_USER_SETTINGS")
|
||||
SET(HAVE_ERR_remove_thread_state ON CACHE INTERNAL "wolfssl doesn't have ERR_remove_thread_state")
|
||||
|
@ -30,7 +30,6 @@
|
||||
#cmakedefine HAVE_DLFCN_H 1
|
||||
#cmakedefine HAVE_EXECINFO_H 1
|
||||
#cmakedefine HAVE_FCNTL_H 1
|
||||
#cmakedefine HAVE_FCNTL_DIRECT 1
|
||||
#cmakedefine HAVE_FENV_H 1
|
||||
#cmakedefine HAVE_FLOAT_H 1
|
||||
#cmakedefine HAVE_FNMATCH_H 1
|
||||
|
@ -706,7 +706,6 @@ CHECK_SYMBOL_EXISTS(O_NONBLOCK "unistd.h;fcntl.h" HAVE_FCNTL_NONBLOCK)
|
||||
IF(NOT HAVE_FCNTL_NONBLOCK)
|
||||
SET(NO_FCNTL_NONBLOCK 1)
|
||||
ENDIF()
|
||||
CHECK_SYMBOL_EXISTS(O_DIRECT "fcntl.h" HAVE_FCNTL_DIRECT)
|
||||
|
||||
#
|
||||
# Test for how the C compiler does inline, if at all
|
||||
|
debian/mariadb-server.install (vendored)
@ -70,6 +70,7 @@ usr/share/man/man1/myisam_ftdump.1
|
||||
usr/share/man/man1/myisamchk.1
|
||||
usr/share/man/man1/myisamlog.1
|
||||
usr/share/man/man1/myisampack.1
|
||||
usr/share/man/man1/wsrep_sst_backup.1
|
||||
usr/share/man/man1/wsrep_sst_common.1
|
||||
usr/share/man/man1/wsrep_sst_mariabackup.1
|
||||
usr/share/man/man1/wsrep_sst_mysqldump.1
|
||||
|
debian/mariadb-server.mariadb.init (vendored)
@@ -88,7 +88,7 @@ sanity_checks() {
  # If the datadir location is not changed in the configuration,
  # then it is not printed by /usr/sbin/mariadbd --print-defaults,
  # so we use a sane default.
  if [ -z "$datadir"]
  if [ -z "$datadir" ]
  then
    datadir="/var/lib/mysql"
  fi
debian/rules (vendored)
@ -98,7 +98,6 @@ endif
|
||||
-DCOMPILATION_COMMENT="mariadb.org binary distribution" \
|
||||
-DMYSQL_SERVER_SUFFIX="-$(DEB_VERSION_REVISION)" \
|
||||
-DSYSTEM_TYPE="debian-$(DEB_HOST_GNU_SYSTEM)" \
|
||||
-DCMAKE_SYSTEM_PROCESSOR=$(DEB_HOST_ARCH) \
|
||||
-DBUILD_CONFIG=mysql_release \
|
||||
-DCONC_DEFAULT_CHARSET=utf8mb4 \
|
||||
-DPLUGIN_AWS_KEY_MANAGEMENT=NO \
|
||||
|
@ -31,6 +31,7 @@ ENDIF()
|
||||
INCLUDE_DIRECTORIES(
|
||||
${CMAKE_SOURCE_DIR}/include
|
||||
${CMAKE_SOURCE_DIR}/sql
|
||||
${CMAKE_SOURCE_DIR}/storage/maria
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/quicklz
|
||||
${CMAKE_CURRENT_SOURCE_DIR}
|
||||
)
|
||||
@ -56,7 +57,6 @@ ENDIF()
|
||||
MYSQL_ADD_EXECUTABLE(mariadb-backup
|
||||
xtrabackup.cc
|
||||
innobackupex.cc
|
||||
changed_page_bitmap.cc
|
||||
datasink.cc
|
||||
ds_buffer.cc
|
||||
ds_compress.cc
|
||||
@ -72,8 +72,12 @@ MYSQL_ADD_EXECUTABLE(mariadb-backup
|
||||
xbstream_write.cc
|
||||
backup_mysql.cc
|
||||
backup_copy.cc
|
||||
xb_plugin.cc
|
||||
encryption_plugin.cc
|
||||
${PROJECT_BINARY_DIR}/sql/sql_builtin.cc
|
||||
aria_backup_client.cc
|
||||
thread_pool.cc
|
||||
ddl_log.cc
|
||||
common_engine.cc
|
||||
${PROJECT_SOURCE_DIR}/sql/net_serv.cc
|
||||
${PROJECT_SOURCE_DIR}/libmysqld/libmysql.c
|
||||
COMPONENT Backup
|
||||
@ -82,7 +86,8 @@ MYSQL_ADD_EXECUTABLE(mariadb-backup
|
||||
# Export all symbols on Unix, for better crash callstacks
|
||||
SET_TARGET_PROPERTIES(mariadb-backup PROPERTIES ENABLE_EXPORTS TRUE)
|
||||
|
||||
TARGET_LINK_LIBRARIES(mariadb-backup sql sql_builtins)
|
||||
TARGET_LINK_LIBRARIES(mariadb-backup sql sql_builtins aria)
|
||||
|
||||
IF(NOT HAVE_SYSTEM_REGEX)
|
||||
TARGET_LINK_LIBRARIES(mariadb-backup pcre2-posix)
|
||||
ENDIF()
|
||||
|
extra/mariabackup/aria_backup_client.cc (new file, 1016 lines): diff suppressed because it is too large.
extra/mariabackup/aria_backup_client.h (new file, 38 lines)
@ -0,0 +1,38 @@
|
||||
#pragma once
|
||||
#include "my_global.h"
|
||||
#include "datasink.h"
|
||||
#include "backup_mysql.h"
|
||||
#include "thread_pool.h"
|
||||
#include "xtrabackup.h"
|
||||
|
||||
namespace aria {
|
||||
|
||||
bool prepare(const char *target_dir);
|
||||
|
||||
class BackupImpl;
|
||||
|
||||
class Backup {
|
||||
public:
|
||||
Backup(const char *datadir_path,
|
||||
const char *aria_log_path,
|
||||
ds_ctxt_t *datasink,
|
||||
std::vector<MYSQL *> &con_pool, ThreadPool &thread_pool);
|
||||
~Backup();
|
||||
Backup (Backup &&other) = delete;
|
||||
Backup & operator= (Backup &&other) = delete;
|
||||
Backup(const Backup &) = delete;
|
||||
Backup & operator= (const Backup &) = delete;
|
||||
bool init();
|
||||
bool start(bool no_lock);
|
||||
bool wait_for_finish();
|
||||
bool copy_offline_tables(
|
||||
const std::unordered_set<table_key_t> *exclude_tables, bool no_lock,
|
||||
bool copy_stats);
|
||||
bool finalize();
|
||||
bool copy_log_tail();
|
||||
void set_post_copy_table_hook(const post_copy_table_hook_t &hook);
|
||||
private:
|
||||
BackupImpl *m_backup_impl;
|
||||
};
|
||||
|
||||
} // namespace aria
|
@ -41,6 +41,9 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA
|
||||
*******************************************************/
|
||||
|
||||
#include <my_global.h>
|
||||
#include <my_config.h>
|
||||
#include <unireg.h>
|
||||
#include <datadict.h>
|
||||
#include <os0file.h>
|
||||
#include <my_dir.h>
|
||||
#include <ut0mem.h>
|
||||
@ -66,19 +69,26 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA
|
||||
#include <aclapi.h>
|
||||
#endif
|
||||
|
||||
#ifdef MYSQL_CLIENT
|
||||
#define WAS_MYSQL_CLIENT 1
|
||||
#undef MYSQL_CLIENT
|
||||
#endif
|
||||
|
||||
#include "table.h"
|
||||
|
||||
#ifdef WAS_MYSQL_CLIENT
|
||||
#define MYSQL_CLIENT 1
|
||||
#undef WAS_MYSQL_CLIENT
|
||||
#endif
|
||||
|
||||
#define ROCKSDB_BACKUP_DIR "#rocksdb"
|
||||
|
||||
/* list of files to sync for --rsync mode */
|
||||
static std::set<std::string> rsync_list;
|
||||
/* locations of tablespaces read from .isl files */
|
||||
static std::map<std::string, std::string> tablespace_locations;
|
||||
|
||||
/* Whether LOCK BINLOG FOR BACKUP has been issued during backup */
|
||||
bool binlog_locked;
|
||||
|
||||
static void rocksdb_create_checkpoint();
|
||||
static bool has_rocksdb_plugin();
|
||||
static void rocksdb_backup_checkpoint(ds_ctxt *ds_data);
|
||||
static void rocksdb_copy_back(ds_ctxt *ds_data);
|
||||
|
||||
@ -135,10 +145,6 @@ struct datadir_thread_ctxt_t {
|
||||
bool ret;
|
||||
};
|
||||
|
||||
static bool backup_files_from_datadir(ds_ctxt_t *ds_data,
|
||||
const char *dir_path,
|
||||
const char *prefix);
|
||||
|
||||
/************************************************************************
|
||||
Return true if character is a file separator */
|
||||
bool
|
||||
@ -585,7 +591,6 @@ datafile_read(datafile_cur_t *cursor)
|
||||
Check to see if a file exists.
|
||||
Takes name of the file to check.
|
||||
@return true if file exists. */
|
||||
static
|
||||
bool
|
||||
file_exists(const char *filename)
|
||||
{
|
||||
@ -601,7 +606,6 @@ file_exists(const char *filename)
|
||||
|
||||
/************************************************************************
|
||||
Trim leading slashes from absolute path so it becomes relative */
|
||||
static
|
||||
const char *
|
||||
trim_dotslash(const char *path)
|
||||
{
|
||||
@ -634,7 +638,7 @@ ends_with(const char *str, const char *suffix)
|
||||
&& strcmp(str + str_len - suffix_len, suffix) == 0);
|
||||
}
|
||||
|
||||
static bool starts_with(const char *str, const char *prefix)
|
||||
bool starts_with(const char *str, const char *prefix)
|
||||
{
|
||||
return strncmp(str, prefix, strlen(prefix)) == 0;
|
||||
}
|
||||
@ -785,7 +789,6 @@ directory_exists_and_empty(const char *dir, const char *comment)
|
||||
/************************************************************************
|
||||
Check if file name ends with given set of suffixes.
|
||||
@return true if it does. */
|
||||
static
|
||||
bool
|
||||
filename_matches(const char *filename, const char **ext_list)
|
||||
{
|
||||
@ -800,6 +803,115 @@ filename_matches(const char *filename, const char **ext_list)
|
||||
return(false);
|
||||
}
|
||||
|
||||
// TODO: the code can be used to find storage engine of partitions
|
||||
/*
|
||||
static
|
||||
bool is_aria_frm_or_par(const char *path) {
|
||||
if (!ends_with(path, ".frm") && !ends_with(path, ".par"))
|
||||
return false;
|
||||
|
||||
const char *frm_path = path;
|
||||
if (ends_with(path, ".par")) {
|
||||
size_t frm_path_len = strlen(path);
|
||||
DBUG_ASSERT(frm_path_len > strlen("frm"));
|
||||
frm_path = strdup(path);
|
||||
strcpy(const_cast<char *>(frm_path) + frm_path_len - strlen("frm"), "frm");
|
||||
}
|
||||
|
||||
bool result = false;
|
||||
File file;
|
||||
uchar header[40];
|
||||
legacy_db_type dbt;
|
||||
|
||||
if ((file= mysql_file_open(key_file_frm, frm_path, O_RDONLY | O_SHARE, MYF(0)))
|
||||
< 0)
|
||||
goto err;
|
||||
|
||||
if (mysql_file_read(file, (uchar*) header, sizeof(header), MYF(MY_NABP)))
|
||||
goto err;
|
||||
|
||||
if (!strncmp((char*) header, "TYPE=VIEW\n", 10))
|
||||
goto err;
|
||||
|
||||
if (!is_binary_frm_header(header))
|
||||
goto err;
|
||||
|
||||
dbt = (legacy_db_type)header[3];
|
||||
|
||||
if (dbt == DB_TYPE_ARIA) {
|
||||
result = true;
|
||||
}
|
||||
else if (dbt == DB_TYPE_PARTITION_DB) {
|
||||
MY_STAT state;
|
||||
uchar *frm_image= 0;
|
||||
// uint n_length;
|
||||
|
||||
if (mysql_file_fstat(file, &state, MYF(MY_WME)))
|
||||
goto err;
|
||||
|
||||
if (mysql_file_seek(file, 0, SEEK_SET, MYF(MY_WME)))
|
||||
goto err;
|
||||
|
||||
if (read_string(file, &frm_image, (size_t)state.st_size))
|
||||
goto err;
|
||||
|
||||
dbt = (legacy_db_type)frm_image[61];
|
||||
if (dbt == DB_TYPE_ARIA) {
|
||||
result = true;
|
||||
}
|
||||
my_free(frm_image);
|
||||
}
|
||||
|
||||
err:
|
||||
if (file >= 0)
|
||||
mysql_file_close(file, MYF(MY_WME));
|
||||
if (frm_path != path)
|
||||
free(const_cast<char *>(frm_path));
|
||||
return result;
|
||||
}
|
||||
*/
|
||||
|
||||
void parse_db_table_from_file_path(
|
||||
const char *filepath, char *dbname, char *tablename) {
|
||||
dbname[0] = '\0';
|
||||
tablename[0] = '\0';
|
||||
const char *dbname_start = nullptr;
|
||||
const char *tablename_start = filepath;
|
||||
const char *const_ptr;
|
||||
while ((const_ptr = strchr(tablename_start, FN_LIBCHAR)) != NULL) {
|
||||
dbname_start = tablename_start;
|
||||
tablename_start = const_ptr + 1;
|
||||
}
|
||||
if (!dbname_start)
|
||||
return;
|
||||
size_t dbname_len = tablename_start - dbname_start - 1;
|
||||
if (dbname_len >= FN_REFLEN)
|
||||
dbname_len = FN_REFLEN-1;
|
||||
strmake(dbname, dbname_start, dbname_len);
|
||||
strmake(tablename, tablename_start, FN_REFLEN-1);
|
||||
char *ptr;
|
||||
if ((ptr = strchr(tablename, '.')))
|
||||
*ptr = '\0';
|
||||
if ((ptr = strstr(tablename, "#P#")))
|
||||
*ptr = '\0';
|
||||
}
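parse_db_table_from_file_path() derives the database and table names from the last two components of a data-file path, then strips the file extension and any "#P#" partition suffix. The following standalone sketch re-implements the same rule with std::string just to show what a typical path yields; it is an illustration, not the mariabackup code, and it assumes '/' as the separator (the non-Windows value of FN_LIBCHAR).

```cpp
// Standalone sketch of the same db/table extraction rule, using std::string
// instead of the mariabackup buffers. Illustrative only.
#include <iostream>
#include <string>

static void split_db_table(const std::string &path,
                           std::string &db, std::string &table) {
  db.clear();
  table.clear();
  std::size_t last = path.find_last_of('/');
  if (last == std::string::npos)
    return;                            // no directory component -> no db name
  std::size_t prev = (last == 0) ? std::string::npos
                                 : path.find_last_of('/', last - 1);
  std::size_t start = (prev == std::string::npos) ? 0 : prev + 1;
  db = path.substr(start, last - start);
  table = path.substr(last + 1);
  std::size_t dot = table.find('.');
  if (dot != std::string::npos)
    table.erase(dot);                  // drop ".ibd", ".frm", ...
  std::size_t part = table.find("#P#");
  if (part != std::string::npos)
    table.erase(part);                 // drop the partition suffix
}

int main() {
  std::string db, table;
  split_db_table("./shop/orders#P#p3.ibd", db, table);
  std::cout << db << "." << table << "\n";   // prints "shop.orders"
}
```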
|
||||
|
||||
bool is_system_table(const char *dbname, const char *tablename)
|
||||
{
|
||||
DBUG_ASSERT(dbname);
|
||||
DBUG_ASSERT(tablename);
|
||||
|
||||
LEX_CSTRING lex_dbname;
|
||||
LEX_CSTRING lex_tablename;
|
||||
lex_dbname.str = dbname;
|
||||
lex_dbname.length = strlen(dbname);
|
||||
lex_tablename.str = tablename;
|
||||
lex_tablename.length = strlen(tablename);
|
||||
|
||||
TABLE_CATEGORY tg = get_table_category(&lex_dbname, &lex_tablename);
|
||||
|
||||
return (tg == TABLE_CATEGORY_LOG) || (tg == TABLE_CATEGORY_SYSTEM);
|
||||
}
|
||||
|
||||
/************************************************************************
|
||||
Copy data file for backup. Also check if it is allowed to copy by
|
||||
@ -810,9 +922,8 @@ static
|
||||
bool
|
||||
datafile_copy_backup(ds_ctxt *ds_data, const char *filepath, uint thread_n)
|
||||
{
|
||||
const char *ext_list[] = {"frm", "isl", "MYD", "MYI", "MAD", "MAI",
|
||||
"MRG", "TRG", "TRN", "ARM", "ARZ", "CSM", "CSV", "opt", "par",
|
||||
NULL};
|
||||
const char *ext_list[] = {".frm", ".isl", ".TRG", ".TRN", ".opt", ".par",
|
||||
NULL};
|
||||
|
||||
/* Get the name and the path for the tablespace. node->name always
|
||||
contains the path (which may be absolute for remote tablespaces in
|
||||
@ -830,42 +941,7 @@ datafile_copy_backup(ds_ctxt *ds_data, const char *filepath, uint thread_n)
|
||||
|
||||
if (filename_matches(filepath, ext_list)) {
|
||||
return ds_data->copy_file(filepath, filepath, thread_n);
|
||||
}
|
||||
|
||||
return(true);
|
||||
}
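After this change the generic non-InnoDB copy step only picks up dictionary-level files (.frm, .isl, .TRG, .TRN, .opt, .par, now as true dotted suffixes); engine data files are presumably left to the engine-specific backup code this commit introduces (aria_backup_client.cc, common_engine.cc). A small standalone sketch of the suffix test follows, built from the ends_with() helper shown in this diff; the real filename_matches() body is not reproduced here, so treat it as an illustration rather than the actual mariabackup implementation.

```cpp
// Standalone sketch of the suffix-list match used to select files for the
// non-InnoDB copy step. Illustrative only.
#include <cstring>
#include <iostream>

static bool ends_with(const char *str, const char *suffix) {
  std::size_t str_len = std::strlen(str);
  std::size_t suffix_len = std::strlen(suffix);
  return str_len >= suffix_len &&
         std::strcmp(str + str_len - suffix_len, suffix) == 0;
}

static bool matches_ext_list(const char *filename, const char **ext_list) {
  for (const char **ext = ext_list; *ext; ext++)
    if (ends_with(filename, *ext))
      return true;
  return false;
}

int main() {
  const char *ext_list[] = {".frm", ".isl", ".TRG", ".TRN", ".opt", ".par",
                            NULL};
  std::cout << matches_ext_list("./shop/orders.frm", ext_list) << "\n";  // 1
  std::cout << matches_ext_list("./shop/orders.MAD", ext_list) << "\n";  // 0
}
```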
|
||||
|
||||
|
||||
/************************************************************************
|
||||
Same as datafile_copy_backup, but put file name into the list for
|
||||
rsync command. */
|
||||
static
|
||||
bool
|
||||
datafile_rsync_backup(const char *filepath, bool save_to_list, FILE *f)
|
||||
{
|
||||
const char *ext_list[] = {"frm", "isl", "MYD", "MYI", "MAD", "MAI",
|
||||
"MRG", "TRG", "TRN", "ARM", "ARZ", "CSM", "CSV", "opt", "par",
|
||||
NULL};
|
||||
|
||||
/* Get the name and the path for the tablespace. node->name always
|
||||
contains the path (which may be absolute for remote tablespaces in
|
||||
5.6+). space->name contains the tablespace name in the form
|
||||
"./database/table.ibd" (in 5.5-) or "database/table" (in 5.6+). For a
|
||||
multi-node shared tablespace, space->name contains the name of the first
|
||||
node, but that's irrelevant, since we only need node_name to match them
|
||||
against filters, and the shared tablespace is always copied regardless
|
||||
of the filters value. */
|
||||
|
||||
if (check_if_skip_table(filepath)) {
|
||||
return(true);
|
||||
}
|
||||
|
||||
if (filename_matches(filepath, ext_list)) {
|
||||
fprintf(f, "%s\n", filepath);
|
||||
if (save_to_list) {
|
||||
rsync_list.insert(filepath);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return(true);
|
||||
}
|
||||
@ -1004,16 +1080,15 @@ Copy file for backup/restore.
|
||||
bool
|
||||
ds_ctxt_t::copy_file(const char *src_file_path,
|
||||
const char *dst_file_path,
|
||||
uint thread_n)
|
||||
uint thread_n,
|
||||
bool rewrite)
|
||||
{
|
||||
char dst_name[FN_REFLEN];
|
||||
ds_file_t *dstfile = NULL;
|
||||
datafile_cur_t cursor;
|
||||
xb_fil_cur_result_t res;
|
||||
DBUG_ASSERT(datasink->remove);
|
||||
const char *dst_path =
|
||||
(xtrabackup_copy_back || xtrabackup_move_back)?
|
||||
dst_file_path : trim_dotslash(dst_file_path);
|
||||
const char *dst_path = convert_dst(dst_file_path);
|
||||
|
||||
if (!datafile_open(src_file_path, &cursor, thread_n)) {
|
||||
goto error_close;
|
||||
@ -1021,7 +1096,7 @@ ds_ctxt_t::copy_file(const char *src_file_path,
|
||||
|
||||
strncpy(dst_name, cursor.rel_path, sizeof(dst_name));
|
||||
|
||||
dstfile = ds_open(this, dst_path, &cursor.statinfo);
|
||||
dstfile = ds_open(this, dst_path, &cursor.statinfo, rewrite);
|
||||
if (dstfile == NULL) {
|
||||
msg(thread_n,"error: "
|
||||
"cannot open the destination stream for %s", dst_name);
|
||||
@ -1245,278 +1320,45 @@ cleanup:
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
static
|
||||
bool
|
||||
backup_files(ds_ctxt *ds_data, const char *from, bool prep_mode)
|
||||
backup_files(ds_ctxt *ds_data, const char *from)
|
||||
{
|
||||
char rsync_tmpfile_name[FN_REFLEN];
|
||||
FILE *rsync_tmpfile = NULL;
|
||||
datadir_iter_t *it;
|
||||
datadir_node_t node;
|
||||
bool ret = true;
|
||||
|
||||
if (prep_mode && !opt_rsync) {
|
||||
return(true);
|
||||
}
|
||||
|
||||
if (opt_rsync) {
|
||||
snprintf(rsync_tmpfile_name, sizeof(rsync_tmpfile_name),
|
||||
"%s/%s%d", opt_mysql_tmpdir,
|
||||
"xtrabackup_rsyncfiles_pass",
|
||||
prep_mode ? 1 : 2);
|
||||
rsync_tmpfile = fopen(rsync_tmpfile_name, "w");
|
||||
if (rsync_tmpfile == NULL) {
|
||||
msg("Error: can't create file %s",
|
||||
rsync_tmpfile_name);
|
||||
return(false);
|
||||
}
|
||||
}
|
||||
|
||||
msg("Starting %s non-InnoDB tables and files",
|
||||
prep_mode ? "prep copy of" : "to backup");
|
||||
|
||||
msg("Starting to backup non-InnoDB tables and files");
|
||||
datadir_node_init(&node);
|
||||
it = datadir_iter_new(from);
|
||||
|
||||
while (datadir_iter_next(it, &node)) {
|
||||
|
||||
if (!node.is_empty_dir) {
|
||||
if (opt_rsync) {
|
||||
ret = datafile_rsync_backup(node.filepath,
|
||||
!prep_mode, rsync_tmpfile);
|
||||
} else {
|
||||
ret = datafile_copy_backup(ds_data, node.filepath, 1);
|
||||
}
|
||||
ret = datafile_copy_backup(ds_data, node.filepath, 1);
|
||||
if (!ret) {
|
||||
msg("Failed to copy file %s", node.filepath);
|
||||
goto out;
|
||||
}
|
||||
} else if (!prep_mode) {
|
||||
} else {
|
||||
/* backup fake file into empty directory */
|
||||
char path[FN_REFLEN];
|
||||
snprintf(path, sizeof(path),
|
||||
"%s/db.opt", node.filepath);
|
||||
if (!(ret = ds_data->backup_file_printf(
|
||||
trim_dotslash(path), "%s", ""))) {
|
||||
snprintf(path, sizeof(path), "%s/db.opt", node.filepath);
|
||||
if (!(ret = ds_data->backup_file_printf(trim_dotslash(path), "%s", ""))) {
|
||||
msg("Failed to create file %s", path);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (opt_rsync) {
|
||||
std::stringstream cmd;
|
||||
int err;
|
||||
|
||||
if (buffer_pool_filename && file_exists(buffer_pool_filename)) {
|
||||
fprintf(rsync_tmpfile, "%s\n", buffer_pool_filename);
|
||||
rsync_list.insert(buffer_pool_filename);
|
||||
}
|
||||
if (file_exists("ib_lru_dump")) {
|
||||
fprintf(rsync_tmpfile, "%s\n", "ib_lru_dump");
|
||||
rsync_list.insert("ib_lru_dump");
|
||||
}
|
||||
|
||||
fclose(rsync_tmpfile);
|
||||
rsync_tmpfile = NULL;
|
||||
|
||||
cmd << "rsync -t . --files-from=" << rsync_tmpfile_name
|
||||
<< " " << xtrabackup_target_dir;
|
||||
|
||||
msg("Starting rsync as: %s", cmd.str().c_str());
|
||||
if ((err = system(cmd.str().c_str()) && !prep_mode) != 0) {
|
||||
msg("Error: rsync failed with error code %d", err);
|
||||
ret = false;
|
||||
goto out;
|
||||
}
|
||||
msg("rsync finished successfully.");
|
||||
|
||||
if (!prep_mode && !opt_no_lock) {
|
||||
char path[FN_REFLEN];
|
||||
char dst_path[FN_REFLEN];
|
||||
char *newline;
|
||||
|
||||
/* Remove files that have been removed between first and
|
||||
second passes. Cannot use "rsync --delete" because it
|
||||
does not work with --files-from. */
|
||||
snprintf(rsync_tmpfile_name, sizeof(rsync_tmpfile_name),
|
||||
"%s/%s", opt_mysql_tmpdir,
|
||||
"xtrabackup_rsyncfiles_pass1");
|
||||
|
||||
rsync_tmpfile = fopen(rsync_tmpfile_name, "r");
|
||||
if (rsync_tmpfile == NULL) {
|
||||
msg("Error: can't open file %s",
|
||||
rsync_tmpfile_name);
|
||||
ret = false;
|
||||
goto out;
|
||||
}
|
||||
|
||||
while (fgets(path, sizeof(path), rsync_tmpfile)) {
|
||||
|
||||
newline = strchr(path, '\n');
|
||||
if (newline) {
|
||||
*newline = 0;
|
||||
}
|
||||
if (rsync_list.count(path) < 1) {
|
||||
snprintf(dst_path, sizeof(dst_path),
|
||||
"%s/%s", xtrabackup_target_dir,
|
||||
path);
|
||||
msg("Removing %s", dst_path);
|
||||
unlink(dst_path);
|
||||
}
|
||||
}
|
||||
|
||||
fclose(rsync_tmpfile);
|
||||
rsync_tmpfile = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
msg("Finished %s non-InnoDB tables and files",
|
||||
prep_mode ? "a prep copy of" : "backing up");
|
||||
|
||||
msg("Finished backing up non-InnoDB tables and files");
|
||||
out:
|
||||
datadir_iter_free(it);
|
||||
datadir_node_free(&node);
|
||||
|
||||
if (rsync_tmpfile != NULL) {
|
||||
fclose(rsync_tmpfile);
|
||||
}
|
||||
|
||||
return(ret);
|
||||
}
|
||||
|
||||
|
||||
lsn_t get_current_lsn(MYSQL *connection)
|
||||
{
|
||||
static const char lsn_prefix[] = "\nLog sequence number ";
|
||||
lsn_t lsn = 0;
|
||||
if (MYSQL_RES *res = xb_mysql_query(connection,
|
||||
"SHOW ENGINE INNODB STATUS",
|
||||
true, false)) {
|
||||
if (MYSQL_ROW row = mysql_fetch_row(res)) {
|
||||
const char *p= strstr(row[2], lsn_prefix);
|
||||
DBUG_ASSERT(p);
|
||||
if (p) {
|
||||
p += sizeof lsn_prefix - 1;
|
||||
lsn = lsn_t(strtoll(p, NULL, 10));
|
||||
}
|
||||
}
|
||||
mysql_free_result(res);
|
||||
}
|
||||
return lsn;
|
||||
}
|
||||
|
||||
lsn_t server_lsn_after_lock;
|
||||
extern void backup_wait_for_lsn(lsn_t lsn);
|
||||
/** Start --backup */
|
||||
bool backup_start(ds_ctxt *ds_data, ds_ctxt *ds_meta,
|
||||
CorruptedPages &corrupted_pages)
|
||||
{
|
||||
if (!opt_no_lock) {
|
||||
if (opt_safe_slave_backup) {
|
||||
if (!wait_for_safe_slave(mysql_connection)) {
|
||||
return(false);
|
||||
}
|
||||
}
|
||||
|
||||
if (!backup_files(ds_data, fil_path_to_mysql_datadir, true)) {
|
||||
return(false);
|
||||
}
|
||||
|
||||
history_lock_time = time(NULL);
|
||||
|
||||
if (!lock_tables(mysql_connection)) {
|
||||
return(false);
|
||||
}
|
||||
server_lsn_after_lock = get_current_lsn(mysql_connection);
|
||||
}
|
||||
|
||||
if (!backup_files(ds_data, fil_path_to_mysql_datadir, false)) {
|
||||
return(false);
|
||||
}
|
||||
|
||||
if (!backup_files_from_datadir(ds_data, fil_path_to_mysql_datadir,
|
||||
"aws-kms-key") ||
|
||||
!backup_files_from_datadir(ds_data,
|
||||
aria_log_dir_path,
|
||||
"aria_log")) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (has_rocksdb_plugin()) {
|
||||
rocksdb_create_checkpoint();
|
||||
}
|
||||
|
||||
msg("Waiting for log copy thread to read lsn %llu", (ulonglong)server_lsn_after_lock);
|
||||
backup_wait_for_lsn(server_lsn_after_lock);
|
||||
DBUG_EXECUTE_FOR_KEY("sleep_after_waiting_for_lsn", {},
|
||||
{
|
||||
ulong milliseconds = strtoul(dbug_val, NULL, 10);
|
||||
msg("sleep_after_waiting_for_lsn");
|
||||
my_sleep(milliseconds*1000UL);
|
||||
});
|
||||
|
||||
corrupted_pages.backup_fix_ddl(ds_data, ds_meta);
|
||||
|
||||
// There is no need to stop the slave thread before copying non-InnoDB data when
|
||||
// --no-lock option is used because --no-lock option requires that no DDL or
|
||||
// DML to non-transaction tables can occur.
|
||||
if (opt_no_lock) {
|
||||
if (opt_safe_slave_backup) {
|
||||
if (!wait_for_safe_slave(mysql_connection)) {
|
||||
return(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (opt_slave_info) {
|
||||
lock_binlog_maybe(mysql_connection);
|
||||
|
||||
if (!write_slave_info(ds_data, mysql_connection)) {
|
||||
return(false);
|
||||
}
|
||||
}
|
||||
|
||||
/* The only reason why Galera/binlog info is written before
|
||||
wait_for_ibbackup_log_copy_finish() is that after that call the xtrabackup
|
||||
binary will start streaming a temporary copy of REDO log to stdout and
|
||||
thus, any streaming from innobackupex would interfere. The only way to
|
||||
avoid that is to have a single process, i.e. merge innobackupex and
|
||||
xtrabackup. */
|
||||
if (opt_galera_info) {
|
||||
if (!write_galera_info(ds_data, mysql_connection)) {
|
||||
return(false);
|
||||
}
|
||||
}
|
||||
|
||||
if (opt_binlog_info == BINLOG_INFO_ON) {
|
||||
|
||||
lock_binlog_maybe(mysql_connection);
|
||||
write_binlog_info(ds_data, mysql_connection);
|
||||
}
|
||||
|
||||
if (!opt_no_lock) {
|
||||
msg("Executing FLUSH NO_WRITE_TO_BINLOG ENGINE LOGS...");
|
||||
xb_mysql_query(mysql_connection,
|
||||
"FLUSH NO_WRITE_TO_BINLOG ENGINE LOGS", false);
|
||||
}
|
||||
|
||||
return(true);
|
||||
}
|
||||
|
||||
/** Release resources after backup_start() */
|
||||
void backup_release()
|
||||
{
|
||||
/* release all locks */
|
||||
if (!opt_no_lock) {
|
||||
unlock_all(mysql_connection);
|
||||
history_lock_time = 0;
|
||||
} else {
|
||||
history_lock_time = time(NULL) - history_lock_time;
|
||||
}
|
||||
|
||||
if (opt_lock_ddl_per_table) {
|
||||
mdl_unlock_all();
|
||||
}
|
||||
@ -1534,7 +1376,7 @@ static const char *default_buffer_pool_file = "ib_buffer_pool";
|
||||
bool backup_finish(ds_ctxt *ds_data)
|
||||
{
|
||||
/* Copy buffer pool dump or LRU dump */
|
||||
if (!opt_rsync && opt_galera_info) {
|
||||
if (opt_galera_info) {
|
||||
if (buffer_pool_filename && file_exists(buffer_pool_filename)) {
|
||||
ds_data->copy_file(buffer_pool_filename, default_buffer_pool_file, 0);
|
||||
}
|
||||
@ -1897,8 +1739,6 @@ copy_back()
|
||||
return(false);
|
||||
}
|
||||
|
||||
srv_max_n_threads = 1000;
|
||||
|
||||
/* copy undo tablespaces */
|
||||
|
||||
Copy_back_dst_dir dst_dir_buf;
|
||||
@ -1926,7 +1766,8 @@ copy_back()
|
||||
|
||||
dst_dir = dst_dir_buf.make(srv_log_group_home_dir);
|
||||
|
||||
/* --backup generates a single ib_logfile0, which we must copy. */
|
||||
/* --backup generates a single LOG_FILE_NAME, which we must copy
|
||||
if it exists. */
|
||||
|
||||
ds_tmp = ds_create(dst_dir, DS_TYPE_LOCAL);
|
||||
if (!(ret = copy_or_move_file(ds_tmp, LOG_FILE_NAME, LOG_FILE_NAME,
|
||||
@ -2163,8 +2004,6 @@ decrypt_decompress()
|
||||
bool ret;
|
||||
datadir_iter_t *it = NULL;
|
||||
|
||||
srv_max_n_threads = 1000;
|
||||
|
||||
/* cd to backup directory */
|
||||
if (my_setwd(xtrabackup_target_dir, MYF(MY_WME)))
|
||||
{
|
||||
@ -2177,8 +2016,6 @@ decrypt_decompress()
|
||||
|
||||
it = datadir_iter_new(".", false);
|
||||
|
||||
ut_a(xtrabackup_parallel >= 0);
|
||||
|
||||
ret = run_data_threads(it, decrypt_decompress_thread_func,
|
||||
xtrabackup_parallel ? xtrabackup_parallel : 1);
|
||||
|
||||
@ -2200,9 +2037,9 @@ decrypt_decompress()
|
||||
Do not copy the Innodb files (ibdata1, redo log files),
|
||||
as this is done in a separate step.
|
||||
*/
|
||||
static bool backup_files_from_datadir(ds_ctxt_t *ds_data,
|
||||
const char *dir_path,
|
||||
const char *prefix)
|
||||
bool backup_files_from_datadir(ds_ctxt_t *ds_data,
|
||||
const char *dir_path,
|
||||
const char *prefix)
|
||||
{
|
||||
os_file_dir_t dir = os_file_opendir(dir_path);
|
||||
if (dir == IF_WIN(INVALID_HANDLE_VALUE, nullptr)) return false;
|
||||
@ -2226,10 +2063,6 @@ static bool backup_files_from_datadir(ds_ctxt_t *ds_data,
|
||||
pname = info.name;
|
||||
|
||||
if (!starts_with(pname, prefix))
|
||||
/* For ES exchange the above line with the following code:
|
||||
(!xtrabackup_prepare || !xtrabackup_incremental_dir ||
|
||||
!starts_with(pname, "aria_log")))
|
||||
*/
|
||||
continue;
|
||||
|
||||
if (xtrabackup_prepare && xtrabackup_incremental_dir &&
|
||||
@ -2252,7 +2085,7 @@ static int rocksdb_remove_checkpoint_directory()
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool has_rocksdb_plugin()
|
||||
bool has_rocksdb_plugin()
|
||||
{
|
||||
static bool first_time = true;
|
||||
static bool has_plugin= false;
|
||||
@ -2398,7 +2231,7 @@ static void rocksdb_unlock_checkpoint()
|
||||
#define MARIADB_CHECKPOINT_DIR "mariabackup-checkpoint"
|
||||
static char rocksdb_checkpoint_dir[FN_REFLEN];
|
||||
|
||||
static void rocksdb_create_checkpoint()
|
||||
void rocksdb_create_checkpoint()
|
||||
{
|
||||
MYSQL_RES *result = xb_mysql_query(mysql_connection, "SELECT @@rocksdb_datadir,@@datadir", true, true);
|
||||
MYSQL_ROW row = mysql_fetch_row(result);
|
||||
@ -2478,3 +2311,39 @@ static void rocksdb_copy_back(ds_ctxt *ds_data) {
|
||||
mkdirp(rocksdb_home_dir, 0777, MYF(0));
|
||||
ds_data->copy_or_move_dir(ROCKSDB_BACKUP_DIR, rocksdb_home_dir, xtrabackup_copy_back, xtrabackup_copy_back);
|
||||
}
|
||||
|
||||
void foreach_file_in_db_dirs(
|
||||
const char *dir_path, std::function<bool(const char *)> func) {
|
||||
DBUG_ASSERT(dir_path);
|
||||
|
||||
datadir_iter_t *it;
|
||||
datadir_node_t node;
|
||||
|
||||
datadir_node_init(&node);
|
||||
it = datadir_iter_new(dir_path);
|
||||
|
||||
while (datadir_iter_next(it, &node))
|
||||
if (!node.is_empty_dir && !func(node.filepath))
|
||||
break;
|
||||
|
||||
datadir_iter_free(it);
|
||||
datadir_node_free(&node);
|
||||
}
|
||||
|
||||
void foreach_file_in_datadir(
|
||||
const char *dir_path, std::function<bool(const char *)> func)
|
||||
{
|
||||
DBUG_ASSERT(dir_path);
|
||||
os_file_dir_t dir = os_file_opendir(dir_path);
|
||||
os_file_stat_t info;
|
||||
while (os_file_readdir_next_file(dir_path, dir, &info) == 0) {
|
||||
if (info.type != OS_FILE_TYPE_FILE)
|
||||
continue;
|
||||
const char *pname = strrchr(info.name, IF_WIN('\\', '/'));
|
||||
if (!pname)
|
||||
pname = info.name;
|
||||
if (!func(pname))
|
||||
break;
|
||||
}
|
||||
os_file_closedir(dir);
|
||||
}
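foreach_file_in_datadir() and foreach_file_in_db_dirs() expose directory traversal through a std::function<bool(const char*)> callback, and returning false from the callback stops the walk early. A standalone sketch of the same shape, using std::filesystem instead of the os_file_* primitives (the function and file names below are made up):

```cpp
// Standalone sketch of the callback-driven directory walk. Illustrative only.
#include <filesystem>
#include <functional>
#include <iostream>
#include <string>

namespace fs = std::filesystem;

// Call func() with the bare file name of every regular file directly under
// dir_path; stop the walk as soon as the callback returns false.
static void foreach_file(const char *dir_path,
                         std::function<bool(const char *)> func) {
  for (const auto &entry : fs::directory_iterator(dir_path)) {
    if (!entry.is_regular_file())
      continue;
    std::string name = entry.path().filename().string();
    if (!func(name.c_str()))
      break;
  }
}

int main() {
  // Example callback: list files until one named "aria_log_control" is seen.
  foreach_file(".", [](const char *name) {
    std::cout << name << "\n";
    return std::string(name) != "aria_log_control";
  });
}
```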
|
||||
|
@ -2,6 +2,7 @@
|
||||
#ifndef XTRABACKUP_BACKUP_COPY_H
|
||||
#define XTRABACKUP_BACKUP_COPY_H
|
||||
|
||||
#include <functional>
|
||||
#include <my_global.h>
|
||||
#include <mysql.h>
|
||||
#include "datasink.h"
|
||||
@ -29,8 +30,7 @@ bool
|
||||
equal_paths(const char *first, const char *second);
|
||||
|
||||
/** Start --backup */
|
||||
bool backup_start(ds_ctxt *ds_data, ds_ctxt *ds_meta,
|
||||
CorruptedPages &corrupted_pages);
|
||||
bool backup_files(ds_ctxt *ds_data, const char *from);
|
||||
/** Release resources after backup_start() */
|
||||
void backup_release();
|
||||
/** Finish after backup_start() and backup_release() */
|
||||
@ -46,7 +46,25 @@ is_path_separator(char);
|
||||
bool
|
||||
directory_exists(const char *dir, bool create);
|
||||
|
||||
lsn_t
|
||||
get_current_lsn(MYSQL *connection);
|
||||
bool has_rocksdb_plugin();
|
||||
void rocksdb_create_checkpoint();
|
||||
void foreach_file_in_db_dirs(
|
||||
const char *dir_path, std::function<bool(const char *)> func);
|
||||
void foreach_file_in_datadir(
|
||||
const char *dir_path, std::function<bool(const char *)> func);
|
||||
bool ends_with(const char *str, const char *suffix);
|
||||
bool starts_with(const char *str, const char *prefix);
|
||||
void parse_db_table_from_file_path(
|
||||
const char *filepath, char *dbname, char *tablename);
|
||||
const char *trim_dotslash(const char *path);
|
||||
bool backup_files_from_datadir(ds_ctxt_t *ds_data,
|
||||
const char *dir_path,
|
||||
const char *prefix);
|
||||
|
||||
bool is_system_table(const char *dbname, const char *tablename);
|
||||
std::unique_ptr<std::vector<std::string>>
|
||||
find_files(const char *dir_path, const char *prefix, const char *suffix);
|
||||
bool file_exists(const char *filename);
|
||||
bool
|
||||
filename_matches(const char *filename, const char **ext_list);
|
||||
#endif
|
||||
|
@ -1,5 +1,6 @@
|
||||
#pragma once
|
||||
#include "my_dbug.h"
|
||||
|
||||
#ifndef DBUG_OFF
|
||||
char *dbug_mariabackup_get_val(const char *event, fil_space_t::name_type key);
|
||||
/*
|
||||
@ -14,11 +15,21 @@ To use this facility, you need to
|
||||
for the variable)
|
||||
3. start mariabackup with --dbug=+d,debug_mariabackup_events
|
||||
*/
|
||||
#define DBUG_EXECUTE_FOR_KEY(EVENT, KEY, CODE) \
|
||||
DBUG_EXECUTE_IF("mariabackup_inject_code", \
|
||||
{ char *dbug_val= dbug_mariabackup_get_val(EVENT, KEY); \
|
||||
if (dbug_val) CODE })
|
||||
extern void dbug_mariabackup_event(
|
||||
const char *event, const fil_space_t::name_type key, bool need_lock);
|
||||
#define DBUG_MARIABACKUP_EVENT(A, B) \
|
||||
DBUG_EXECUTE_IF("mariabackup_events", \
|
||||
dbug_mariabackup_event(A,B,false););
|
||||
#define DBUG_MARIABACKUP_EVENT_LOCK(A, B) \
|
||||
DBUG_EXECUTE_IF("mariabackup_events", \
|
||||
dbug_mariabackup_event(A,B, true););
|
||||
#define DBUG_EXECUTE_FOR_KEY(EVENT, KEY, CODE) \
|
||||
DBUG_EXECUTE_IF("mariabackup_inject_code", {\
|
||||
char *dbug_val = dbug_mariabackup_get_val(EVENT, KEY); \
|
||||
if (dbug_val && *dbug_val) CODE \
|
||||
})
|
||||
#else
|
||||
#define DBUG_MARIABACKUP_EVENT(A,B)
|
||||
#define DBUG_MARIABACKUP_EVENT_LOCK(A,B)
|
||||
#define DBUG_EXECUTE_FOR_KEY(EVENT, KEY, CODE)
|
||||
#endif
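The reworked DBUG_EXECUTE_FOR_KEY only runs the injected block when dbug_mariabackup_get_val() returns a non-empty string (the added *dbug_val check), as at the sleep_after_waiting_for_lsn call site earlier in this diff. A standalone sketch of the same keyed-injection pattern, with getenv() standing in for the lookup and the KEY argument dropped (illustrative, not the real macro):

```cpp
// Standalone sketch of the keyed code-injection pattern behind
// DBUG_EXECUTE_FOR_KEY. getenv() stands in for dbug_mariabackup_get_val().
#include <cstdlib>
#include <iostream>

#define EXECUTE_FOR_KEY(EVENT, CODE)                    \
  do {                                                  \
    const char *dbug_val = std::getenv(EVENT);          \
    /* run CODE only when a non-empty value is set */   \
    if (dbug_val && *dbug_val) {                        \
      CODE                                              \
    }                                                   \
  } while (0)

int main() {
  // Mirrors the sleep_after_waiting_for_lsn call site, with an environment
  // variable carrying the millisecond value.
  EXECUTE_FOR_KEY("sleep_after_waiting_for_lsn", {
    unsigned long ms = std::strtoul(dbug_val, nullptr, 10);
    std::cout << "would sleep for " << ms << " ms\n";
  });
}
```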
|
||||
|
||||
|
@ -47,6 +47,12 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <limits>
|
||||
#ifdef HAVE_PWD_H
|
||||
#ifdef HAVE_SYS_TYPES_H
|
||||
#include <sys/types.h>
|
||||
#endif
|
||||
#include <pwd.h>
|
||||
#endif
|
||||
#include "common.h"
|
||||
#include "xtrabackup.h"
|
||||
#include "srv0srv.h"
|
||||
@ -54,10 +60,11 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA
|
||||
#include "backup_copy.h"
|
||||
#include "backup_mysql.h"
|
||||
#include "mysqld.h"
|
||||
#include "xb_plugin.h"
|
||||
#include "encryption_plugin.h"
|
||||
#include <sstream>
|
||||
#include <sql_error.h>
|
||||
#include "page0zip.h"
|
||||
#include "backup_debug.h"
|
||||
|
||||
char *tool_name;
|
||||
char tool_args[2048];
|
||||
@ -66,7 +73,6 @@ ulong mysql_server_version;
|
||||
|
||||
/* server capabilities */
|
||||
bool have_changed_page_bitmaps = false;
|
||||
bool have_backup_locks = false;
|
||||
bool have_lock_wait_timeout = false;
|
||||
bool have_galera_enabled = false;
|
||||
bool have_multi_threaded_slave = false;
|
||||
@ -92,11 +98,54 @@ MYSQL *mysql_connection;
|
||||
|
||||
extern my_bool opt_ssl_verify_server_cert, opt_use_ssl;
|
||||
|
||||
|
||||
/*
|
||||
get_os_user()
|
||||
Resembles read_user_name() from libmariadb/libmariadb/mariadb_lib.c.
|
||||
*/
|
||||
|
||||
#if !defined(_WIN32)
|
||||
|
||||
#if defined(HAVE_GETPWUID) && defined(NO_GETPWUID_DECL)
|
||||
struct passwd *getpwuid(uid_t);
|
||||
char* getlogin(void);
|
||||
#endif
|
||||
|
||||
static const char *get_os_user() // Posix
|
||||
{
|
||||
if (!geteuid())
|
||||
return "root";
|
||||
#ifdef HAVE_GETPWUID
|
||||
struct passwd *pw;
|
||||
const char *str;
|
||||
if ((pw= getpwuid(geteuid())) != NULL)
|
||||
return pw->pw_name;
|
||||
if ((str= getlogin()) != NULL)
|
||||
return str;
|
||||
#endif
|
||||
if ((str= getenv("USER")) ||
|
||||
(str= getenv("LOGNAME")) ||
|
||||
(str= getenv("LOGIN")))
|
||||
return str;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static const char *get_os_user() // Windows
|
||||
{
|
||||
return getenv("USERNAME");
|
||||
}
|
||||
|
||||
#endif // _WIN32
|
||||
|
||||
|
||||
MYSQL *
|
||||
xb_mysql_connect()
|
||||
{
|
||||
MYSQL *connection = mysql_init(NULL);
|
||||
char mysql_port_str[std::numeric_limits<int>::digits10 + 3];
|
||||
const char *user= opt_user ? opt_user : get_os_user();
|
||||
|
||||
sprintf(mysql_port_str, "%d", opt_port);
|
||||
|
||||
@ -126,7 +175,7 @@ xb_mysql_connect()
|
||||
|
||||
msg("Connecting to MariaDB server host: %s, user: %s, password: %s, "
|
||||
"port: %s, socket: %s", opt_host ? opt_host : "localhost",
|
||||
opt_user ? opt_user : "not set",
|
||||
user ? user : "not set",
|
||||
opt_password ? "set" : "not set",
|
||||
opt_port != 0 ? mysql_port_str : "not set",
|
||||
opt_socket ? opt_socket : "not set");
|
||||
@ -147,7 +196,7 @@ xb_mysql_connect()
|
||||
|
||||
if (!mysql_real_connect(connection,
|
||||
opt_host ? opt_host : "localhost",
|
||||
opt_user,
|
||||
user,
|
||||
opt_password,
|
||||
"" /*database*/, opt_port,
|
||||
opt_socket, 0)) {
|
||||
@ -203,13 +252,14 @@ struct mysql_variable {
|
||||
|
||||
|
||||
static
|
||||
void
|
||||
uint
|
||||
read_mysql_variables(MYSQL *connection, const char *query, mysql_variable *vars,
|
||||
bool vertical_result)
|
||||
{
|
||||
MYSQL_RES *mysql_result;
|
||||
MYSQL_ROW row;
|
||||
mysql_variable *var;
|
||||
uint n_values=0;
|
||||
|
||||
mysql_result = xb_mysql_query(connection, query, true);
|
||||
|
||||
@ -223,6 +273,7 @@ read_mysql_variables(MYSQL *connection, const char *query, mysql_variable *vars,
|
||||
if (strcmp(var->name, name) == 0
|
||||
&& value != NULL) {
|
||||
*(var->value) = strdup(value);
|
||||
n_values++;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -239,6 +290,7 @@ read_mysql_variables(MYSQL *connection, const char *query, mysql_variable *vars,
|
||||
if (strcmp(var->name, name) == 0
|
||||
&& value != NULL) {
|
||||
*(var->value) = strdup(value);
|
||||
n_values++;
|
||||
}
|
||||
}
|
||||
++i;
|
||||
@ -247,6 +299,7 @@ read_mysql_variables(MYSQL *connection, const char *query, mysql_variable *vars,
|
||||
}
|
||||
|
||||
mysql_free_result(mysql_result);
|
||||
return n_values;
|
||||
}
|
||||
|
||||
|
||||
@ -311,7 +364,6 @@ bool get_mysql_vars(MYSQL *connection)
|
||||
{
|
||||
char *gtid_mode_var= NULL;
|
||||
char *version_var= NULL;
|
||||
char *have_backup_locks_var= NULL;
|
||||
char *log_bin_var= NULL;
|
||||
char *lock_wait_timeout_var= NULL;
|
||||
char *wsrep_on_var= NULL;
|
||||
@ -336,7 +388,6 @@ bool get_mysql_vars(MYSQL *connection)
|
||||
bool ret= true;
|
||||
|
||||
mysql_variable mysql_vars[]= {
|
||||
{"have_backup_locks", &have_backup_locks_var},
|
||||
{"log_bin", &log_bin_var},
|
||||
{"lock_wait_timeout", &lock_wait_timeout_var},
|
||||
{"gtid_mode", >id_mode_var},
|
||||
@ -361,11 +412,6 @@ bool get_mysql_vars(MYSQL *connection)
|
||||
|
||||
read_mysql_variables(connection, "SHOW VARIABLES", mysql_vars, true);
|
||||
|
||||
if (have_backup_locks_var != NULL && !opt_no_backup_locks)
|
||||
{
|
||||
have_backup_locks= true;
|
||||
}
|
||||
|
||||
if (opt_binlog_info == BINLOG_INFO_AUTO)
|
||||
{
|
||||
if (log_bin_var != NULL && !strcmp(log_bin_var, "ON"))
|
||||
@ -512,24 +558,6 @@ Query the server to find out what backup capabilities it supports.
|
||||
bool
|
||||
detect_mysql_capabilities_for_backup()
|
||||
{
|
||||
const char *query = "SELECT 'INNODB_CHANGED_PAGES', COUNT(*) FROM "
|
||||
"INFORMATION_SCHEMA.PLUGINS "
|
||||
"WHERE PLUGIN_NAME LIKE 'INNODB_CHANGED_PAGES'";
|
||||
char *innodb_changed_pages = NULL;
|
||||
mysql_variable vars[] = {
|
||||
{"INNODB_CHANGED_PAGES", &innodb_changed_pages}, {NULL, NULL}};
|
||||
|
||||
if (xtrabackup_incremental) {
|
||||
|
||||
read_mysql_variables(mysql_connection, query, vars, true);
|
||||
|
||||
ut_ad(innodb_changed_pages != NULL);
|
||||
|
||||
have_changed_page_bitmaps = (atoi(innodb_changed_pages) == 1);
|
||||
|
||||
free_mysql_variables(vars);
|
||||
}
|
||||
|
||||
/* do some sanity checks */
|
||||
if (opt_galera_info && !have_galera_enabled) {
|
||||
msg("--galera-info is specified on the command "
|
||||
@ -837,11 +865,11 @@ static void stop_query_killer()
|
||||
|
||||
|
||||
/*********************************************************************//**
|
||||
Function acquires either a backup tables lock, if supported
|
||||
by the server, or a global read lock (FLUSH TABLES WITH READ LOCK)
|
||||
otherwise.
|
||||
Function acquires backup locks
|
||||
@returns true if lock acquired */
|
||||
bool lock_tables(MYSQL *connection)
|
||||
|
||||
bool
|
||||
lock_for_backup_stage_start(MYSQL *connection)
|
||||
{
|
||||
if (have_lock_wait_timeout || opt_lock_wait_timeout)
|
||||
{
|
||||
@ -854,12 +882,6 @@ bool lock_tables(MYSQL *connection)
|
||||
xb_mysql_query(connection, buf, false);
|
||||
}
|
||||
|
||||
if (have_backup_locks)
|
||||
{
|
||||
msg("Executing LOCK TABLES FOR BACKUP...");
|
||||
xb_mysql_query(connection, "LOCK TABLES FOR BACKUP", false);
|
||||
return (true);
|
||||
}
|
||||
|
||||
if (opt_lock_wait_timeout)
|
||||
{
|
||||
@ -884,8 +906,6 @@ bool lock_tables(MYSQL *connection)
|
||||
|
||||
xb_mysql_query(connection, "BACKUP STAGE START", true);
|
||||
DBUG_MARIABACKUP_EVENT("after_backup_stage_start", {});
|
||||
xb_mysql_query(connection, "BACKUP STAGE BLOCK_COMMIT", true);
|
||||
DBUG_MARIABACKUP_EVENT("after_backup_stage_block_commit", {});
|
||||
/* Set the maximum supported session value for
|
||||
lock_wait_timeout to prevent unnecessary timeouts when the
|
||||
global value is changed from the default */
|
||||
@ -901,24 +921,68 @@ bool lock_tables(MYSQL *connection)
|
||||
return (true);
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
If backup locks are used, execute LOCK BINLOG FOR BACKUP provided that we are
|
||||
not in the --no-lock mode and the lock has not been acquired already.
|
||||
@returns true if lock acquired */
|
||||
bool
|
||||
lock_binlog_maybe(MYSQL *connection)
|
||||
{
|
||||
if (have_backup_locks && !opt_no_lock && !binlog_locked) {
|
||||
msg("Executing LOCK BINLOG FOR BACKUP...");
|
||||
xb_mysql_query(connection, "LOCK BINLOG FOR BACKUP", false);
|
||||
binlog_locked = true;
|
||||
|
||||
return(true);
|
||||
lock_for_backup_stage_flush(MYSQL *connection) {
|
||||
if (opt_kill_long_queries_timeout) {
|
||||
start_query_killer();
|
||||
}
|
||||
|
||||
return(false);
|
||||
xb_mysql_query(connection, "BACKUP STAGE FLUSH", true);
|
||||
if (opt_kill_long_queries_timeout) {
|
||||
stop_query_killer();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
lock_for_backup_stage_block_ddl(MYSQL *connection) {
|
||||
if (opt_kill_long_queries_timeout) {
|
||||
start_query_killer();
|
||||
}
|
||||
xb_mysql_query(connection, "BACKUP STAGE BLOCK_DDL", true);
|
||||
DBUG_MARIABACKUP_EVENT("after_backup_stage_block_ddl", {});
|
||||
if (opt_kill_long_queries_timeout) {
|
||||
stop_query_killer();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
lock_for_backup_stage_commit(MYSQL *connection) {
|
||||
if (opt_kill_long_queries_timeout) {
|
||||
start_query_killer();
|
||||
}
|
||||
xb_mysql_query(connection, "BACKUP STAGE BLOCK_COMMIT", true);
|
||||
DBUG_MARIABACKUP_EVENT("after_backup_stage_block_commit", {});
|
||||
if (opt_kill_long_queries_timeout) {
|
||||
stop_query_killer();
|
||||
}
|
||||
return true;
|
||||
}
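
The four lock_for_backup_stage_* helpers above correspond one-to-one to the server's BACKUP STAGE statements. A minimal sketch of how a caller might chain them, assuming "connection" comes from xb_mysql_connect() and that BACKUP STAGE END is issued later by the unlock path (illustrative only, not the driver code from this commit):

static bool run_backup_stages(MYSQL *connection)
{
  /* BACKUP STAGE START, then FLUSH, BLOCK_DDL and BLOCK_COMMIT, in the
     order the server expects; each helper returns false on failure. */
  if (!lock_for_backup_stage_start(connection))
    return false;
  if (!lock_for_backup_stage_flush(connection))
    return false;
  if (!lock_for_backup_stage_block_ddl(connection))
    return false;
  if (!lock_for_backup_stage_commit(connection))
    return false;
  /* ... data that must be copied under BLOCK_COMMIT goes here ... */
  return true;
}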
|
||||
|
||||
bool backup_lock(MYSQL *con, const char *table_name) {
|
||||
static const std::string backup_lock_prefix("BACKUP LOCK ");
|
||||
std::string backup_lock_query = backup_lock_prefix + table_name;
|
||||
xb_mysql_query(con, backup_lock_query.c_str(), true);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool backup_unlock(MYSQL *con) {
|
||||
xb_mysql_query(con, "BACKUP UNLOCK", true);
|
||||
return true;
|
||||
}
|
||||
|
||||
std::unordered_set<std::string>
|
||||
get_tables_in_use(MYSQL *con) {
|
||||
std::unordered_set<std::string> result;
|
||||
MYSQL_RES *q_res =
|
||||
xb_mysql_query(con, "SHOW OPEN TABLES WHERE In_use = 1", true);
|
||||
while (MYSQL_ROW row = mysql_fetch_row(q_res)) {
|
||||
auto tk = table_key(row[0], row[1]);
|
||||
msg("Table %s is in use", tk.c_str());
|
||||
result.insert(std::move(tk));
|
||||
}
|
||||
return result;
|
||||
}
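
backup_lock()/backup_unlock() wrap the per-table BACKUP LOCK statement, and get_tables_in_use() reports the tables that SHOW OPEN TABLES lists as in use. One way these helpers might be combined (a sketch only; "con" is assumed to come from xb_mysql_connect() and the table names are invented):

std::unordered_set<std::string> busy = get_tables_in_use(con);
if (!busy.count(table_key("db1", "t1")) && backup_lock(con, "`db1`.`t1`")) {
  /* ... copy the table's files while the per-table lock is held ... */
  backup_unlock(con);
}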
|
||||
|
||||
/*********************************************************************//**
|
||||
Releases either global read lock acquired with FTWRL and the binlog
|
||||
@ -1353,77 +1417,103 @@ write_slave_info(ds_ctxt *datasink, MYSQL *connection)
|
||||
|
||||
|
||||
/*********************************************************************//**
|
||||
Retrieves MySQL Galera and
|
||||
saves it in a file. It also prints it to stdout. */
|
||||
Retrieves MariaDB Galera replication state and saves it in a file. It also
prints it to stdout.

We should create the xtrabackup_galera_info file even when backup locks
are used, because the donor's wsrep_gtid_domain_id is needed later on the
joiner. Note that at this stage wsrep_local_state_uuid and
wsrep_last_committed are inconsistent, but they are not used on the joiner.
The joiner will rewrite this file during the mariabackup --prepare phase,
which is why there is an extra donor_galera_info file. This information is
needed to keep wsrep_gtid_domain_id and gtid_binlog_pos the same across the
cluster. If the joiner node has a different wsrep_gtid_domain_id, we should
still receive the effective domain id from the donor node and use it.
*/
|
||||
bool
|
||||
write_galera_info(ds_ctxt *datasink, MYSQL *connection)
|
||||
{
|
||||
char *state_uuid = NULL, *state_uuid55 = NULL;
|
||||
char *last_committed = NULL, *last_committed55 = NULL;
|
||||
char *domain_id = NULL, *domain_id55 = NULL;
|
||||
bool result;
|
||||
char *state_uuid = NULL, *state_uuid55 = NULL;
|
||||
char *last_committed = NULL, *last_committed55 = NULL;
|
||||
char *domain_id = NULL, *domain_id55 = NULL;
|
||||
bool result=true;
|
||||
uint n_values=0;
|
||||
char *wsrep_on = NULL, *wsrep_on55 = NULL;
|
||||
|
||||
mysql_variable status[] = {
|
||||
{"Wsrep_local_state_uuid", &state_uuid},
|
||||
{"wsrep_local_state_uuid", &state_uuid55},
|
||||
{"Wsrep_last_committed", &last_committed},
|
||||
{"wsrep_last_committed", &last_committed55},
|
||||
{NULL, NULL}
|
||||
};
|
||||
mysql_variable vars[] = {
|
||||
{"Wsrep_on", &wsrep_on},
|
||||
{"wsrep_on", &wsrep_on55},
|
||||
{NULL, NULL}
|
||||
};
|
||||
|
||||
mysql_variable value[] = {
|
||||
{"Wsrep_gtid_domain_id", &domain_id},
|
||||
{"wsrep_gtid_domain_id", &domain_id55},
|
||||
{NULL, NULL}
|
||||
};
|
||||
mysql_variable status[] = {
|
||||
{"Wsrep_local_state_uuid", &state_uuid},
|
||||
{"wsrep_local_state_uuid", &state_uuid55},
|
||||
{"Wsrep_last_committed", &last_committed},
|
||||
{"wsrep_last_committed", &last_committed55},
|
||||
{NULL, NULL}
|
||||
};
|
||||
|
||||
/* When backup locks are supported by the server, we should skip
|
||||
creating MB_GALERA_INFO file on the backup stage, because
|
||||
wsrep_local_state_uuid and wsrep_last_committed will be inconsistent
|
||||
without blocking commits. The state file will be created on the prepare
|
||||
stage using the WSREP recovery procedure. */
|
||||
if (have_backup_locks) {
|
||||
return(true);
|
||||
}
|
||||
mysql_variable value[] = {
|
||||
{"Wsrep_gtid_domain_id", &domain_id},
|
||||
{"wsrep_gtid_domain_id", &domain_id55},
|
||||
{NULL, NULL}
|
||||
};
|
||||
|
||||
read_mysql_variables(connection, "SHOW STATUS", status, true);
|
||||
n_values= read_mysql_variables(connection, "SHOW VARIABLES", vars, true);
|
||||
|
||||
if ((state_uuid == NULL && state_uuid55 == NULL)
|
||||
|| (last_committed == NULL && last_committed55 == NULL)) {
|
||||
msg("Warning: failed to get master wsrep state from SHOW STATUS.");
|
||||
result = true;
|
||||
goto cleanup;
|
||||
}
|
||||
if (n_values == 0 || (wsrep_on == NULL && wsrep_on55 == NULL))
|
||||
{
|
||||
msg("Server is not Galera node thus --galera-info does not "
|
||||
"have any effect.");
|
||||
result = true;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
read_mysql_variables(connection, "SHOW VARIABLES LIKE 'wsrep%'", value, true);
|
||||
read_mysql_variables(connection, "SHOW STATUS", status, true);
|
||||
|
||||
if (domain_id == NULL && domain_id55 == NULL) {
|
||||
msg("Warning: failed to get master wsrep state from SHOW VARIABLES.");
|
||||
result = true;
|
||||
goto cleanup;
|
||||
}
|
||||
if ((state_uuid == NULL && state_uuid55 == NULL)
|
||||
|| (last_committed == NULL && last_committed55 == NULL))
|
||||
{
|
||||
msg("Warning: failed to get master wsrep state from SHOW STATUS.");
|
||||
result = true;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
result = datasink->backup_file_printf(MB_GALERA_INFO,
|
||||
"%s:%s %s\n", state_uuid ? state_uuid : state_uuid55,
|
||||
last_committed ? last_committed : last_committed55,
|
||||
domain_id ? domain_id : domain_id55);
|
||||
n_values= read_mysql_variables(connection, "SHOW VARIABLES LIKE 'wsrep%'", value, true);
|
||||
|
||||
if (result)
|
||||
{
|
||||
result= datasink->backup_file_printf(XTRABACKUP_DONOR_GALERA_INFO,
|
||||
"%s:%s %s\n", state_uuid ? state_uuid : state_uuid55,
|
||||
last_committed ? last_committed : last_committed55,
|
||||
domain_id ? domain_id : domain_id55);
|
||||
}
|
||||
if (result)
|
||||
{
|
||||
write_current_binlog_file(datasink, connection);
|
||||
}
|
||||
if (n_values == 0 || (domain_id == NULL && domain_id55 == NULL))
|
||||
{
|
||||
msg("Warning: failed to get master wsrep state from SHOW VARIABLES.");
|
||||
result = true;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
result= datasink->backup_file_printf(MB_GALERA_INFO,
|
||||
"%s:%s %s\n", state_uuid ? state_uuid : state_uuid55,
|
||||
last_committed ? last_committed : last_committed55,
|
||||
domain_id ? domain_id : domain_id55);
|
||||
|
||||
if (result)
|
||||
{
|
||||
result= datasink->backup_file_printf(XTRABACKUP_DONOR_GALERA_INFO,
|
||||
"%s:%s %s\n", state_uuid ? state_uuid : state_uuid55,
|
||||
last_committed ? last_committed : last_committed55,
|
||||
domain_id ? domain_id : domain_id55);
|
||||
}
|
||||
|
||||
if (result)
|
||||
write_current_binlog_file(datasink, connection);
|
||||
|
||||
if (result)
|
||||
msg("Writing Galera info succeeded with %s:%s %s",
|
||||
state_uuid ? state_uuid : state_uuid55,
|
||||
last_committed ? last_committed : last_committed55,
|
||||
domain_id ? domain_id : domain_id55);
|
||||
|
||||
cleanup:
|
||||
free_mysql_variables(status);
|
||||
free_mysql_variables(status);
|
||||
|
||||
return(result);
|
||||
return(result);
|
||||
}
|
||||
|
||||
|
||||
@ -1466,8 +1556,6 @@ write_current_binlog_file(ds_ctxt *datasink, MYSQL *connection)
|
||||
if (gtid_exists) {
|
||||
size_t log_bin_dir_length;
|
||||
|
||||
lock_binlog_maybe(connection);
|
||||
|
||||
xb_mysql_query(connection, "FLUSH BINARY LOGS", false);
|
||||
|
||||
read_mysql_variables(connection, "SHOW MASTER STATUS",
|
||||
@ -1826,13 +1914,13 @@ bool write_backup_config_file(ds_ctxt *datasink)
|
||||
srv_log_file_size,
|
||||
srv_page_size,
|
||||
srv_undo_dir,
|
||||
srv_undo_tablespaces,
|
||||
(uint) srv_undo_tablespaces,
|
||||
page_zip_level,
|
||||
innobase_buffer_pool_filename ?
|
||||
"innodb_buffer_pool_filename=" : "",
|
||||
innobase_buffer_pool_filename ?
|
||||
innobase_buffer_pool_filename : "",
|
||||
xb_plugin_get_config());
|
||||
encryption_plugin_get_config());
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -1882,18 +1970,6 @@ select_history()
|
||||
return(true);
|
||||
}
|
||||
|
||||
bool
|
||||
flush_changed_page_bitmaps()
|
||||
{
|
||||
if (xtrabackup_incremental && have_changed_page_bitmaps &&
|
||||
!xtrabackup_incremental_force_scan) {
|
||||
xb_mysql_query(mysql_connection,
|
||||
"FLUSH NO_WRITE_TO_BINLOG CHANGED_PAGE_BITMAPS", false);
|
||||
}
|
||||
return(true);
|
||||
}
|
||||
|
||||
|
||||
/*********************************************************************//**
|
||||
Deallocate memory, disconnect from server, etc.
|
||||
@return true on success. */
|
||||
@ -1969,3 +2045,23 @@ mdl_unlock_all()
|
||||
mysql_close(mdl_con);
|
||||
spaceid_to_tablename.clear();
|
||||
}
|
||||
|
||||
ulonglong get_current_lsn(MYSQL *connection)
|
||||
{
|
||||
static const char lsn_prefix[] = "\nLog sequence number ";
|
||||
ulonglong lsn = 0;
|
||||
if (MYSQL_RES *res = xb_mysql_query(connection,
|
||||
"SHOW ENGINE INNODB STATUS",
|
||||
true, false)) {
|
||||
if (MYSQL_ROW row = mysql_fetch_row(res)) {
|
||||
const char *p= strstr(row[2], lsn_prefix);
|
||||
DBUG_ASSERT(p);
|
||||
if (p) {
|
||||
p += sizeof lsn_prefix - 1;
|
||||
lsn = lsn_t(strtoll(p, NULL, 10));
|
||||
}
|
||||
}
|
||||
mysql_free_result(res);
|
||||
}
|
||||
return lsn;
|
||||
}
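
get_current_lsn() extracts the value that follows the "Log sequence number" line in the Status column of SHOW ENGINE INNODB STATUS. A hedged usage sketch, reusing the file-global mysql_connection:

if (ulonglong lsn = get_current_lsn(mysql_connection))
  msg("InnoDB log sequence number at this point: %llu", lsn);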
|
||||
|
@ -2,13 +2,15 @@
|
||||
#define XTRABACKUP_BACKUP_MYSQL_H
|
||||
|
||||
#include <mysql.h>
|
||||
#include <string>
|
||||
#include <unordered_set>
|
||||
#include "datasink.h"
|
||||
|
||||
/* MariaDB version */
|
||||
extern ulong mysql_server_version;
|
||||
|
||||
/* server capabilities */
|
||||
extern bool have_changed_page_bitmaps;
|
||||
extern bool have_backup_locks;
|
||||
extern bool have_lock_wait_timeout;
|
||||
extern bool have_galera_enabled;
|
||||
extern bool have_multi_threaded_slave;
|
||||
@ -35,9 +37,6 @@ capture_tool_command(int argc, char **argv);
|
||||
bool
|
||||
select_history();
|
||||
|
||||
bool
|
||||
flush_changed_page_bitmaps();
|
||||
|
||||
void
|
||||
backup_cleanup();
|
||||
|
||||
@ -75,7 +74,21 @@ bool
|
||||
lock_binlog_maybe(MYSQL *connection);
|
||||
|
||||
bool
|
||||
lock_tables(MYSQL *connection);
|
||||
lock_for_backup_stage_start(MYSQL *connection);
|
||||
|
||||
bool
|
||||
lock_for_backup_stage_flush(MYSQL *connection);
|
||||
|
||||
bool
|
||||
lock_for_backup_stage_block_ddl(MYSQL *connection);
|
||||
|
||||
bool
|
||||
lock_for_backup_stage_commit(MYSQL *connection);
|
||||
|
||||
bool backup_lock(MYSQL *con, const char *table_name);
|
||||
bool backup_unlock(MYSQL *con);
|
||||
|
||||
std::unordered_set<std::string> get_tables_in_use(MYSQL *con);
|
||||
|
||||
bool
|
||||
wait_for_safe_slave(MYSQL *connection);
|
||||
@ -86,5 +99,6 @@ write_galera_info(ds_ctxt *datasink, MYSQL *connection);
|
||||
bool
|
||||
write_slave_info(ds_ctxt *datasink, MYSQL *connection);
|
||||
|
||||
ulonglong get_current_lsn(MYSQL *connection);
|
||||
|
||||
#endif
|
||||
|
(File diff suppressed because it is too large)
@ -1,85 +0,0 @@
|
||||
/******************************************************
|
||||
XtraBackup: hot backup tool for InnoDB
|
||||
(c) 2009-2012 Percona Inc.
|
||||
Originally Created 3/3/2009 Yasufumi Kinoshita
|
||||
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
|
||||
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; version 2 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
|
||||
|
||||
*******************************************************/
|
||||
|
||||
/* Changed page bitmap interface */
|
||||
|
||||
#ifndef XB_CHANGED_PAGE_BITMAP_H
|
||||
#define XB_CHANGED_PAGE_BITMAP_H
|
||||
|
||||
#include <ut0rbt.h>
|
||||
#include <fil0fil.h>
|
||||
|
||||
/* The changed page bitmap structure */
|
||||
typedef ib_rbt_t xb_page_bitmap;
|
||||
|
||||
struct xb_page_bitmap_range_struct;
|
||||
|
||||
/* The bitmap range iterator over one space id */
|
||||
typedef struct xb_page_bitmap_range_struct xb_page_bitmap_range;
|
||||
|
||||
/****************************************************************//**
|
||||
Read the disk bitmap and build the changed page bitmap tree for the
|
||||
LSN interval incremental_lsn to log_sys.next_checkpoint_lsn.
|
||||
|
||||
@return the built bitmap tree */
|
||||
xb_page_bitmap*
|
||||
xb_page_bitmap_init(void);
|
||||
/*=====================*/
|
||||
|
||||
/****************************************************************//**
|
||||
Free the bitmap tree. */
|
||||
void
|
||||
xb_page_bitmap_deinit(
|
||||
/*==================*/
|
||||
xb_page_bitmap* bitmap); /*!<in/out: bitmap tree */
|
||||
|
||||
|
||||
/****************************************************************//**
|
||||
Set up a new bitmap range iterator over a given space id changed
|
||||
pages in a given bitmap.
|
||||
|
||||
@return bitmap range iterator */
|
||||
xb_page_bitmap_range*
|
||||
xb_page_bitmap_range_init(
|
||||
/*======================*/
|
||||
xb_page_bitmap* bitmap, /*!< in: bitmap to iterate over */
|
||||
ulint space_id); /*!< in: space id */
|
||||
|
||||
/****************************************************************//**
|
||||
Get the next page id that has its bit set or cleared, i.e. equal to
|
||||
bit_value.
|
||||
|
||||
@return page id */
|
||||
ulint
|
||||
xb_page_bitmap_range_get_next_bit(
|
||||
/*==============================*/
|
||||
xb_page_bitmap_range* bitmap_range, /*!< in/out: bitmap range */
|
||||
ibool bit_value); /*!< in: bit value */
|
||||
|
||||
/****************************************************************//**
|
||||
Free the bitmap range iterator. */
|
||||
void
|
||||
xb_page_bitmap_range_deinit(
|
||||
/*========================*/
|
||||
xb_page_bitmap_range* bitmap_range); /*! in/out: bitmap range */
|
||||
|
||||
#endif
|
@ -23,7 +23,6 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
|
||||
|
||||
#include <my_global.h>
|
||||
#include <mysql_version.h>
|
||||
#include <fcntl.h>
|
||||
#include <stdarg.h>
|
||||
#include <my_sys.h>
|
||||
|
||||
@ -143,7 +142,7 @@ static inline ATTRIBUTE_FORMAT(printf, 1,2) ATTRIBUTE_NORETURN void die(const ch
|
||||
# define POSIX_FADV_NORMAL
|
||||
# define POSIX_FADV_SEQUENTIAL
|
||||
# define POSIX_FADV_DONTNEED
|
||||
# define posix_fadvise(a,b,c,d) do {} while(0)
|
||||
# define posix_fadvise(fd, offset, len, advice) do { (void)offset; } while(0)
|
||||
#endif
|
||||
|
||||
/***********************************************************************
|
||||
|
extra/mariabackup/common_engine.cc (new file, 512 lines)
@ -0,0 +1,512 @@
|
||||
#include "common_engine.h"
|
||||
#include "backup_copy.h"
|
||||
#include "xtrabackup.h"
|
||||
#include "common.h"
|
||||
#include "backup_debug.h"
|
||||
|
||||
#include <unordered_map>
|
||||
#include <atomic>
|
||||
#include <memory>
|
||||
#include <chrono>
|
||||
|
||||
namespace common_engine {
|
||||
|
||||
class Table {
|
||||
public:
|
||||
Table(std::string &db, std::string &table, std::string &fs_name) :
|
||||
m_db(std::move(db)), m_table(std::move(table)),
|
||||
m_fs_name(std::move(fs_name)) {}
|
||||
virtual ~Table() {}
|
||||
void add_file_name(const char *file_name) { m_fnames.push_back(file_name); }
|
||||
virtual bool copy(ds_ctxt_t *ds, MYSQL *con, bool no_lock,
|
||||
bool finalize, unsigned thread_num);
|
||||
std::string &get_db() { return m_db; }
|
||||
std::string &get_table() { return m_table; }
|
||||
std::string &get_version() { return m_version; }
|
||||
|
||||
protected:
|
||||
std::string m_db;
|
||||
std::string m_table;
|
||||
std::string m_fs_name;
|
||||
std::string m_version;
|
||||
std::vector<std::string> m_fnames;
|
||||
};
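
For clarity, a sketch of how a Table object is typically populated before copy() is scheduled; the names and paths below are invented, the real values come from the datadir scan in BackupImpl::scan() further down:

std::string db("db1"), table("t1"), fs_name("./db1/t1");
Table t(db, table, fs_name);            // the constructor moves the three strings
t.add_file_name("./db1/t1.MYD");
t.add_file_name("./db1/t1.MYI");
// later: t.copy(ds, con, /*no_lock=*/false, /*finalize=*/false, 0);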
|
||||
|
||||
bool
|
||||
Table::copy(ds_ctxt_t *ds, MYSQL *con, bool no_lock, bool, unsigned thread_num) {
|
||||
static const size_t buf_size = 10 * 1024 * 1024;
|
||||
std::unique_ptr<uchar[]> buf;
|
||||
bool result = false;
|
||||
File frm_file = -1;
|
||||
std::vector<File> files;
|
||||
bool locked = false;
|
||||
std::string full_tname("`");
|
||||
full_tname.append(m_db).append("`.`").append(m_table).append("`");
|
||||
|
||||
if (!no_lock && !backup_lock(con, full_tname.c_str())) {
|
||||
msg(thread_num, "Error on executing BACKUP LOCK for table %s",
|
||||
full_tname.c_str());
|
||||
goto exit;
|
||||
}
|
||||
else
|
||||
locked = !no_lock;
|
||||
|
||||
if ((frm_file = mysql_file_open(key_file_frm, (m_fs_name + ".frm").c_str(),
|
||||
O_RDONLY | O_SHARE, MYF(0))) < 0 && !m_fnames.empty() &&
|
||||
!ends_with(m_fnames[0].c_str(), ".ARZ") &&
|
||||
!ends_with(m_fnames[0].c_str(), ".ARM")) {
|
||||
// Don't treat this as an error, as the table can be dropped after it
// was added to the queue for copying
|
||||
result = true;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
for (const auto &fname : m_fnames) {
|
||||
File file = mysql_file_open(0, fname.c_str(),O_RDONLY | O_SHARE, MYF(0));
|
||||
if (file < 0) {
|
||||
msg(thread_num, "Error on file %s open during %s table copy",
|
||||
fname.c_str(), full_tname.c_str());
|
||||
goto exit;
|
||||
}
|
||||
files.push_back(file);
|
||||
}
|
||||
|
||||
if (locked && !backup_unlock(con)) {
|
||||
msg(thread_num, "Error on BACKUP UNLOCK for table %s", full_tname.c_str());
|
||||
locked = false;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
locked = false;
|
||||
|
||||
buf.reset(new uchar[buf_size]);
|
||||
|
||||
for (size_t i = 0; i < m_fnames.size(); ++i) {
|
||||
ds_file_t *dst_file = nullptr;
|
||||
size_t bytes_read;
|
||||
size_t copied_size = 0;
|
||||
MY_STAT stat_info;
|
||||
|
||||
if (my_fstat(files[i], &stat_info, MYF(0))) {
|
||||
msg(thread_num, "error: failed to get stat info for file %s of "
|
||||
"table %s", m_fnames[i].c_str(), full_tname.c_str());
|
||||
goto exit;
|
||||
}
|
||||
|
||||
const char *dst_path =
|
||||
(xtrabackup_copy_back || xtrabackup_move_back) ?
|
||||
m_fnames[i].c_str() : trim_dotslash(m_fnames[i].c_str());
|
||||
|
||||
dst_file = ds_open(ds, dst_path, &stat_info, false);
|
||||
if (!dst_file) {
|
||||
msg(thread_num, "error: cannot open destination stream for %s, table %s",
|
||||
dst_path, full_tname.c_str());
|
||||
goto exit;
|
||||
}
|
||||
|
||||
while ((bytes_read = my_read(files[i], buf.get(), buf_size, MY_WME))) {
|
||||
if (bytes_read == size_t(-1)) {
|
||||
msg(thread_num, "error: file %s read for table %s",
|
||||
m_fnames[i].c_str(), full_tname.c_str());
|
||||
ds_close(dst_file);
|
||||
goto exit;
|
||||
}
|
||||
xtrabackup_io_throttling();
|
||||
if (ds_write(dst_file, buf.get(), bytes_read)) {
|
||||
msg(thread_num, "error: file %s write for table %s",
|
||||
dst_path, full_tname.c_str());
|
||||
ds_close(dst_file);
|
||||
goto exit;
|
||||
}
|
||||
copied_size += bytes_read;
|
||||
}
|
||||
mysql_file_close(files[i], MYF(MY_WME));
|
||||
files[i] = -1;
|
||||
ds_close(dst_file);
|
||||
msg(thread_num, "Copied file %s for table %s, %zu bytes",
|
||||
m_fnames[i].c_str(), full_tname.c_str(), copied_size);
|
||||
}
|
||||
|
||||
result = true;
|
||||
|
||||
#ifndef DBUG_OFF
|
||||
{
|
||||
std::string sql_name(m_db);
|
||||
sql_name.append("/").append(m_table);
|
||||
DBUG_MARIABACKUP_EVENT_LOCK("after_ce_table_copy", fil_space_t::name_type(sql_name.data(), sql_name.size()));
|
||||
}
|
||||
#endif // DBUG_OFF
|
||||
exit:
|
||||
if (frm_file >= 0) {
|
||||
m_version = ::read_table_version_id(frm_file);
|
||||
mysql_file_close(frm_file, MYF(MY_WME));
|
||||
}
|
||||
if (locked && !backup_unlock(con)) {
|
||||
msg(thread_num, "Error on BACKUP UNLOCK for table %s", full_tname.c_str());
|
||||
result = false;
|
||||
}
|
||||
for (auto file : files)
|
||||
if (file >= 0)
|
||||
mysql_file_close(file, MYF(MY_WME));
|
||||
return result;
|
||||
}
|
||||
|
||||
// Append-only tables
|
||||
class LogTable : public Table {
|
||||
public:
|
||||
LogTable(std::string &db, std::string &table, std::string &fs_name) :
|
||||
Table(db, table, fs_name) {}
|
||||
|
||||
virtual ~LogTable() { (void)close(); }
|
||||
bool
|
||||
copy(ds_ctxt_t *ds, MYSQL *con, bool no_lock, bool finalize,
|
||||
unsigned thread_num) override;
|
||||
bool close();
|
||||
private:
|
||||
bool open(ds_ctxt_t *ds, unsigned thread_num);
|
||||
std::vector<File> m_src;
|
||||
std::vector<ds_file_t *> m_dst;
|
||||
};
|
||||
|
||||
bool
|
||||
LogTable::open(ds_ctxt_t *ds, unsigned thread_num) {
|
||||
DBUG_ASSERT(m_src.empty());
|
||||
DBUG_ASSERT(m_dst.empty());
|
||||
|
||||
std::string full_tname("`");
|
||||
full_tname.append(m_db).append("`.`").append(m_table).append("`");
|
||||
|
||||
for (const auto &fname : m_fnames) {
|
||||
File file = mysql_file_open(0, fname.c_str(),O_RDONLY | O_SHARE, MYF(0));
|
||||
if (file < 0) {
|
||||
msg(thread_num, "Error on file %s open during %s log table copy",
|
||||
fname.c_str(), full_tname.c_str());
|
||||
return false;
|
||||
}
|
||||
m_src.push_back(file);
|
||||
|
||||
MY_STAT stat_info;
|
||||
if (my_fstat(file, &stat_info, MYF(0))) {
|
||||
msg(thread_num, "error: failed to get stat info for file %s of "
|
||||
"log table %s", fname.c_str(), full_tname.c_str());
|
||||
return false;
|
||||
}
|
||||
const char *dst_path =
|
||||
(xtrabackup_copy_back || xtrabackup_move_back) ?
|
||||
fname.c_str() : trim_dotslash(fname.c_str());
|
||||
ds_file_t *dst_file = ds_open(ds, dst_path, &stat_info, false);
|
||||
if (!dst_file) {
|
||||
msg(thread_num, "error: cannot open destination stream for %s, "
|
||||
"log table %s", dst_path, full_tname.c_str());
|
||||
return false;
|
||||
}
|
||||
m_dst.push_back(dst_file);
|
||||
}
|
||||
|
||||
File frm_file;
|
||||
if ((frm_file = mysql_file_open(key_file_frm, (m_fs_name + ".frm").c_str(),
|
||||
O_RDONLY | O_SHARE, MYF(0))) < 0 && !m_fnames.empty() &&
|
||||
!ends_with(m_fnames[0].c_str(), ".ARZ") &&
|
||||
!ends_with(m_fnames[0].c_str(), ".ARM")) {
|
||||
msg(thread_num, "Error on .frm file open for log table %s",
|
||||
full_tname.c_str());
|
||||
return false;
|
||||
}
|
||||
|
||||
m_version = ::read_table_version_id(frm_file);
|
||||
mysql_file_close(frm_file, MYF(MY_WME));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool LogTable::close() {
|
||||
while (!m_src.empty()) {
|
||||
auto f = m_src.back();
|
||||
m_src.pop_back();
|
||||
mysql_file_close(f, MYF(MY_WME));
|
||||
}
|
||||
while (!m_dst.empty()) {
|
||||
auto f = m_dst.back();
|
||||
m_dst.pop_back();
|
||||
ds_close(f);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
LogTable::copy(ds_ctxt_t *ds, MYSQL *con, bool no_lock, bool finalize,
|
||||
unsigned thread_num) {
|
||||
static const size_t buf_size = 10 * 1024 * 1024;
|
||||
DBUG_ASSERT(ds);
|
||||
DBUG_ASSERT(con);
|
||||
if (m_src.empty() && !open(ds, thread_num)) {
|
||||
close();
|
||||
return false;
|
||||
}
|
||||
DBUG_ASSERT(m_src.size() == m_dst.size());
|
||||
|
||||
std::unique_ptr<uchar[]> buf(new uchar[buf_size]);
|
||||
for (size_t i = 0; i < m_src.size(); ++i) {
|
||||
// .CSM can be rewritten (see write_meta_file() usage in ha_tina.cc)
|
||||
if (!finalize && ends_with(m_fnames[i].c_str(), ".CSM"))
|
||||
continue;
|
||||
size_t bytes_read;
|
||||
size_t copied_size = 0;
|
||||
while ((bytes_read = my_read(m_src[i], buf.get(), buf_size, MY_WME))) {
|
||||
if (bytes_read == size_t(-1)) {
|
||||
msg(thread_num, "error: file %s read for log table %s",
|
||||
m_fnames[i].c_str(),
|
||||
std::string("`").append(m_db).append("`.`").
|
||||
append(m_table).append("`").c_str());
|
||||
close();
|
||||
return false;
|
||||
}
|
||||
xtrabackup_io_throttling();
|
||||
if (ds_write(m_dst[i], buf.get(), bytes_read)) {
|
||||
msg(thread_num, "error: file %s write for log table %s",
|
||||
m_fnames[i].c_str(), std::string("`").append(m_db).append("`.`").
|
||||
append(m_table).append("`").c_str());
|
||||
close();
|
||||
return false;
|
||||
}
|
||||
copied_size += bytes_read;
|
||||
}
|
||||
msg(thread_num, "Copied file %s for log table %s, %zu bytes",
|
||||
m_fnames[i].c_str(), std::string("`").append(m_db).append("`.`").
|
||||
append(m_table).append("`").c_str(), copied_size);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
class BackupImpl {
|
||||
public:
|
||||
BackupImpl(
|
||||
const char *datadir_path, ds_ctxt_t *datasink,
|
||||
std::vector<MYSQL *> &con_pool, ThreadPool &thread_pool) :
|
||||
m_datadir_path(datadir_path), m_ds(datasink), m_con_pool(con_pool),
|
||||
m_process_table_jobs(thread_pool) {}
|
||||
~BackupImpl() { }
|
||||
bool scan(
|
||||
const std::unordered_set<std::string> &exclude_tables,
|
||||
std::unordered_set<std::string> *out_processed_tables,
|
||||
bool no_lock, bool collect_log_and_stats);
|
||||
void set_post_copy_table_hook(const post_copy_table_hook_t &hook) {
|
||||
m_table_post_copy_hook = hook;
|
||||
}
|
||||
bool copy_log_tables(bool finalize);
|
||||
bool copy_stats_tables();
|
||||
bool wait_for_finish();
|
||||
bool close_log_tables();
|
||||
private:
|
||||
|
||||
void process_table_job(Table *table, bool no_lock, bool delete_table,
|
||||
bool finalize, unsigned thread_num);
|
||||
|
||||
const char *m_datadir_path;
|
||||
ds_ctxt_t *m_ds;
|
||||
std::vector<MYSQL *> &m_con_pool;
|
||||
TasksGroup m_process_table_jobs;
|
||||
|
||||
post_copy_table_hook_t m_table_post_copy_hook;
|
||||
std::unordered_map<table_key_t, std::unique_ptr<LogTable>> m_log_tables;
|
||||
std::unordered_map<table_key_t, std::unique_ptr<Table>> m_stats_tables;
|
||||
};
|
||||
|
||||
void BackupImpl::process_table_job(Table *table, bool no_lock,
|
||||
bool delete_table, bool finalize, unsigned thread_num) {
|
||||
int result = 0;
|
||||
|
||||
if (!m_process_table_jobs.get_result())
|
||||
goto exit;
|
||||
|
||||
if (!table->copy(m_ds, m_con_pool[thread_num], no_lock, finalize, thread_num))
|
||||
goto exit;
|
||||
|
||||
if (m_table_post_copy_hook)
|
||||
m_table_post_copy_hook(table->get_db(), table->get_table(),
|
||||
table->get_version());
|
||||
|
||||
result = 1;
|
||||
|
||||
exit:
|
||||
if (delete_table)
|
||||
delete table;
|
||||
m_process_table_jobs.finish_task(result);
|
||||
}
|
||||
|
||||
bool BackupImpl::scan(const std::unordered_set<table_key_t> &exclude_tables,
|
||||
std::unordered_set<table_key_t> *out_processed_tables, bool no_lock,
|
||||
bool collect_log_and_stats) {
|
||||
|
||||
msg("Start scanning common engine tables, need backup locks: %d, "
|
||||
"collect log and stat tables: %d", no_lock, collect_log_and_stats);
|
||||
|
||||
std::unordered_map<table_key_t, std::unique_ptr<Table>> found_tables;
|
||||
|
||||
foreach_file_in_db_dirs(m_datadir_path,
|
||||
[&](const char *file_path)->bool {
|
||||
|
||||
static const char *ext_list[] =
|
||||
{".MYD", ".MYI", ".MRG", ".ARM", ".ARZ", ".CSM", ".CSV", NULL};
|
||||
|
||||
bool is_aria = ends_with(file_path, ".MAD") || ends_with(file_path, ".MAI");
|
||||
|
||||
if (!collect_log_and_stats && is_aria)
|
||||
return true;
|
||||
|
||||
if (!is_aria && !filename_matches(file_path, ext_list))
|
||||
return true;
|
||||
|
||||
if (check_if_skip_table(file_path)) {
|
||||
msg("Skipping %s.", file_path);
|
||||
return true;
|
||||
}
|
||||
|
||||
auto db_table_fs = convert_filepath_to_tablename(file_path);
|
||||
auto tk =
|
||||
table_key(std::get<0>(db_table_fs), std::get<1>(db_table_fs));
|
||||
|
||||
// log and stats tables are only collected in this function,
|
||||
// so there is no need to filter them out with exclude_tables.
|
||||
if (collect_log_and_stats) {
|
||||
if (is_log_table(std::get<0>(db_table_fs).c_str(),
|
||||
std::get<1>(db_table_fs).c_str())) {
|
||||
auto table_it = m_log_tables.find(tk);
|
||||
if (table_it == m_log_tables.end()) {
|
||||
msg("Log table found: %s", tk.c_str());
|
||||
table_it = m_log_tables.emplace(tk,
|
||||
std::unique_ptr<LogTable>(new LogTable(std::get<0>(db_table_fs),
|
||||
std::get<1>(db_table_fs), std::get<2>(db_table_fs)))).first;
|
||||
}
|
||||
msg("Collect log table file: %s", file_path);
|
||||
table_it->second->add_file_name(file_path);
|
||||
return true;
|
||||
}
|
||||
// Aria can handle statistics tables
|
||||
else if (is_stats_table(std::get<0>(db_table_fs).c_str(),
|
||||
std::get<1>(db_table_fs).c_str()) && !is_aria) {
|
||||
auto table_it = m_stats_tables.find(tk);
|
||||
if (table_it == m_stats_tables.end()) {
|
||||
msg("Stats table found: %s", tk.c_str());
|
||||
table_it = m_stats_tables.emplace(tk,
|
||||
std::unique_ptr<Table>(new Table(std::get<0>(db_table_fs),
|
||||
std::get<1>(db_table_fs), std::get<2>(db_table_fs)))).first;
|
||||
}
|
||||
msg("Collect stats table file: %s", file_path);
|
||||
table_it->second->add_file_name(file_path);
|
||||
return true;
|
||||
}
|
||||
} else if (is_log_table(std::get<0>(db_table_fs).c_str(),
|
||||
std::get<1>(db_table_fs).c_str()) ||
|
||||
is_stats_table(std::get<0>(db_table_fs).c_str(),
|
||||
std::get<1>(db_table_fs).c_str()))
|
||||
return true;
|
||||
|
||||
if (is_aria)
|
||||
return true;
|
||||
|
||||
if (exclude_tables.count(tk)) {
|
||||
msg("Skip table %s at it is in exclude list", tk.c_str());
|
||||
return true;
|
||||
}
|
||||
|
||||
auto table_it = found_tables.find(tk);
|
||||
if (table_it == found_tables.end()) {
|
||||
table_it = found_tables.emplace(tk,
|
||||
std::unique_ptr<Table>(new Table(std::get<0>(db_table_fs),
|
||||
std::get<1>(db_table_fs), std::get<2>(db_table_fs)))).first;
|
||||
}
|
||||
|
||||
table_it->second->add_file_name(file_path);
|
||||
|
||||
return true;
|
||||
});
|
||||
|
||||
for (auto &table_it : found_tables) {
|
||||
m_process_table_jobs.push_task(
|
||||
std::bind(&BackupImpl::process_table_job, this, table_it.second.release(),
|
||||
no_lock, true, false, std::placeholders::_1));
|
||||
if (out_processed_tables)
|
||||
out_processed_tables->insert(table_it.first);
|
||||
}
|
||||
|
||||
msg("Stop scanning common engine tables");
|
||||
return true;
|
||||
}
|
||||
|
||||
bool BackupImpl::copy_log_tables(bool finalize) {
|
||||
for (auto &table_it : m_log_tables) {
|
||||
// Do not execute BACKUP LOCK for log tables, as they are supposed
// to be copied under the BLOCK_DDL and BLOCK_COMMIT locks.
|
||||
m_process_table_jobs.push_task(
|
||||
std::bind(&BackupImpl::process_table_job, this, table_it.second.get(),
|
||||
true, false, finalize, std::placeholders::_1));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool BackupImpl::copy_stats_tables() {
|
||||
for (auto &table_it : m_stats_tables) {
|
||||
// Do not execute BACKUP LOCK for stats tables, as they are supposed
// to be copied under the BLOCK_DDL and BLOCK_COMMIT locks.
|
||||
// Delete stats table object after copy (see process_table_job())
|
||||
m_process_table_jobs.push_task(
|
||||
std::bind(&BackupImpl::process_table_job, this, table_it.second.release(),
|
||||
true, true, false, std::placeholders::_1));
|
||||
}
|
||||
m_stats_tables.clear();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool BackupImpl::wait_for_finish() {
|
||||
/* Wait for threads to exit */
|
||||
return m_process_table_jobs.wait_for_finish();
|
||||
}
|
||||
|
||||
bool BackupImpl::close_log_tables() {
|
||||
bool result = wait_for_finish();
|
||||
for (auto &table_it : m_log_tables)
|
||||
table_it.second->close();
|
||||
return result;
|
||||
}
|
||||
|
||||
Backup::Backup(const char *datadir_path, ds_ctxt_t *datasink,
|
||||
std::vector<MYSQL *> &con_pool, ThreadPool &thread_pool) :
|
||||
m_backup_impl(
|
||||
new BackupImpl(datadir_path, datasink, con_pool,
|
||||
thread_pool)) { }
|
||||
|
||||
Backup::~Backup() {
|
||||
delete m_backup_impl;
|
||||
}
|
||||
|
||||
bool Backup::scan(
|
||||
const std::unordered_set<table_key_t> &exclude_tables,
|
||||
std::unordered_set<table_key_t> *out_processed_tables,
|
||||
bool no_lock, bool collect_log_and_stats) {
|
||||
return m_backup_impl->scan(exclude_tables, out_processed_tables, no_lock,
|
||||
collect_log_and_stats);
|
||||
}
|
||||
|
||||
bool Backup::copy_log_tables(bool finalize) {
|
||||
return m_backup_impl->copy_log_tables(finalize);
|
||||
}
|
||||
|
||||
bool Backup::copy_stats_tables() {
|
||||
return m_backup_impl->copy_stats_tables();
|
||||
}
|
||||
|
||||
bool Backup::wait_for_finish() {
|
||||
return m_backup_impl->wait_for_finish();
|
||||
}
|
||||
|
||||
bool Backup::close_log_tables() {
|
||||
return m_backup_impl->close_log_tables();
|
||||
}
|
||||
|
||||
void Backup::set_post_copy_table_hook(const post_copy_table_hook_t &hook) {
|
||||
m_backup_impl->set_post_copy_table_hook(hook);
|
||||
}
|
||||
|
||||
} // namespace common_engine
|
extra/mariabackup/common_engine.h (new file, 39 lines)
@ -0,0 +1,39 @@
|
||||
#pragma once
|
||||
#include "my_global.h"
|
||||
#include "backup_mysql.h"
|
||||
#include "datasink.h"
|
||||
#include "thread_pool.h"
|
||||
#include "xtrabackup.h"
|
||||
|
||||
#include <unordered_set>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
namespace common_engine {
|
||||
|
||||
class BackupImpl;
|
||||
|
||||
class Backup {
|
||||
public:
|
||||
Backup(const char *datadir_path, ds_ctxt_t *datasink,
|
||||
std::vector<MYSQL *> &con_pool, ThreadPool &thread_pool);
|
||||
~Backup();
|
||||
Backup (Backup &&other) = delete;
|
||||
Backup & operator= (Backup &&other) = delete;
|
||||
Backup(const Backup &) = delete;
|
||||
Backup & operator= (const Backup &) = delete;
|
||||
bool scan(
|
||||
const std::unordered_set<table_key_t> &exclude_tables,
|
||||
std::unordered_set<table_key_t> *out_processed_tables,
|
||||
bool no_lock, bool collect_log_and_stats);
|
||||
bool copy_log_tables(bool finalize);
|
||||
bool copy_stats_tables();
|
||||
bool wait_for_finish();
|
||||
bool close_log_tables();
|
||||
void set_post_copy_table_hook(const post_copy_table_hook_t &hook);
|
||||
private:
|
||||
BackupImpl *m_backup_impl;
|
||||
};
|
||||
|
||||
} // namespace common_engine
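
The header above defines the whole public surface of the common-engine backup. A sketch of a plausible call sequence, with every variable (datadir, ds, con_pool, thread_pool, exclude_tables) assumed to exist at the call site; the split between the two copy_log_tables() passes follows the BLOCK_DDL / BLOCK_COMMIT comments in common_engine.cc:

common_engine::Backup backup(datadir, ds, con_pool, thread_pool);
std::unordered_set<table_key_t> processed;
backup.scan(exclude_tables, &processed,
            /*no_lock=*/false, /*collect_log_and_stats=*/true);
backup.copy_log_tables(/*finalize=*/false);  // first pass
backup.wait_for_finish();
backup.copy_log_tables(/*finalize=*/true);   // final pass
backup.copy_stats_tables();
backup.wait_for_finish();
backup.close_log_tables();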
|
||||
|
@ -80,11 +80,11 @@ ds_create(const char *root, ds_type_t type)
|
||||
/************************************************************************
|
||||
Open a datasink file */
|
||||
ds_file_t *
|
||||
ds_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *stat)
|
||||
ds_open(ds_ctxt_t *ctxt, const char *path, const MY_STAT *stat, bool rewrite)
|
||||
{
|
||||
ds_file_t *file;
|
||||
|
||||
file = ctxt->datasink->open(ctxt, path, stat);
|
||||
file = ctxt->datasink->open(ctxt, path, stat, rewrite);
|
||||
if (file != NULL) {
|
||||
file->datasink = ctxt->datasink;
|
||||
}
|
||||
@ -104,6 +104,30 @@ ds_write(ds_file_t *file, const void *buf, size_t len)
|
||||
return file->datasink->write(file, (const uchar *)buf, len);
|
||||
}
|
||||
|
||||
int ds_seek_set(ds_file_t *file, my_off_t offset) {
|
||||
DBUG_ASSERT(file);
|
||||
DBUG_ASSERT(file->datasink);
|
||||
if (file->datasink->seek_set)
|
||||
return file->datasink->seek_set(file, offset);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ds_rename(ds_ctxt_t *ctxt, const char *old_path, const char *new_path) {
|
||||
DBUG_ASSERT(ctxt);
|
||||
DBUG_ASSERT(ctxt->datasink);
|
||||
if (ctxt->datasink->rename)
|
||||
return ctxt->datasink->rename(ctxt, old_path, new_path);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ds_remove(ds_ctxt_t *ctxt, const char *path) {
|
||||
DBUG_ASSERT(ctxt);
|
||||
DBUG_ASSERT(ctxt->datasink);
|
||||
if (ctxt->datasink->remove)
|
||||
return ctxt->datasink->mremove(ctxt, path);
|
||||
return 0;
|
||||
}
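
All three wrappers above treat the corresponding datasink callback as optional: a sink without seek_set, rename, or remove simply reports success. A minimal sketch ("ds" and the paths are placeholders):

/* A datasink that lacks a rename callback turns this call into a no-op
   returning 0, so only real failures are reported here. */
if (ds_rename(ds, "db1/t1.MAD", "db1/t1_old.MAD"))
  msg("rename failed for this datasink");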
|
||||
|
||||
/************************************************************************
|
||||
Close a datasink file.
|
||||
@return 0 on success, 1, on error. */
|
||||
|
@ -43,7 +43,8 @@ typedef struct ds_ctxt {
|
||||
*/
|
||||
bool copy_file(const char *src_file_path,
|
||||
const char *dst_file_path,
|
||||
uint thread_n);
|
||||
uint thread_n,
|
||||
bool rewrite = false);
|
||||
|
||||
bool move_file(const char *src_file_path,
|
||||
const char *dst_file_path,
|
||||
@ -76,10 +77,15 @@ typedef struct {
|
||||
|
||||
struct datasink_struct {
|
||||
ds_ctxt_t *(*init)(const char *root);
|
||||
ds_file_t *(*open)(ds_ctxt_t *ctxt, const char *path, MY_STAT *stat);
|
||||
ds_file_t *(*open)(ds_ctxt_t *ctxt, const char *path,
|
||||
const MY_STAT *stat, bool rewrite);
|
||||
int (*write)(ds_file_t *file, const unsigned char *buf, size_t len);
|
||||
int (*seek_set)(ds_file_t *file, my_off_t offset);
|
||||
int (*close)(ds_file_t *file);
|
||||
int (*remove)(const char *path);
|
||||
// TODO: consider to return bool from "rename" and "remove"
|
||||
int (*rename)(ds_ctxt_t *ctxt, const char *old_path, const char *new_path);
|
||||
int (*mremove)(ds_ctxt_t *ctxt, const char *path);
|
||||
void (*deinit)(ds_ctxt_t *ctxt);
|
||||
};
|
||||
|
||||
@ -106,12 +112,17 @@ ds_ctxt_t *ds_create(const char *root, ds_type_t type);
|
||||
|
||||
/************************************************************************
|
||||
Open a datasink file */
|
||||
ds_file_t *ds_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *stat);
|
||||
ds_file_t *ds_open(
|
||||
ds_ctxt_t *ctxt, const char *path, const MY_STAT *stat, bool rewrite = false);
|
||||
|
||||
/************************************************************************
|
||||
Write to a datasink file.
|
||||
@return 0 on success, 1 on error. */
|
||||
int ds_write(ds_file_t *file, const void *buf, size_t len);
|
||||
int ds_seek_set(ds_file_t *file, my_off_t offset);
|
||||
|
||||
int ds_rename(ds_ctxt_t *ctxt, const char *old_path, const char *new_path);
|
||||
int ds_remove(ds_ctxt_t *ctxt, const char *path);
|
||||
|
||||
/************************************************************************
|
||||
Close a datasink file.
|
||||
|
extra/mariabackup/ddl_log.cc (new file, 553 lines)
@ -0,0 +1,553 @@
|
||||
#include "ddl_log.h"
|
||||
#include "common.h"
|
||||
#include "my_sys.h"
|
||||
#include "sql_table.h"
|
||||
#include "backup_copy.h"
|
||||
#include "xtrabackup.h"
|
||||
#include <unordered_set>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <cstddef>
|
||||
|
||||
namespace ddl_log {
|
||||
|
||||
struct Entry {
|
||||
enum Type {
|
||||
CREATE,
|
||||
ALTER,
|
||||
RENAME,
|
||||
REPAIR,
|
||||
OPTIMIZE,
|
||||
DROP,
|
||||
TRUNCATE,
|
||||
CHANGE_INDEX,
|
||||
BULK_INSERT
|
||||
};
|
||||
Type type;
|
||||
std::string date;
|
||||
std::string engine;
|
||||
bool partitioned;
|
||||
std::string db;
|
||||
std::string table;
|
||||
std::string id;
|
||||
std::string new_engine;
|
||||
bool new_partitioned;
|
||||
std::string new_db;
|
||||
std::string new_table;
|
||||
std::string new_id;
|
||||
};
|
||||
|
||||
typedef std::vector<std::unique_ptr<Entry>> entries_t;
|
||||
typedef std::function<bool(std::unique_ptr<Entry>)> store_entry_func_t;
|
||||
|
||||
const char *aria_engine_name = "Aria";
|
||||
static const char *frm_ext = ".frm";
|
||||
static const char *database_keyword = "DATABASE";
|
||||
|
||||
const std::unordered_map<std::string, std::vector<const char *>> engine_exts =
|
||||
{
|
||||
{"Aria", {".MAD", ".MAI"}},
|
||||
{"MyISAM", {".MYD", ".MYI"}},
|
||||
{"MRG_MyISAM", {".MRG"}},
|
||||
{"ARCHIVE", {".ARM", ".ARZ"}},
|
||||
{"CSV", {".CSM", ".CSV"}}
|
||||
};
|
||||
|
||||
static inline bool known_engine(const std::string &engine) {
|
||||
return engine_exts.count(engine);
|
||||
}
|
||||
|
||||
// TODO: add error messages
|
||||
size_t parse(const uchar *buf, size_t buf_size, bool &error_flag,
|
||||
store_entry_func_t &store_entry_func) {
|
||||
DBUG_ASSERT(buf);
|
||||
static constexpr char token_delimiter = '\t';
|
||||
static constexpr char line_delimiter = '\n';
|
||||
enum {
|
||||
TOKEN_FIRST = 0,
|
||||
TOKEN_DATE = TOKEN_FIRST,
|
||||
TOKEN_TYPE,
|
||||
TOKEN_ENGINE,
|
||||
TOKEN_PARTITIONED,
|
||||
TOKEN_DB,
|
||||
TOKEN_TABLE,
|
||||
TOKEN_ID,
|
||||
TOKEN_MANDATORY = TOKEN_ID,
|
||||
TOKEN_NEW_ENGINE,
|
||||
TOKEN_NEW_PARTITIONED,
|
||||
TOKEN_NEW_DB,
|
||||
TOKEN_NEW_TABLE,
|
||||
TOKEN_NEW_ID,
|
||||
TOKEN_LAST = TOKEN_NEW_ID
|
||||
};
|
||||
const size_t string_offsets[TOKEN_LAST + 1] = {
|
||||
offsetof(Entry, date),
|
||||
offsetof(Entry, type), // not a string, be careful
|
||||
offsetof(Entry, engine),
|
||||
offsetof(Entry, partitioned), // not a string, be careful
|
||||
offsetof(Entry, db),
|
||||
offsetof(Entry, table),
|
||||
offsetof(Entry, id),
|
||||
offsetof(Entry, new_engine),
|
||||
offsetof(Entry, new_partitioned), // not a string, be careful
|
||||
offsetof(Entry, new_db),
|
||||
offsetof(Entry, new_table),
|
||||
offsetof(Entry, new_id)
|
||||
};
|
||||
const std::unordered_map<std::string, Entry::Type> str_to_type = {
|
||||
{"CREATE", Entry::CREATE},
|
||||
{"ALTER", Entry::ALTER},
|
||||
{"RENAME", Entry::RENAME},
|
||||
// TODO: fix to use uppercase-only
|
||||
{"repair", Entry::REPAIR},
|
||||
{"optimize", Entry::OPTIMIZE},
|
||||
{"DROP", Entry::DROP},
|
||||
{"TRUNCATE", Entry::TRUNCATE},
|
||||
{"CHANGE_INDEX", Entry::CHANGE_INDEX},
|
||||
{"BULK_INSERT", Entry::BULK_INSERT}
|
||||
};
|
||||
|
||||
const uchar *new_line = buf;
|
||||
const uchar *token_start = buf;
|
||||
unsigned token_num = TOKEN_FIRST;
|
||||
|
||||
error_flag = false;
|
||||
|
||||
std::unique_ptr<Entry> entry(new Entry());
|
||||
|
||||
for (const uchar *ptr = buf; ptr < buf + buf_size; ++ptr) {
|
||||
|
||||
if (*ptr != token_delimiter && *ptr != line_delimiter)
|
||||
continue;
|
||||
|
||||
if (token_start != ptr) {
|
||||
std::string token(token_start, ptr);
|
||||
|
||||
if (token_num == TOKEN_TYPE) {
|
||||
const auto type_it = str_to_type.find(token);
|
||||
if (type_it == str_to_type.end()) {
|
||||
error_flag = true;
|
||||
goto exit;
|
||||
}
|
||||
entry->type = type_it->second;
|
||||
}
|
||||
else if (token_num == TOKEN_PARTITIONED) {
|
||||
entry->partitioned = token[0] - '0';
|
||||
}
|
||||
else if (token_num == TOKEN_NEW_PARTITIONED) {
|
||||
entry->new_partitioned = token[0] - '0';
|
||||
}
|
||||
else if (token_num <= TOKEN_LAST) {
|
||||
DBUG_ASSERT(token_num != TOKEN_TYPE);
|
||||
DBUG_ASSERT(token_num != TOKEN_PARTITIONED);
|
||||
DBUG_ASSERT(token_num != TOKEN_NEW_PARTITIONED);
|
||||
reinterpret_cast<std::string *>
|
||||
(reinterpret_cast<uchar *>(entry.get()) + string_offsets[token_num])->
|
||||
assign(std::move(token));
|
||||
}
|
||||
else {
|
||||
error_flag = true;
|
||||
goto exit;
|
||||
}
|
||||
}
|
||||
token_start = ptr + 1;
|
||||
|
||||
if (*ptr == line_delimiter) {
|
||||
if (token_num < TOKEN_MANDATORY) {
|
||||
error_flag = true;
|
||||
goto exit;
|
||||
}
|
||||
if (!store_entry_func(std::move(entry))) {
|
||||
error_flag = true;
|
||||
goto exit;
|
||||
}
|
||||
entry.reset(new Entry());
|
||||
token_num = TOKEN_FIRST;
|
||||
new_line = ptr + 1;
|
||||
} else
|
||||
++token_num;
|
||||
}
|
||||
|
||||
exit:
|
||||
return new_line - buf;
|
||||
}
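
Given the token order above (date, type, engine, partitioned flag, db, table, id, then the optional new_* columns), each ddl.log record is one tab-separated, newline-terminated line. An illustrative entry with invented values only (the exact server format is not shown in this diff):

const char sample_ddl_log_line[] =
  "2023-04-01 12:00:00\tRENAME\tAria\t0\tdb1\tt1\t101\tAria\t0\tdb1\tt2\t101\n";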
|
||||
|
||||
bool parse(const char *file_path, store_entry_func_t store_entry_func) {
|
||||
DBUG_ASSERT(file_path);
|
||||
DBUG_ASSERT(store_entry_func);
|
||||
File file= -1;
|
||||
bool result = true;
|
||||
uchar buf[1024];
|
||||
size_t bytes_read = 0;
|
||||
size_t buf_read_offset = 0;
|
||||
|
||||
if ((file= my_open(file_path, O_RDONLY | O_SHARE | O_NOFOLLOW | O_CLOEXEC,
|
||||
MYF(MY_WME))) < 0) {
|
||||
msg("DDL log file %s open failed: %d", file_path, my_errno);
|
||||
result = false;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
while((bytes_read = my_read(
|
||||
file, &buf[buf_read_offset], sizeof(buf) - buf_read_offset, MY_WME)) > 0) {
|
||||
if (bytes_read == size_t(-1)) {
|
||||
msg("DDL log file %s read error: %d", file_path, my_errno);
|
||||
result = false;
|
||||
break;
|
||||
}
|
||||
bytes_read += buf_read_offset;
|
||||
bool parse_error_flag = false;
|
||||
size_t bytes_parsed = parse(
|
||||
buf, bytes_read, parse_error_flag, store_entry_func);
|
||||
if (parse_error_flag) {
|
||||
result = false;
|
||||
break;
|
||||
}
|
||||
size_t rest_size = bytes_read - bytes_parsed;
|
||||
if (rest_size)
|
||||
memcpy(buf, buf + bytes_parsed, rest_size);
|
||||
buf_read_offset = rest_size;
|
||||
}
|
||||
|
||||
exit:
|
||||
if (file >= 0)
|
||||
my_close(file, MYF(MY_WME));
|
||||
return result;
|
||||
};
|
||||
|
||||
|
||||
static
|
||||
bool process_database(
|
||||
const char *datadir_path,
|
||||
ds_ctxt_t *ds,
|
||||
const Entry &entry,
|
||||
std::unordered_set<std::string> &dropped_databases) {
|
||||
|
||||
if (entry.type == Entry::Type::CREATE ||
|
||||
entry.type == Entry::Type::ALTER) {
|
||||
std::string opt_file(datadir_path);
|
||||
opt_file.append("/").append(entry.db).append("/db.opt");
|
||||
if (!ds->copy_file(opt_file.c_str(), opt_file.c_str(), 0, true)) {
|
||||
msg("Failed to re-copy %s.", opt_file.c_str());
|
||||
return false;
|
||||
}
|
||||
if (entry.type == Entry::Type::CREATE)
|
||||
dropped_databases.erase(entry.db);
|
||||
return true;
|
||||
}
|
||||
|
||||
DBUG_ASSERT(entry.type == Entry::Type::DROP);
|
||||
|
||||
std::string db_path(datadir_path);
|
||||
db_path.append("/").append(entry.db);
|
||||
const char *dst_path = convert_dst(db_path.c_str());
|
||||
if (!ds_remove(ds, dst_path)) {
|
||||
dropped_databases.insert(entry.db);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static
|
||||
std::unique_ptr<std::vector<std::string>>
|
||||
find_table_files(
|
||||
const char *dir_path,
|
||||
const std::string &db,
|
||||
const std::string &table) {
|
||||
|
||||
std::unique_ptr<std::vector<std::string>>
|
||||
result(new std::vector<std::string>());
|
||||
|
||||
std::string prefix = convert_tablename_to_filepath(dir_path, db, table);
|
||||
foreach_file_in_db_dirs(dir_path, [&](const char *file_name)->bool {
|
||||
if (!strncmp(file_name, prefix.c_str(), prefix.size())) {
|
||||
DBUG_ASSERT(strlen(file_name) >= prefix.size());
|
||||
if (file_name[prefix.size()] == '.' ||
|
||||
!strncmp(file_name + prefix.size(), "#P#", strlen("#P#")))
|
||||
result->push_back(std::string(file_name));
|
||||
}
|
||||
return true;
|
||||
});
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static
|
||||
bool process_remove(
|
||||
const char *datadir_path,
|
||||
ds_ctxt_t *ds,
|
||||
const Entry &entry,
|
||||
bool remove_frm) {
|
||||
|
||||
if (check_if_skip_table(
|
||||
std::string(entry.db).append("/").append(entry.table).c_str()))
|
||||
return true;
|
||||
|
||||
auto ext_it = engine_exts.find(entry.engine);
|
||||
if (ext_it == engine_exts.end())
|
||||
return true;
|
||||
|
||||
std::string file_preffix = convert_tablename_to_filepath(datadir_path,
|
||||
entry.db, entry.table);
|
||||
const char *dst_preffix = convert_dst(file_preffix.c_str());
|
||||
|
||||
for (const char *ext : ext_it->second) {
|
||||
std::string old_name(dst_preffix);
|
||||
if (!entry.partitioned)
|
||||
old_name.append(ext);
|
||||
else
|
||||
old_name.append("#P#*");
|
||||
if (ds_remove(ds, old_name.c_str())) {
|
||||
msg("Failed to remove %s.", old_name.c_str());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if (remove_frm) {
|
||||
std::string old_frm_name(dst_preffix);
|
||||
old_frm_name.append(frm_ext);
|
||||
if (ds_remove(ds, old_frm_name.c_str())) {
|
||||
msg("Failed to remove %s.", old_frm_name.c_str());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
|
||||
}
|
||||
|
||||
static
|
||||
bool process_recopy(
|
||||
const char *datadir_path,
|
||||
ds_ctxt_t *ds,
|
||||
const Entry &entry,
|
||||
const tables_t &tables) {
|
||||
|
||||
if (check_if_skip_table(
|
||||
std::string(entry.db).append("/").append(entry.table).c_str()))
|
||||
return true;
|
||||
|
||||
const std::string &new_table_id =
|
||||
entry.new_id.empty() ? entry.id : entry.new_id;
|
||||
DBUG_ASSERT(!new_table_id.empty());
|
||||
const std::string &new_table =
|
||||
entry.new_table.empty() ? entry.table : entry.new_table;
|
||||
DBUG_ASSERT(!new_table.empty());
|
||||
const std::string &new_db =
|
||||
entry.new_db.empty() ? entry.db : entry.new_db;
|
||||
DBUG_ASSERT(!new_db.empty());
|
||||
const std::string &new_engine =
|
||||
entry.new_engine.empty() ? entry.engine : entry.new_engine;
|
||||
DBUG_ASSERT(!new_engine.empty());
|
||||
|
||||
if (entry.type != Entry::Type::BULK_INSERT) {
|
||||
auto table_it = tables.find(table_key(new_db, new_table));
|
||||
if (table_it != tables.end() &&
|
||||
table_it->second == new_table_id)
|
||||
return true;
|
||||
}
|
||||
|
||||
if (!entry.new_engine.empty() &&
|
||||
entry.engine != entry.new_engine &&
|
||||
!known_engine(entry.new_engine)) {
|
||||
return process_remove(datadir_path, ds, entry, false);
|
||||
}
|
||||
|
||||
if ((entry.partitioned || entry.new_partitioned) &&
|
||||
!process_remove(datadir_path, ds, entry, false))
|
||||
return false;
|
||||
|
||||
if (entry.partitioned || entry.new_partitioned) {
|
||||
auto files = find_table_files(datadir_path, new_db, new_table);
|
||||
if (!files.get())
|
||||
return true;
|
||||
for (const auto &file : *files) {
|
||||
const char *dst_path = convert_dst(file.c_str());
|
||||
if (!ds->copy_file(file.c_str(), dst_path, 0, true)) {
|
||||
msg("Failed to re-copy %s.", file.c_str());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
auto ext_it = engine_exts.find(new_engine);
|
||||
if (ext_it == engine_exts.end())
|
||||
return false;
|
||||
|
||||
for (const char *ext : ext_it->second) {
|
||||
std::string file_name =
|
||||
convert_tablename_to_filepath(datadir_path, new_db, new_table).
|
||||
append(ext);
|
||||
const char *dst_path = convert_dst(file_name.c_str());
|
||||
if (file_exists(file_name.c_str()) &&
|
||||
!ds->copy_file(file_name.c_str(), dst_path, 0, true)) {
|
||||
msg("Failed to re-copy %s.", file_name.c_str());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
std::string frm_file =
|
||||
convert_tablename_to_filepath(datadir_path, new_db, new_table).
|
||||
append(frm_ext);
|
||||
const char *frm_dst_path = convert_dst(frm_file.c_str());
|
||||
if (file_exists(frm_file.c_str()) &&
|
||||
!ds->copy_file(frm_file.c_str(), frm_dst_path, 0, true)) {
|
||||
msg("Failed to re-copy %s.", frm_file.c_str());
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static
|
||||
bool process_rename(
|
||||
const char *datadir_path,
|
||||
ds_ctxt_t *ds,
|
||||
const Entry &entry) {
|
||||
|
||||
if (check_if_skip_table(
|
||||
std::string(entry.db).append("/").append(entry.table).c_str()))
|
||||
return true;
|
||||
|
||||
DBUG_ASSERT(entry.db != "partition");
|
||||
|
||||
auto ext_it = engine_exts.find(entry.engine);
|
||||
if (ext_it == engine_exts.end())
|
||||
return false;
|
||||
|
||||
std::string new_preffix = convert_tablename_to_filepath(datadir_path,
|
||||
entry.new_db, entry.new_table);
|
||||
const char *dst_path = convert_dst(new_preffix.c_str());
|
||||
|
||||
std::string old_preffix = convert_tablename_to_filepath(datadir_path,
|
||||
entry.db, entry.table);
|
||||
const char *src_path = convert_dst(old_preffix.c_str());
|
||||
|
||||
for (const char *ext : ext_it->second) {
|
||||
std::string old_name(src_path);
|
||||
old_name.append(ext);
|
||||
std::string new_name(dst_path);
|
||||
new_name.append(ext);
|
||||
if (ds_rename(ds, old_name.c_str(), new_name.c_str())) {
|
||||
msg("Failed to rename %s to %s.",
|
||||
old_name.c_str(), new_name.c_str());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
std::string new_frm_file = new_preffix + frm_ext;
|
||||
const char *new_frm_dst = convert_dst(new_frm_file.c_str());
|
||||
if (file_exists(new_frm_file.c_str()) &&
|
||||
!ds->copy_file(new_frm_file.c_str(), new_frm_dst, 0, true)) {
|
||||
msg("Failed to re-copy %s.", new_frm_file.c_str());
|
||||
return false;
|
||||
}
|
||||
|
||||
// TODO: return this code if .frm is copied not under BLOCK_DDL
|
||||
/*
|
||||
std::string old_frm_name(src_path);
|
||||
old_frm_name.append(frm_ext);
|
||||
std::string new_frm_name(dst_path);
|
||||
new_frm_name.append(frm_ext);
|
||||
if (ds_rename(ds, old_frm_name.c_str(), new_frm_name.c_str())) {
|
||||
msg("Failed to rename %s to %s.",
|
||||
old_frm_name.c_str(), new_frm_name.c_str());
|
||||
return false;
|
||||
}
|
||||
*/
|
||||
return true;
|
||||
}
|
||||
|
||||
bool backup(
|
||||
const char *datadir_path,
|
||||
ds_ctxt_t *ds,
|
||||
const tables_t &tables) {
|
||||
DBUG_ASSERT(datadir_path);
|
||||
DBUG_ASSERT(ds);
|
||||
char ddl_log_path[FN_REFLEN];
|
||||
fn_format(ddl_log_path, "ddl", datadir_path, ".log", 0);
|
||||
std::vector<std::unique_ptr<Entry>> entries;
|
||||
|
||||
std::unordered_set<std::string> processed_tables;
|
||||
std::unordered_set<std::string> dropped_databases;
|
||||
|
||||
bool parsing_result =
|
||||
parse(ddl_log_path, [&](std::unique_ptr<Entry> entry)->bool {
|
||||
|
||||
if (entry->engine == database_keyword)
|
||||
return process_database(datadir_path, ds, *entry, dropped_databases);
|
||||
|
||||
if (!known_engine(entry->engine) && !known_engine(entry->new_engine))
|
||||
return true;
|
||||
|
||||
if (entry->type == Entry::Type::CREATE ||
|
||||
(entry->type == Entry::Type::ALTER &&
|
||||
!entry->new_engine.empty() &&
|
||||
entry->engine != entry->new_engine)) {
|
||||
if (!process_recopy(datadir_path, ds, *entry, tables))
|
||||
return false;
|
||||
processed_tables.insert(table_key(entry->db, entry->table));
|
||||
if (entry->type == Entry::Type::ALTER)
|
||||
processed_tables.insert(table_key(entry->new_db, entry->new_table));
|
||||
return true;
|
||||
}
|
||||
|
||||
if (entry->type == Entry::Type::DROP) {
|
||||
if (!process_remove(datadir_path, ds, *entry, true))
|
||||
return false;
|
||||
processed_tables.insert(table_key(entry->db, entry->table));
|
||||
return true;
|
||||
}
|
||||
if (entry->type == Entry::Type::RENAME) {
|
||||
if (entry->partitioned) {
|
||||
if (!process_remove(datadir_path, ds, *entry, true))
|
||||
return false;
|
||||
Entry recopy_entry {
|
||||
entry->type,
|
||||
{},
|
||||
entry->new_engine.empty() ? entry->engine : entry->new_engine,
|
||||
true,
|
||||
entry->new_db,
|
||||
entry->new_table,
|
||||
entry->new_id,
|
||||
{}, true, {}, {}, {}
|
||||
};
|
||||
if (!process_recopy(datadir_path, ds, recopy_entry, tables))
|
||||
return false;
|
||||
}
|
||||
else if (!process_rename(datadir_path, ds, *entry))
|
||||
return false;
|
||||
processed_tables.insert(table_key(entry->db, entry->table));
|
||||
processed_tables.insert(table_key(entry->new_db, entry->new_table));
|
||||
return true;
|
||||
}
|
||||
|
||||
entries.push_back(std::move(entry));
|
||||
return true;
|
||||
|
||||
});
|
||||
|
||||
if (!parsing_result)
|
||||
return false;
|
||||
|
||||
|
||||
while (!entries.empty()) {
|
||||
auto entry = std::move(entries.back());
|
||||
entries.pop_back();
|
||||
auto tk = table_key(
|
||||
entry->new_db.empty() ? entry->db : entry->new_db,
|
||||
entry->new_table.empty() ? entry->table : entry->new_table);
|
||||
if (dropped_databases.count(entry->db) ||
|
||||
dropped_databases.count(entry->new_db))
|
||||
continue;
|
||||
if (processed_tables.count(tk))
|
||||
continue;
|
||||
processed_tables.insert(std::move(tk));
|
||||
if (!process_recopy(datadir_path, ds, *entry, tables))
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace ddl_log
|
15
extra/mariabackup/ddl_log.h
Normal file
@ -0,0 +1,15 @@
|
||||
#pragma once
#include "my_global.h"
#include "datasink.h"
#include "aria_backup_client.h"
#include <string>
#include <memory>
#include <vector>
#include <unordered_map>

namespace ddl_log {

typedef std::unordered_map<std::string, std::string> tables_t;
bool backup(const char *datadir_path, ds_ctxt_t *ds, const tables_t &tables);

} // namespace ddl_log
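For orientation, a minimal sketch of how this entry point might be driven from the backup code; the helper name and the sample map entry are assumptions made for illustration, not part of the commit:

#include "ddl_log.h"

// Hypothetical caller: replays the DDL log collected during the backup into
// the already-copied files through the given datasink.
static bool replay_ddl_log(const char *datadir, ds_ctxt_t *ds)
{
  // The map appears to key "db/table" strings to the table id recorded when
  // the table was first copied; how it is populated is outside this sketch.
  ddl_log::tables_t tables;
  tables["test/t1"] = "42";  // illustrative entry only
  return ddl_log::backup(datadir, ds, tables);
}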
|
@ -44,7 +44,7 @@ typedef struct {
|
||||
|
||||
static ds_ctxt_t *buffer_init(const char *root);
|
||||
static ds_file_t *buffer_open(ds_ctxt_t *ctxt, const char *path,
|
||||
MY_STAT *mystat);
|
||||
const MY_STAT *mystat, bool rewrite);
|
||||
static int buffer_write(ds_file_t *file, const uchar *buf, size_t len);
|
||||
static int buffer_close(ds_file_t *file);
|
||||
static void buffer_deinit(ds_ctxt_t *ctxt);
|
||||
@ -53,8 +53,11 @@ datasink_t datasink_buffer = {
|
||||
&buffer_init,
|
||||
&buffer_open,
|
||||
&buffer_write,
|
||||
nullptr,
|
||||
&buffer_close,
|
||||
&dummy_remove,
|
||||
nullptr,
|
||||
nullptr,
|
||||
&buffer_deinit
|
||||
};
|
||||
|
||||
@ -84,8 +87,10 @@ buffer_init(const char *root)
|
||||
}
|
||||
|
||||
static ds_file_t *
|
||||
buffer_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat)
|
||||
buffer_open(ds_ctxt_t *ctxt, const char *path,
|
||||
const MY_STAT *mystat, bool rewrite)
|
||||
{
|
||||
DBUG_ASSERT(rewrite == false);
|
||||
ds_buffer_ctxt_t *buffer_ctxt;
|
||||
ds_ctxt_t *pipe_ctxt;
|
||||
ds_file_t *dst_file;
|
||||
|
@ -65,7 +65,7 @@ extern ulonglong xtrabackup_compress_chunk_size;
|
||||
|
||||
static ds_ctxt_t *compress_init(const char *root);
|
||||
static ds_file_t *compress_open(ds_ctxt_t *ctxt, const char *path,
|
||||
MY_STAT *mystat);
|
||||
const MY_STAT *mystat, bool rewrite);
|
||||
static int compress_write(ds_file_t *file, const uchar *buf, size_t len);
|
||||
static int compress_close(ds_file_t *file);
|
||||
static void compress_deinit(ds_ctxt_t *ctxt);
|
||||
@ -74,8 +74,11 @@ datasink_t datasink_compress = {
|
||||
&compress_init,
|
||||
&compress_open,
|
||||
&compress_write,
|
||||
nullptr,
|
||||
&compress_close,
|
||||
&dummy_remove,
|
||||
nullptr,
|
||||
nullptr,
|
||||
&compress_deinit
|
||||
};
|
||||
|
||||
@ -116,8 +119,10 @@ compress_init(const char *root)
|
||||
|
||||
static
|
||||
ds_file_t *
|
||||
compress_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat)
|
||||
compress_open(ds_ctxt_t *ctxt, const char *path,
|
||||
const MY_STAT *mystat, bool rewrite)
|
||||
{
|
||||
DBUG_ASSERT(rewrite == false);
|
||||
ds_compress_ctxt_t *comp_ctxt;
|
||||
ds_ctxt_t *dest_ctxt;
|
||||
ds_file_t *dest_file;
|
||||
|
@ -42,8 +42,9 @@ typedef struct {
|
||||
|
||||
static ds_ctxt_t *local_init(const char *root);
|
||||
static ds_file_t *local_open(ds_ctxt_t *ctxt, const char *path,
|
||||
MY_STAT *mystat);
|
||||
const MY_STAT *mystat, bool rewrite);
|
||||
static int local_write(ds_file_t *file, const uchar *buf, size_t len);
|
||||
static int local_seek_set(ds_file_t *file, my_off_t offset);
|
||||
static int local_close(ds_file_t *file);
|
||||
static void local_deinit(ds_ctxt_t *ctxt);
|
||||
|
||||
@ -52,13 +53,20 @@ static int local_remove(const char *path)
|
||||
return unlink(path);
|
||||
}
|
||||
|
||||
static int local_rename(
|
||||
ds_ctxt_t *ctxt, const char *old_path, const char *new_path);
|
||||
static int local_mremove(ds_ctxt_t *ctxt, const char *path);
|
||||
|
||||
extern "C" {
|
||||
datasink_t datasink_local = {
|
||||
&local_init,
|
||||
&local_open,
|
||||
&local_write,
|
||||
&local_seek_set,
|
||||
&local_close,
|
||||
&local_remove,
|
||||
&local_rename,
|
||||
&local_mremove,
|
||||
&local_deinit
|
||||
};
|
||||
}
|
||||
@ -89,7 +97,7 @@ local_init(const char *root)
|
||||
static
|
||||
ds_file_t *
|
||||
local_open(ds_ctxt_t *ctxt, const char *path,
|
||||
MY_STAT *mystat __attribute__((unused)))
|
||||
const MY_STAT *mystat __attribute__((unused)), bool rewrite)
|
||||
{
|
||||
char fullpath[FN_REFLEN];
|
||||
char dirpath[FN_REFLEN];
|
||||
@ -111,8 +119,10 @@ local_open(ds_ctxt_t *ctxt, const char *path,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
fd = my_create(fullpath, 0, O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW,
|
||||
MYF(MY_WME));
|
||||
// TODO: check on Windows and set the corresponding flags on failure
|
||||
fd = my_create(fullpath, 0,
|
||||
O_WRONLY | O_BINARY | (rewrite ? O_TRUNC : O_EXCL) | O_NOFOLLOW,
|
||||
MYF(MY_WME));
|
||||
if (fd < 0) {
|
||||
return NULL;
|
||||
}
|
||||
@ -194,8 +204,8 @@ static void init_ibd_data(ds_local_file_t *local_file, const uchar *buf, size_t
|
||||
return;
|
||||
}
|
||||
|
||||
auto flags = mach_read_from_4(&buf[FIL_PAGE_DATA + FSP_SPACE_FLAGS]);
|
||||
auto ssize = FSP_FLAGS_GET_PAGE_SSIZE(flags);
|
||||
uint32_t flags = mach_read_from_4(&buf[FIL_PAGE_DATA + FSP_SPACE_FLAGS]);
|
||||
uint32_t ssize = FSP_FLAGS_GET_PAGE_SSIZE(flags);
|
||||
local_file->pagesize= ssize == 0 ? UNIV_PAGE_SIZE_ORIG : ((UNIV_ZIP_SIZE_MIN >> 1) << ssize);
|
||||
local_file->compressed = fil_space_t::full_crc32(flags)
|
||||
? fil_space_t::is_compressed(flags)
|
||||
@ -239,6 +249,15 @@ local_write(ds_file_t *file, const uchar *buf, size_t len)
|
||||
return 1;
|
||||
}
|
||||
|
||||
static
|
||||
int
|
||||
local_seek_set(ds_file_t *file, my_off_t offset) {
|
||||
ds_local_file_t *local_file= (ds_local_file_t *)file->ptr;
|
||||
if (my_seek(local_file->fd, offset, SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Set EOF at file's current position.*/
|
||||
static int set_eof(File fd)
|
||||
{
|
||||
@ -276,3 +295,77 @@ local_deinit(ds_ctxt_t *ctxt)
|
||||
my_free(ctxt->root);
|
||||
my_free(ctxt);
|
||||
}
|
||||
|
||||
|
||||
static int local_rename(
|
||||
ds_ctxt_t *ctxt, const char *old_path, const char *new_path) {
|
||||
char full_old_path[FN_REFLEN];
|
||||
char full_new_path[FN_REFLEN];
|
||||
fn_format(full_old_path, old_path, ctxt->root, "", MYF(MY_RELATIVE_PATH));
|
||||
fn_format(full_new_path, new_path, ctxt->root, "", MYF(MY_RELATIVE_PATH));
|
||||
// Ignore errors as .frm files can be copied separately.
|
||||
// TODO: return error processing here after the corresponding changes in
|
||||
// xtrabackup.cc
|
||||
(void)my_rename(full_old_path, full_new_path, MYF(0));
|
||||
// if (my_rename(full_old_path, full_new_path, MYF(0))) {
|
||||
// msg("Failed to rename file %s to %s", old_path, new_path);
|
||||
// return 1;
|
||||
// }
|
||||
return 0;
|
||||
}
|
||||
|
||||
// It's ok if destination does not contain the file or folder
|
||||
static int local_mremove(ds_ctxt_t *ctxt, const char *path) {
|
||||
char full_path[FN_REFLEN];
|
||||
fn_format(full_path, path, ctxt->root, "", MYF(MY_RELATIVE_PATH));
|
||||
size_t full_path_len = strlen(full_path);
|
||||
if (full_path[full_path_len - 1] == '*') {
|
||||
full_path[full_path_len - 1] = '\0';
|
||||
char *preffix = strrchr(full_path, '/');
|
||||
const char *full_path_dir = full_path;
|
||||
size_t preffix_len;
|
||||
if (preffix) {
|
||||
preffix_len = (full_path_len - 1) - (preffix - full_path);
|
||||
*(preffix++) = '\0';
|
||||
}
|
||||
else {
|
||||
preffix = full_path;
|
||||
preffix_len = full_path_len - 1;
|
||||
full_path_dir= IF_WIN(".\\", "./");
|
||||
}
|
||||
if (!preffix_len)
|
||||
return 0;
|
||||
MY_DIR *dir= my_dir(full_path_dir, 0);
|
||||
if (!dir)
|
||||
return 0;
|
||||
for (size_t i = 0; i < dir->number_of_files; ++i) {
|
||||
char full_fpath[FN_REFLEN];
|
||||
if (strncmp(dir->dir_entry[i].name, preffix, preffix_len))
|
||||
continue;
|
||||
fn_format(full_fpath, dir->dir_entry[i].name,
|
||||
full_path_dir, "", MYF(MY_RELATIVE_PATH));
|
||||
(void)my_delete(full_fpath, MYF(0));
|
||||
}
|
||||
my_dirend(dir);
|
||||
}
|
||||
else {
|
||||
MY_STAT stat;
|
||||
if (!my_stat(full_path, &stat, MYF(0)))
|
||||
return 0;
|
||||
MY_DIR *dir= my_dir(full_path, 0);
|
||||
if (!dir) {
|
||||
// TODO: check for error here if necessary
|
||||
(void)my_delete(full_path, MYF(0));
|
||||
return 0;
|
||||
}
|
||||
for (size_t i = 0; i < dir->number_of_files; ++i) {
|
||||
char full_fpath[FN_REFLEN];
|
||||
fn_format(full_fpath, dir->dir_entry[i].name,
|
||||
full_path, "", MYF(MY_RELATIVE_PATH));
|
||||
(void)my_delete(full_fpath, MYF(0));
|
||||
}
|
||||
my_dirend(dir);
|
||||
(void)my_rmtree(full_path, MYF(0));
|
||||
}
|
||||
return 0;
|
||||
}
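To make the two accepted path forms explicit, here is a small hypothetical helper that only calls the function defined above; the paths are illustrative and not part of the commit:

/* Illustration of local_mremove() semantics; missing files are not an error. */
static void local_mremove_examples(ds_ctxt_t *ctxt)
{
  /* Wildcard form: deletes every file in db1/ whose name starts with "t1.",
     e.g. db1/t1.frm and db1/t1.MAI, if they exist. */
  (void) local_mremove(ctxt, "db1/t1.*");
  /* Plain form: deletes the named file, or, if it is a directory, its files
     and then the directory itself. */
  (void) local_mremove(ctxt, "db1/t1");
}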
|
||||
|
@ -30,7 +30,7 @@ typedef struct {
|
||||
|
||||
static ds_ctxt_t *stdout_init(const char *root);
|
||||
static ds_file_t *stdout_open(ds_ctxt_t *ctxt, const char *path,
|
||||
MY_STAT *mystat);
|
||||
const MY_STAT *mystat, bool rewrite);
|
||||
static int stdout_write(ds_file_t *file, const uchar *buf, size_t len);
|
||||
static int stdout_close(ds_file_t *file);
|
||||
static void stdout_deinit(ds_ctxt_t *ctxt);
|
||||
@ -39,8 +39,11 @@ datasink_t datasink_stdout = {
|
||||
&stdout_init,
|
||||
&stdout_open,
|
||||
&stdout_write,
|
||||
nullptr,
|
||||
&stdout_close,
|
||||
&dummy_remove,
|
||||
nullptr,
|
||||
nullptr,
|
||||
&stdout_deinit
|
||||
};
|
||||
|
||||
@ -61,8 +64,9 @@ static
|
||||
ds_file_t *
|
||||
stdout_open(ds_ctxt_t *ctxt __attribute__((unused)),
|
||||
const char *path __attribute__((unused)),
|
||||
MY_STAT *mystat __attribute__((unused)))
|
||||
const MY_STAT *mystat __attribute__((unused)), bool rewrite)
|
||||
{
|
||||
DBUG_ASSERT(rewrite == false);
|
||||
ds_stdout_file_t *stdout_file;
|
||||
ds_file_t *file;
|
||||
size_t pathlen;
|
||||
|
@ -41,7 +41,7 @@ typedef struct {
|
||||
|
||||
static ds_ctxt_t *tmpfile_init(const char *root);
|
||||
static ds_file_t *tmpfile_open(ds_ctxt_t *ctxt, const char *path,
|
||||
MY_STAT *mystat);
|
||||
const MY_STAT *mystat, bool rewrite);
|
||||
static int tmpfile_write(ds_file_t *file, const uchar *buf, size_t len);
|
||||
static int tmpfile_close(ds_file_t *file);
|
||||
static void tmpfile_deinit(ds_ctxt_t *ctxt);
|
||||
@ -50,8 +50,11 @@ datasink_t datasink_tmpfile = {
|
||||
&tmpfile_init,
|
||||
&tmpfile_open,
|
||||
&tmpfile_write,
|
||||
nullptr,
|
||||
&tmpfile_close,
|
||||
&dummy_remove,
|
||||
nullptr,
|
||||
nullptr,
|
||||
&tmpfile_deinit
|
||||
};
|
||||
|
||||
@ -80,8 +83,9 @@ tmpfile_init(const char *root)
|
||||
|
||||
static ds_file_t *
|
||||
tmpfile_open(ds_ctxt_t *ctxt, const char *path,
|
||||
MY_STAT *mystat)
|
||||
const MY_STAT *mystat, bool rewrite)
|
||||
{
|
||||
DBUG_ASSERT(rewrite == false);
|
||||
ds_tmpfile_ctxt_t *tmpfile_ctxt;
|
||||
char tmp_path[FN_REFLEN];
|
||||
ds_tmp_file_t *tmp_file;
|
||||
|
@ -40,24 +40,31 @@ General streaming interface */
|
||||
|
||||
static ds_ctxt_t *xbstream_init(const char *root);
|
||||
static ds_file_t *xbstream_open(ds_ctxt_t *ctxt, const char *path,
|
||||
MY_STAT *mystat);
|
||||
const MY_STAT *mystat, bool rewrite);
|
||||
static int xbstream_write(ds_file_t *file, const uchar *buf, size_t len);
|
||||
static int xbstream_seek_set(ds_file_t *file, my_off_t offset);
|
||||
static int xbstream_close(ds_file_t *file);
|
||||
static void xbstream_deinit(ds_ctxt_t *ctxt);
|
||||
|
||||
static int xbstream_rename(
|
||||
ds_ctxt_t *ctxt, const char *old_path, const char *new_path);
|
||||
static int xbstream_mremove(ds_ctxt_t *ctxt, const char *path);
|
||||
|
||||
datasink_t datasink_xbstream = {
|
||||
&xbstream_init,
|
||||
&xbstream_open,
|
||||
&xbstream_write,
|
||||
&xbstream_seek_set,
|
||||
&xbstream_close,
|
||||
&dummy_remove,
|
||||
&xbstream_rename,
|
||||
&xbstream_mremove,
|
||||
&xbstream_deinit
|
||||
};
|
||||
|
||||
static
|
||||
ssize_t
|
||||
my_xbstream_write_callback(xb_wstream_file_t *f __attribute__((unused)),
|
||||
void *userdata, const void *buf, size_t len)
|
||||
my_xbstream_write_callback(void *userdata, const void *buf, size_t len)
|
||||
{
|
||||
ds_stream_ctxt_t *stream_ctxt;
|
||||
|
||||
@ -89,7 +96,7 @@ xbstream_init(const char *root __attribute__((unused)))
|
||||
goto err;
|
||||
}
|
||||
|
||||
xbstream = xb_stream_write_new();
|
||||
xbstream = xb_stream_write_new(my_xbstream_write_callback, stream_ctxt);
|
||||
if (xbstream == NULL) {
|
||||
msg("xb_stream_write_new() failed.");
|
||||
goto err;
|
||||
@ -108,7 +115,8 @@ err:
|
||||
|
||||
static
|
||||
ds_file_t *
|
||||
xbstream_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat)
|
||||
xbstream_open(ds_ctxt_t *ctxt, const char *path,
|
||||
const MY_STAT *mystat, bool rewrite)
|
||||
{
|
||||
ds_file_t *file;
|
||||
ds_stream_file_t *stream_file;
|
||||
@ -144,9 +152,7 @@ xbstream_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat)
|
||||
|
||||
xbstream = stream_ctxt->xbstream;
|
||||
|
||||
xbstream_file = xb_stream_write_open(xbstream, path, mystat,
|
||||
stream_ctxt,
|
||||
my_xbstream_write_callback);
|
||||
xbstream_file = xb_stream_write_open(xbstream, path, mystat, rewrite);
|
||||
|
||||
if (xbstream_file == NULL) {
|
||||
msg("xb_stream_write_open() failed.");
|
||||
@ -190,6 +196,45 @@ xbstream_write(ds_file_t *file, const uchar *buf, size_t len)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static
|
||||
int
|
||||
xbstream_seek_set(ds_file_t *file, my_off_t offset)
|
||||
{
|
||||
ds_stream_file_t *stream_file;
|
||||
xb_wstream_file_t *xbstream_file;
|
||||
|
||||
|
||||
stream_file = (ds_stream_file_t *) file->ptr;
|
||||
|
||||
xbstream_file = stream_file->xbstream_file;
|
||||
|
||||
if (xb_stream_write_seek_set(xbstream_file, offset)) {
|
||||
msg("xb_stream_write_seek_set() failed.");
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static
|
||||
int
|
||||
xbstream_mremove(ds_ctxt_t *ctxt, const char *path) {
|
||||
ds_stream_ctxt_t *stream_ctxt =
|
||||
reinterpret_cast<ds_stream_ctxt_t *>(ctxt->ptr);
|
||||
xb_wstream_t *xbstream = stream_ctxt->xbstream;
|
||||
return xb_stream_write_remove(xbstream, path);
|
||||
}
|
||||
|
||||
static
|
||||
int
|
||||
xbstream_rename(
|
||||
ds_ctxt_t *ctxt, const char *old_path, const char *new_path) {
|
||||
ds_stream_ctxt_t *stream_ctxt =
|
||||
reinterpret_cast<ds_stream_ctxt_t *>(ctxt->ptr);
|
||||
xb_wstream_t *xbstream = stream_ctxt->xbstream;
|
||||
return xb_stream_write_rename(xbstream, old_path, new_path);
|
||||
}
|
||||
|
||||
static
|
||||
int
|
||||
xbstream_close(ds_file_t *file)
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* Copyright (c) 2017, 2022, MariaDB Corporation.
|
||||
/* Copyright (c) 2017, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
@ -17,18 +17,18 @@
|
||||
#include <mysqld.h>
|
||||
#include <mysql.h>
|
||||
#include <xtrabackup.h>
|
||||
#include <xb_plugin.h>
|
||||
#include <encryption_plugin.h>
|
||||
#include <sql_plugin.h>
|
||||
#include <sstream>
|
||||
#include <vector>
|
||||
#include <common.h>
|
||||
#include <backup_mysql.h>
|
||||
#include <srv0srv.h>
|
||||
#include <log0crypt.h>
|
||||
|
||||
|
||||
extern struct st_maria_plugin *mysql_optional_plugins[];
|
||||
extern struct st_maria_plugin *mysql_mandatory_plugins[];
|
||||
static void xb_plugin_init(int argc, char **argv);
|
||||
static void encryption_plugin_init(int argc, char **argv);
|
||||
|
||||
extern char *xb_plugin_load;
|
||||
extern char *xb_plugin_dir;
|
||||
@ -42,7 +42,7 @@ const char *QUERY_PLUGIN =
|
||||
" OR (plugin_type = 'DAEMON' AND plugin_name LIKE 'provider\\_%')"
|
||||
" AND plugin_status='ACTIVE'";
|
||||
|
||||
std::string xb_plugin_config;
|
||||
std::string encryption_plugin_config;
|
||||
|
||||
static void add_to_plugin_load_list(const char *plugin_def)
|
||||
{
|
||||
@ -52,16 +52,16 @@ static void add_to_plugin_load_list(const char *plugin_def)
|
||||
static char XTRABACKUP_EXE[] = "xtrabackup";
|
||||
|
||||
/*
|
||||
Read "plugin-load" value from backup-my.cnf during prepare phase.
|
||||
Read "plugin-load" value (encryption plugin) from backup-my.cnf during
|
||||
prepare phase.
|
||||
The value is stored during backup phase.
|
||||
*/
|
||||
static std::string get_plugin_from_cnf(const char *dir)
|
||||
static std::string get_encryption_plugin_from_cnf()
|
||||
{
|
||||
std::string path = dir + std::string("/backup-my.cnf");
|
||||
FILE *f = fopen(path.c_str(), "r");
|
||||
FILE *f = fopen("backup-my.cnf", "r");
|
||||
if (!f)
|
||||
{
|
||||
die("Can't open %s for reading", path.c_str());
|
||||
die("Can't open backup-my.cnf for reading");
|
||||
}
|
||||
char line[512];
|
||||
std::string plugin_load;
|
||||
@ -80,7 +80,7 @@ static std::string get_plugin_from_cnf(const char *dir)
|
||||
}
|
||||
|
||||
|
||||
void xb_plugin_backup_init(MYSQL *mysql)
|
||||
void encryption_plugin_backup_init(MYSQL *mysql)
|
||||
{
|
||||
MYSQL_RES *result;
|
||||
MYSQL_ROW row;
|
||||
@ -163,7 +163,7 @@ void xb_plugin_backup_init(MYSQL *mysql)
|
||||
mysql_free_result(result);
|
||||
}
|
||||
|
||||
xb_plugin_config = oss.str();
|
||||
encryption_plugin_config = oss.str();
|
||||
|
||||
argc = 0;
|
||||
argv[argc++] = XTRABACKUP_EXE;
|
||||
@ -175,23 +175,23 @@ void xb_plugin_backup_init(MYSQL *mysql)
|
||||
}
|
||||
argv[argc] = 0;
|
||||
|
||||
xb_plugin_init(argc, argv);
|
||||
encryption_plugin_init(argc, argv);
|
||||
}
|
||||
|
||||
const char *xb_plugin_get_config()
|
||||
const char *encryption_plugin_get_config()
|
||||
{
|
||||
return xb_plugin_config.c_str();
|
||||
return encryption_plugin_config.c_str();
|
||||
}
|
||||
|
||||
extern int finalize_encryption_plugin(st_plugin_int *plugin);
|
||||
|
||||
|
||||
void xb_plugin_prepare_init(int argc, char **argv, const char *dir)
|
||||
void encryption_plugin_prepare_init(int argc, char **argv)
|
||||
{
|
||||
std::string plugin_load= get_plugin_from_cnf(dir ? dir : ".");
|
||||
std::string plugin_load= get_encryption_plugin_from_cnf();
|
||||
if (plugin_load.size())
|
||||
{
|
||||
msg("Loading plugins from %s", plugin_load.c_str());
|
||||
msg("Loading encryption plugin from %s", plugin_load.c_str());
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -211,19 +211,19 @@ void xb_plugin_prepare_init(int argc, char **argv, const char *dir)
|
||||
new_argv[0] = XTRABACKUP_EXE;
|
||||
memcpy(&new_argv[1], argv, argc*sizeof(char *));
|
||||
|
||||
xb_plugin_init(argc+1, new_argv);
|
||||
encryption_plugin_init(argc+1, new_argv);
|
||||
|
||||
delete[] new_argv;
|
||||
}
|
||||
|
||||
static void xb_plugin_init(int argc, char **argv)
|
||||
static void encryption_plugin_init(int argc, char **argv)
|
||||
{
|
||||
/* Patch optional and mandatory plugins, we only need to load the one in xb_plugin_load. */
|
||||
mysql_optional_plugins[0] = mysql_mandatory_plugins[0] = 0;
|
||||
plugin_maturity = MariaDB_PLUGIN_MATURITY_UNKNOWN; /* mariabackup accepts all plugins */
|
||||
msg("Loading plugins");
|
||||
msg("Loading encryption plugin");
|
||||
for (int i= 1; i < argc; i++)
|
||||
msg("\t Plugin parameter : '%s'", argv[i]);
|
||||
msg("\t Encryption plugin parameter : '%s'", argv[i]);
|
||||
plugin_init(&argc, argv, PLUGIN_INIT_SKIP_PLUGIN_TABLE);
|
||||
}
|
||||
|
7
extra/mariabackup/encryption_plugin.h
Normal file
@ -0,0 +1,7 @@
|
||||
#include <mysql.h>
|
||||
#include <string>
|
||||
extern void encryption_plugin_backup_init(MYSQL *mysql);
|
||||
extern const char* encryption_plugin_get_config();
|
||||
extern void encryption_plugin_prepare_init(int argc, char **argv);
|
||||
|
||||
//extern void encryption_plugin_init(int argc, char **argv);
|
@ -231,8 +231,7 @@ xb_fil_cur_open(
|
||||
/ cursor->page_size);
|
||||
|
||||
cursor->read_filter = read_filter;
|
||||
cursor->read_filter->init(&cursor->read_filter_ctxt, cursor,
|
||||
node->space->id);
|
||||
cursor->read_filter->init(&cursor->read_filter_ctxt, cursor);
|
||||
|
||||
return(XB_FIL_CUR_SUCCESS);
|
||||
}
|
||||
@ -502,10 +501,6 @@ xb_fil_cur_close(
|
||||
/*=============*/
|
||||
xb_fil_cur_t *cursor) /*!< in/out: source file cursor */
|
||||
{
|
||||
if (cursor->read_filter) {
|
||||
cursor->read_filter->deinit(&cursor->read_filter_ctxt);
|
||||
}
|
||||
|
||||
aligned_free(cursor->buf);
|
||||
cursor->buf = NULL;
|
||||
|
||||
|
@ -27,6 +27,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
|
||||
|
||||
#include <my_dir.h>
|
||||
#include "read_filt.h"
|
||||
#include "mtr0types.h"
|
||||
#include "srv0start.h"
|
||||
#include "srv0srv.h"
|
||||
#include "xtrabackup.h"
|
||||
|
@ -78,10 +78,8 @@ my_bool opt_ibx_galera_info = FALSE;
|
||||
my_bool opt_ibx_slave_info = FALSE;
|
||||
my_bool opt_ibx_no_lock = FALSE;
|
||||
my_bool opt_ibx_safe_slave_backup = FALSE;
|
||||
my_bool opt_ibx_rsync = FALSE;
|
||||
my_bool opt_ibx_force_non_empty_dirs = FALSE;
|
||||
my_bool opt_ibx_noversioncheck = FALSE;
|
||||
my_bool opt_ibx_no_backup_locks = FALSE;
|
||||
my_bool opt_ibx_decompress = FALSE;
|
||||
|
||||
char *opt_ibx_incremental_history_name = NULL;
|
||||
@ -268,8 +266,10 @@ static struct my_option ibx_long_options[] =
|
||||
(uchar *) &opt_ibx_incremental, (uchar *) &opt_ibx_incremental, 0,
|
||||
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
|
||||
|
||||
{"no-lock", OPT_NO_LOCK, "Use this option to disable table lock "
|
||||
"with \"FLUSH TABLES WITH READ LOCK\". Use it only if ALL your "
|
||||
{"no-lock", OPT_NO_LOCK, "This option should not be used as "
"mariadb-backup now uses BACKUP LOCKS, which minimizes the "
"lock time. ALTER TABLE can run in parallel with BACKUP LOCKS. "
"Use the --no-lock option only if ALL your "
|
||||
"tables are InnoDB and you DO NOT CARE about the binary log "
|
||||
"position of the backup. This option shouldn't be used if there "
|
||||
"are any DDL statements being executed or if any updates are "
|
||||
@ -297,15 +297,6 @@ static struct my_option ibx_long_options[] =
|
||||
(uchar *) &opt_ibx_safe_slave_backup,
|
||||
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
|
||||
|
||||
{"rsync", OPT_RSYNC, "Uses the rsync utility to optimize local file "
|
||||
"transfers. When this option is specified, innobackupex uses rsync "
|
||||
"to copy all non-InnoDB files instead of spawning a separate cp for "
|
||||
"each file, which can be much faster for servers with a large number "
|
||||
"of databases or tables. This option cannot be used together with "
|
||||
"--stream.",
|
||||
(uchar *) &opt_ibx_rsync, (uchar *) &opt_ibx_rsync,
|
||||
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
|
||||
|
||||
{"force-non-empty-directories", OPT_FORCE_NON_EMPTY_DIRS, "This "
|
||||
"option, when specified, makes --copy-back or --move-back transfer "
|
||||
"files to non-empty directories. Note that no existing files will be "
|
||||
@ -330,13 +321,9 @@ static struct my_option ibx_long_options[] =
|
||||
(uchar *) &opt_ibx_noversioncheck,
|
||||
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
|
||||
|
||||
{"no-backup-locks", OPT_NO_BACKUP_LOCKS, "This option controls if "
|
||||
"backup locks should be used instead of FLUSH TABLES WITH READ LOCK "
|
||||
"on the backup stage. The option has no effect when backup locks are "
|
||||
"not supported by the server. This option is enabled by default, "
|
||||
"disable with --no-backup-locks.",
|
||||
(uchar *) &opt_ibx_no_backup_locks,
|
||||
(uchar *) &opt_ibx_no_backup_locks,
|
||||
{"no-backup-locks", OPT_NO_BACKUP_LOCKS,
|
||||
"Old disabled option which has no effect anymore.",
|
||||
(uchar *) 0, (uchar*) 0,
|
||||
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
|
||||
|
||||
{"decompress", OPT_DECOMPRESS, "Decompresses all files with the .qp "
|
||||
@ -402,11 +389,10 @@ static struct my_option ibx_long_options[] =
|
||||
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
|
||||
|
||||
{"ftwrl-wait-query-type", OPT_LOCK_WAIT_QUERY_TYPE,
|
||||
"This option specifies which types of queries are allowed to complete "
|
||||
"before innobackupex will issue the global lock. Default is all.",
|
||||
(uchar*) &opt_ibx_lock_wait_query_type,
|
||||
(uchar*) &opt_ibx_lock_wait_query_type, &query_type_typelib,
|
||||
GET_ENUM, REQUIRED_ARG, QUERY_TYPE_ALL, 0, 0, 0, 0, 0},
|
||||
"Old disabled option which has no effect anymore (not needed "
|
||||
"with BACKUP LOCKS)",
|
||||
(uchar*) 0, (uchar*) 0, &query_type_typelib, GET_ENUM,
|
||||
REQUIRED_ARG, QUERY_TYPE_ALL, 0, 0, 0, 0, 0},
|
||||
|
||||
{"kill-long-query-type", OPT_KILL_LONG_QUERY_TYPE,
|
||||
"This option specifies which types of queries should be killed to "
|
||||
@ -447,32 +433,32 @@ static struct my_option ibx_long_options[] =
|
||||
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
|
||||
|
||||
{"kill-long-queries-timeout", OPT_KILL_LONG_QUERIES_TIMEOUT,
|
||||
"This option specifies the number of seconds innobackupex waits "
|
||||
"between starting FLUSH TABLES WITH READ LOCK and killing those "
|
||||
"queries that block it. Default is 0 seconds, which means "
|
||||
"innobackupex will not attempt to kill any queries.",
|
||||
(uchar*) &opt_ibx_kill_long_queries_timeout,
|
||||
(uchar*) &opt_ibx_kill_long_queries_timeout, 0, GET_UINT,
|
||||
"Old disabled option which has no effect anymore (not needed "
|
||||
"with BACKUP LOCKS)",
|
||||
(uchar*) 0, (uchar*) 0, 0, GET_UINT,
|
||||
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
|
||||
|
||||
{"ftwrl-wait-timeout", OPT_LOCK_WAIT_TIMEOUT,
|
||||
"This option specifies time in seconds that innobackupex should wait "
|
||||
"for queries that would block FTWRL before running it. If there are "
|
||||
"still such queries when the timeout expires, innobackupex terminates "
|
||||
"with an error. Default is 0, in which case innobackupex does not "
|
||||
"wait for queries to complete and starts FTWRL immediately.",
|
||||
(uchar*) &opt_ibx_lock_wait_timeout,
|
||||
(uchar*) &opt_ibx_lock_wait_timeout, 0, GET_UINT,
|
||||
"Alias for startup-wait-timeout",
|
||||
(uchar*) &opt_ibx_lock_wait_timeout,
|
||||
(uchar*) &opt_ibx_lock_wait_timeout, 0, GET_UINT,
|
||||
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
|
||||
|
||||
{"startup-wait-timeout", OPT_LOCK_WAIT_TIMEOUT,
"This option specifies time in seconds that mariadb-backup should wait for "
"BACKUP STAGE START to complete. BACKUP STAGE START has to wait until all "
"currently running queries using explicit LOCK TABLES have ended. "
"If there are still such queries when the timeout expires, mariadb-backup "
"terminates with an error. Default is 0, in which case mariadb-backup waits "
"indefinitely for BACKUP STAGE START to finish.",
|
||||
(uchar*) &opt_ibx_lock_wait_timeout,
|
||||
(uchar*) &opt_ibx_lock_wait_timeout, 0, GET_UINT,
|
||||
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
|
||||
|
||||
{"ftwrl-wait-threshold", OPT_LOCK_WAIT_THRESHOLD,
|
||||
"This option specifies the query run time threshold which is used by "
|
||||
"innobackupex to detect long-running queries with a non-zero value "
|
||||
"of --ftwrl-wait-timeout. FTWRL is not started until such "
|
||||
"long-running queries exist. This option has no effect if "
|
||||
"--ftwrl-wait-timeout is 0. Default value is 60 seconds.",
|
||||
(uchar*) &opt_ibx_lock_wait_threshold,
|
||||
(uchar*) &opt_ibx_lock_wait_threshold, 0, GET_UINT,
|
||||
"Old disabled option which has no effect anymore (not needed "
|
||||
"with BACKUP LOCKS)",
|
||||
(uchar*) 0, (uchar*) 0, 0, GET_UINT,
|
||||
REQUIRED_ARG, 60, 0, 0, 0, 0, 0},
|
||||
|
||||
{"safe-slave-backup-timeout", OPT_SAFE_SLAVE_BACKUP_TIMEOUT,
|
||||
@ -864,10 +850,8 @@ ibx_init()
|
||||
opt_slave_info = opt_ibx_slave_info;
|
||||
opt_no_lock = opt_ibx_no_lock;
|
||||
opt_safe_slave_backup = opt_ibx_safe_slave_backup;
|
||||
opt_rsync = opt_ibx_rsync;
|
||||
opt_force_non_empty_dirs = opt_ibx_force_non_empty_dirs;
|
||||
opt_noversioncheck = opt_ibx_noversioncheck;
|
||||
opt_no_backup_locks = opt_ibx_no_backup_locks;
|
||||
opt_decompress = opt_ibx_decompress;
|
||||
|
||||
opt_incremental_history_name = opt_ibx_incremental_history_name;
|
||||
|
@ -32,29 +32,13 @@ Perform read filter context initialization that is common to all read
|
||||
filters. */
|
||||
static
|
||||
void
|
||||
common_init(
|
||||
/*========*/
|
||||
rf_pass_through_init(
|
||||
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter context */
|
||||
const xb_fil_cur_t* cursor) /*!<in: file cursor */
|
||||
{
|
||||
ctxt->offset = 0;
|
||||
ctxt->data_file_size = cursor->statinfo.st_size;
|
||||
ctxt->buffer_capacity = cursor->buf_size;
|
||||
ctxt->page_size = cursor->page_size;
|
||||
}
|
||||
|
||||
/****************************************************************//**
|
||||
Initialize the pass-through read filter. */
|
||||
static
|
||||
void
|
||||
rf_pass_through_init(
|
||||
/*=================*/
|
||||
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter context */
|
||||
const xb_fil_cur_t* cursor, /*!<in: file cursor */
|
||||
ulint space_id __attribute__((unused)))
|
||||
/*!<in: space id we are reading */
|
||||
{
|
||||
common_init(ctxt, cursor);
|
||||
}
|
||||
|
||||
/****************************************************************//**
|
||||
@ -65,143 +49,25 @@ rf_pass_through_get_next_batch(
|
||||
/*===========================*/
|
||||
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter
|
||||
context */
|
||||
ib_int64_t* read_batch_start, /*!<out: starting read
|
||||
int64_t* read_batch_start, /*!<out: starting read
|
||||
offset in bytes for the
|
||||
next batch of pages */
|
||||
ib_int64_t* read_batch_len) /*!<out: length in
|
||||
int64_t* read_batch_len) /*!<out: length in
|
||||
bytes of the next batch
|
||||
of pages */
|
||||
{
|
||||
*read_batch_start = ctxt->offset;
|
||||
*read_batch_len = ctxt->data_file_size - ctxt->offset;
|
||||
|
||||
if (*read_batch_len > (ib_int64_t)ctxt->buffer_capacity) {
|
||||
if (*read_batch_len > (int64_t)ctxt->buffer_capacity) {
|
||||
*read_batch_len = ctxt->buffer_capacity;
|
||||
}
|
||||
|
||||
ctxt->offset += *read_batch_len;
|
||||
}
|
||||
|
||||
/****************************************************************//**
|
||||
Deinitialize the pass-through read filter. */
|
||||
static
|
||||
void
|
||||
rf_pass_through_deinit(
|
||||
/*===================*/
|
||||
xb_read_filt_ctxt_t* ctxt __attribute__((unused)))
|
||||
/*!<in: read filter context */
|
||||
{
|
||||
}
|
||||
|
||||
/****************************************************************//**
|
||||
Initialize the changed page bitmap-based read filter. Assumes that
|
||||
the bitmap is already set up in changed_page_bitmap. */
|
||||
static
|
||||
void
|
||||
rf_bitmap_init(
|
||||
/*===========*/
|
||||
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter
|
||||
context */
|
||||
const xb_fil_cur_t* cursor, /*!<in: read cursor */
|
||||
ulint space_id) /*!<in: space id */
|
||||
{
|
||||
common_init(ctxt, cursor);
|
||||
ctxt->bitmap_range = xb_page_bitmap_range_init(changed_page_bitmap,
|
||||
space_id);
|
||||
ctxt->filter_batch_end = 0;
|
||||
}
|
||||
|
||||
/****************************************************************//**
|
||||
Get the next batch of pages for the bitmap read filter. */
|
||||
static
|
||||
void
|
||||
rf_bitmap_get_next_batch(
|
||||
/*=====================*/
|
||||
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter
|
||||
context */
|
||||
ib_int64_t* read_batch_start, /*!<out: starting read
|
||||
offset in bytes for the
|
||||
next batch of pages */
|
||||
ib_int64_t* read_batch_len) /*!<out: length in
|
||||
bytes of the next batch
|
||||
of pages */
|
||||
{
|
||||
ulint start_page_id;
|
||||
const ulint page_size = ctxt->page_size;
|
||||
|
||||
start_page_id = (ulint)(ctxt->offset / page_size);
|
||||
|
||||
xb_a (ctxt->offset % page_size == 0);
|
||||
|
||||
if (start_page_id == ctxt->filter_batch_end) {
|
||||
|
||||
/* Used up all the previous bitmap range, get some more */
|
||||
ulint next_page_id;
|
||||
|
||||
/* Find the next changed page using the bitmap */
|
||||
next_page_id = xb_page_bitmap_range_get_next_bit
|
||||
(ctxt->bitmap_range, TRUE);
|
||||
|
||||
if (next_page_id == ULINT_UNDEFINED) {
|
||||
*read_batch_len = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
ctxt->offset = next_page_id * page_size;
|
||||
|
||||
/* Find the end of the current changed page block by searching
|
||||
for the next cleared bitmap bit */
|
||||
ctxt->filter_batch_end
|
||||
= xb_page_bitmap_range_get_next_bit(ctxt->bitmap_range,
|
||||
FALSE);
|
||||
xb_a(next_page_id < ctxt->filter_batch_end);
|
||||
}
|
||||
|
||||
*read_batch_start = ctxt->offset;
|
||||
if (ctxt->filter_batch_end == ULINT_UNDEFINED) {
|
||||
/* No more cleared bits in the bitmap, need to copy all the
|
||||
remaining pages. */
|
||||
*read_batch_len = ctxt->data_file_size - ctxt->offset;
|
||||
} else {
|
||||
*read_batch_len = ctxt->filter_batch_end * page_size
|
||||
- ctxt->offset;
|
||||
}
|
||||
|
||||
/* If the page block is larger than the buffer capacity, limit it to
|
||||
buffer capacity. The subsequent invocations will continue returning
|
||||
the current block in buffer-sized pieces until ctxt->filter_batch_end
|
||||
is reached, trigerring the next bitmap query. */
|
||||
if (*read_batch_len > (ib_int64_t)ctxt->buffer_capacity) {
|
||||
*read_batch_len = ctxt->buffer_capacity;
|
||||
}
|
||||
|
||||
ctxt->offset += *read_batch_len;
|
||||
xb_a (ctxt->offset % page_size == 0);
|
||||
xb_a (*read_batch_start % page_size == 0);
|
||||
xb_a (*read_batch_len % page_size == 0);
|
||||
}
|
||||
|
||||
/****************************************************************//**
|
||||
Deinitialize the changed page bitmap-based read filter. */
|
||||
static
|
||||
void
|
||||
rf_bitmap_deinit(
|
||||
/*=============*/
|
||||
xb_read_filt_ctxt_t* ctxt) /*!<in/out: read filter context */
|
||||
{
|
||||
xb_page_bitmap_range_deinit(ctxt->bitmap_range);
|
||||
}
|
||||
|
||||
/* The pass-through read filter */
|
||||
xb_read_filt_t rf_pass_through = {
|
||||
&rf_pass_through_init,
|
||||
&rf_pass_through_get_next_batch,
|
||||
&rf_pass_through_deinit
|
||||
};
|
||||
|
||||
/* The changed page bitmap-based read filter */
|
||||
xb_read_filt_t rf_bitmap = {
|
||||
&rf_bitmap_init,
|
||||
&rf_bitmap_get_next_batch,
|
||||
&rf_bitmap_deinit
|
||||
};
|
||||
|
@ -25,42 +25,27 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
|
||||
#ifndef XB_READ_FILT_H
|
||||
#define XB_READ_FILT_H
|
||||
|
||||
#include "changed_page_bitmap.h"
|
||||
|
||||
typedef uint32_t space_id_t;
|
||||
#include <cstdint>
|
||||
#include <cstddef>
|
||||
|
||||
struct xb_fil_cur_t;
|
||||
|
||||
/* The read filter context */
|
||||
struct xb_read_filt_ctxt_t {
|
||||
ib_int64_t offset; /*!< current file offset */
|
||||
ib_int64_t data_file_size; /*!< data file size */
|
||||
int64_t offset; /*!< current file offset */
|
||||
int64_t data_file_size; /*!< data file size */
|
||||
size_t buffer_capacity;/*!< read buffer capacity */
|
||||
space_id_t space_id; /*!< space id */
|
||||
/* The following fields used only in bitmap filter */
|
||||
/* Move these to union if any other filters are added in future */
|
||||
xb_page_bitmap_range *bitmap_range; /*!< changed page bitmap range
|
||||
iterator for space_id */
|
||||
ulint page_size; /*!< page size */
|
||||
ulint filter_batch_end;/*!< the ending page id of the
|
||||
current changed page block in
|
||||
the bitmap */
|
||||
/** TODO: remove this default constructor */
|
||||
xb_read_filt_ctxt_t() : page_size(0) {}
|
||||
};
|
||||
|
||||
/* The read filter */
|
||||
struct xb_read_filt_t {
|
||||
void (*init)(xb_read_filt_ctxt_t* ctxt,
|
||||
const xb_fil_cur_t* cursor,
|
||||
ulint space_id);
|
||||
const xb_fil_cur_t* cursor);
|
||||
void (*get_next_batch)(xb_read_filt_ctxt_t* ctxt,
|
||||
ib_int64_t* read_batch_start,
|
||||
ib_int64_t* read_batch_len);
|
||||
void (*deinit)(xb_read_filt_ctxt_t* ctxt);
|
||||
int64_t* read_batch_start,
|
||||
int64_t* read_batch_len);
|
||||
};
|
||||
|
||||
extern xb_read_filt_t rf_pass_through;
|
||||
extern xb_read_filt_t rf_bitmap;
|
||||
|
||||
#endif
|
||||
|
50
extra/mariabackup/thread_pool.cc
Normal file
@ -0,0 +1,50 @@
|
||||
#include "thread_pool.h"
|
||||
#include "common.h"
|
||||
|
||||
bool ThreadPool::start(size_t threads_count) {
|
||||
if (!m_stopped)
|
||||
return false;
|
||||
m_stopped = false;
|
||||
for (unsigned i = 0; i < threads_count; ++i)
|
||||
m_threads.emplace_back(&ThreadPool::thread_func, this, i);
|
||||
return true;
|
||||
}
|
||||
|
||||
void ThreadPool::stop() {
|
||||
if (m_stopped)
|
||||
return;
|
||||
m_stop = true;
|
||||
m_cv.notify_all();
|
||||
for (auto &t : m_threads)
|
||||
t.join();
|
||||
m_stopped = true;
|
||||
}
|
||||
|
||||
void ThreadPool::push(ThreadPool::job_t &&j) {
|
||||
std::unique_lock<std::mutex> lock(m_mutex);
|
||||
m_jobs.push(j);
|
||||
lock.unlock();
|
||||
m_cv.notify_one();
|
||||
}
|
||||
|
||||
void ThreadPool::thread_func(unsigned thread_num) {
|
||||
if (my_thread_init())
|
||||
die("Can't init mysql thread");
|
||||
std::unique_lock<std::mutex> lock(m_mutex);
|
||||
while(true) {
|
||||
if (m_stop)
|
||||
goto exit;
|
||||
while (!m_jobs.empty()) {
|
||||
if (m_stop)
|
||||
goto exit;
|
||||
job_t j = std::move(m_jobs.front());
|
||||
m_jobs.pop();
|
||||
lock.unlock();
|
||||
j(thread_num);
|
||||
lock.lock();
|
||||
}
|
||||
m_cv.wait(lock, [&] { return m_stop || !m_jobs.empty(); });
|
||||
}
|
||||
exit:
|
||||
my_thread_end();
|
||||
}
|
62
extra/mariabackup/thread_pool.h
Normal file
@ -0,0 +1,62 @@
|
||||
#pragma once
#include <queue>
#include <vector>
#include <functional>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <atomic>
#include "trx0sys.h"

class ThreadPool {
public:
  typedef std::function<void(unsigned)> job_t;

  ThreadPool() { m_stop = false; m_stopped = true; }
  ThreadPool (ThreadPool &&other) = delete;
  ThreadPool & operator= (ThreadPool &&other) = delete;
  ThreadPool(const ThreadPool &) = delete;
  ThreadPool & operator= (const ThreadPool &) = delete;

  bool start(size_t threads_count);
  void stop();
  void push(job_t &&j);
  size_t threads_count() const { return m_threads.size(); }
private:
  void thread_func(unsigned thread_num);
  std::mutex m_mutex;
  std::condition_variable m_cv;
  std::queue<job_t> m_jobs;
  std::atomic<bool> m_stop;
  std::atomic<bool> m_stopped;
  std::vector<std::thread> m_threads;
};

class TasksGroup {
public:
  TasksGroup(ThreadPool &thread_pool) : m_thread_pool(thread_pool) {
    m_tasks_count = 0;
    m_tasks_result = 1;
  }
  void push_task(ThreadPool::job_t &&j) {
    ++m_tasks_count;
    m_thread_pool.push(std::forward<ThreadPool::job_t>(j));
  }
  void finish_task(int res) {
    --m_tasks_count;
    m_tasks_result.fetch_and(res);
  }
  int get_result() const { return m_tasks_result; }
  bool is_finished() const {
    return !m_tasks_count;
  }
  bool wait_for_finish() {
    while (!is_finished())
      std::this_thread::sleep_for(std::chrono::milliseconds(1));
    return get_result();
  }
private:
  ThreadPool &m_thread_pool;
  std::atomic<size_t> m_tasks_count;
  std::atomic<int> m_tasks_result;
};
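A hypothetical usage sketch tying the two classes together; the thread count, the file list and the copy_one_file() helper are stand-ins invented for this example and are not part of the commit:

#include <string>
#include <vector>
#include "thread_pool.h"

/* assumed helper, returns nonzero on success */
static int copy_one_file(const std::string &path, unsigned thread_num);

static bool copy_all(const std::vector<std::string> &files)
{
  ThreadPool pool;
  if (!pool.start(4))                  /* spawn four worker threads */
    return false;
  TasksGroup group(pool);
  for (const auto &f : files)
    group.push_task([&group, &f](unsigned thread_num) {
      /* finish_task(0) marks the whole group as failed; any non-zero
         result leaves the accumulated group result untouched. */
      group.finish_task(copy_one_file(f, thread_num));
    });
  bool res = group.wait_for_finish();  /* polls until the task count is 0 */
  pool.stop();
  return res;
}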
|
@ -144,6 +144,18 @@ wf_incremental_process(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile)
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Check whether TRX_SYS page has been changed */
|
||||
if (mach_read_from_4(page + FIL_PAGE_SPACE_ID)
|
||||
== TRX_SYS_SPACE
|
||||
&& mach_read_from_4(page + FIL_PAGE_OFFSET)
|
||||
== TRX_SYS_PAGE_NO) {
|
||||
msg(cursor->thread_n,
|
||||
"--incremental backup is impossible if "
|
||||
"the server had been restarted with "
|
||||
"different innodb_undo_tablespaces.");
|
||||
return false;
|
||||
}
|
||||
|
||||
/* updated page */
|
||||
if (cp->npages == page_size / 4) {
|
||||
/* flush buffer */
|
||||
|
@ -52,10 +52,12 @@ permission notice:
|
||||
#include <wsrep_api.h>
|
||||
|
||||
/*! Name of file where Galera info is stored on recovery */
|
||||
#define XB_GALERA_INFO_FILENAME "xtrabackup_galera_info"
|
||||
#define MB_GALERA_INFO_FILENAME "mariadb_backup_galera_info"
|
||||
#define XB_GALERA_DONOR_INFO_FILENAME "donor_galera_info"
|
||||
|
||||
/* backup copy of galera info file as sent by donor */
|
||||
#define MB_GALERA_INFO_FILENAME_SST "mariadb_backup_galera_info_SST"
|
||||
|
||||
/***********************************************************************
|
||||
Store Galera checkpoint info in the MB_GALERA_INFO_FILENAME file, if that
|
||||
information is present in the trx system header. Otherwise, do nothing. */
|
||||
@ -69,20 +71,45 @@ xb_write_galera_info(bool incremental_prepare)
|
||||
long long seqno;
|
||||
MY_STAT statinfo;
|
||||
|
||||
/* Do not overwrite an existing file to be compatible with
|
||||
servers with older server versions */
|
||||
if (!incremental_prepare &&
|
||||
(my_stat(XB_GALERA_INFO_FILENAME, &statinfo, MYF(0)) != NULL ||
|
||||
my_stat(MB_GALERA_INFO_FILENAME, &statinfo, MYF(0)) != NULL)) {
|
||||
xid.null();
|
||||
|
||||
/* try to read last wsrep XID from innodb rsegs, we will use it
|
||||
instead of galera info file received from donor
|
||||
*/
|
||||
if (!trx_rseg_read_wsrep_checkpoint(xid)) {
|
||||
/* no worries yet, SST may have brought in galera info file
|
||||
from some old MariaDB version, which does not support
|
||||
wsrep XID storing in innodb rsegs
|
||||
*/
|
||||
return;
|
||||
}
|
||||
|
||||
xid.null();
|
||||
/* if SST brought in galera info file, copy it as *_SST file
|
||||
this will not be used, saved just for future reference
|
||||
*/
|
||||
if (my_stat(MB_GALERA_INFO_FILENAME, &statinfo, MYF(0))) {
|
||||
FILE* fp_in = fopen(MB_GALERA_INFO_FILENAME, "r");
|
||||
FILE* fp_out = fopen(MB_GALERA_INFO_FILENAME_SST, "w");
|
||||
|
||||
if (!trx_rseg_read_wsrep_checkpoint(xid)) {
|
||||
|
||||
return;
|
||||
char buf[BUFSIZ] = {'\0'};
|
||||
size_t size;
|
||||
while ((size = fread(buf, 1, BUFSIZ, fp_in))) {
|
||||
if (fwrite(buf, 1, size, fp_out) != size) {
|
||||
die(
|
||||
"could not write to "
|
||||
MB_GALERA_INFO_FILENAME_SST
|
||||
", errno = %d\n",
|
||||
errno);
|
||||
}
|
||||
}
|
||||
if (!feof(fp_in)) {
|
||||
die(
|
||||
MB_GALERA_INFO_FILENAME_SST
|
||||
" not fully copied\n"
|
||||
);
|
||||
}
|
||||
fclose(fp_out);
|
||||
fclose(fp_in);
|
||||
}
|
||||
|
||||
wsrep_uuid_t uuid;
|
||||
@ -99,7 +126,6 @@ xb_write_galera_info(bool incremental_prepare)
|
||||
"could not create " MB_GALERA_INFO_FILENAME
|
||||
", errno = %d\n",
|
||||
errno);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
seqno = wsrep_xid_seqno(&xid);
|
||||
|
@ -1,5 +0,0 @@
|
||||
#include <mysql.h>
|
||||
#include <string>
|
||||
extern void xb_plugin_backup_init(MYSQL *mysql);
|
||||
extern const char* xb_plugin_get_config();
|
||||
extern void xb_plugin_prepare_init(int argc, char **argv, const char *dir);
|
@ -255,7 +255,7 @@ mode_create(int argc, char **argv)
|
||||
return 1;
|
||||
}
|
||||
|
||||
stream = xb_stream_write_new();
|
||||
stream = xb_stream_write_new(nullptr, nullptr);
|
||||
if (stream == NULL) {
|
||||
msg("%s: xb_stream_write_new() failed.", my_progname);
|
||||
return 1;
|
||||
@ -280,7 +280,7 @@ mode_create(int argc, char **argv)
|
||||
goto err;
|
||||
}
|
||||
|
||||
file = xb_stream_write_open(stream, filepath, &mystat, NULL, NULL);
|
||||
file = xb_stream_write_open(stream, filepath, &mystat, false);
|
||||
if (file == NULL) {
|
||||
goto err;
|
||||
}
|
||||
@ -307,7 +307,8 @@ err:
|
||||
|
||||
static
|
||||
file_entry_t *
|
||||
file_entry_new(extract_ctxt_t *ctxt, const char *path, uint pathlen)
|
||||
file_entry_new(extract_ctxt_t *ctxt, const char *path, uint pathlen,
|
||||
uchar chunk_flags)
|
||||
{
|
||||
file_entry_t *entry;
|
||||
ds_file_t *file;
|
||||
@ -324,7 +325,8 @@ file_entry_new(extract_ctxt_t *ctxt, const char *path, uint pathlen)
|
||||
}
|
||||
entry->pathlen = pathlen;
|
||||
|
||||
file = ds_open(ctxt->ds_ctxt, path, NULL);
|
||||
file = ds_open(ctxt->ds_ctxt, path, NULL,
|
||||
chunk_flags == XB_STREAM_FLAG_REWRITE);
|
||||
|
||||
if (file == NULL) {
|
||||
msg("%s: failed to create file.", my_progname);
|
||||
@ -405,10 +407,50 @@ extract_worker_thread_func(void *arg)
|
||||
(uchar *) chunk.path,
|
||||
chunk.pathlen);
|
||||
|
||||
if (entry && (chunk.type == XB_CHUNK_TYPE_REMOVE ||
|
||||
chunk.type == XB_CHUNK_TYPE_RENAME)) {
|
||||
msg("%s: rename and remove chunks can not be applied to opened file: %s",
|
||||
my_progname, chunk.path);
|
||||
pthread_mutex_unlock(ctxt->mutex);
|
||||
break;
|
||||
}
|
||||
|
||||
if (chunk.type == XB_CHUNK_TYPE_REMOVE) {
|
||||
if (ds_remove(ctxt->ds_ctxt, chunk.path)) {
|
||||
msg("%s: error on file removing: %s", my_progname, chunk.path);
|
||||
pthread_mutex_unlock(ctxt->mutex);
|
||||
res = XB_STREAM_READ_ERROR;
|
||||
break;
|
||||
}
|
||||
pthread_mutex_unlock(ctxt->mutex);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (chunk.type == XB_CHUNK_TYPE_RENAME) {
|
||||
if (my_hash_search(ctxt->filehash,
|
||||
reinterpret_cast<const uchar *>(chunk.data), chunk.length)) {
|
||||
msg("%s: rename chunks can not be applied to opened file: %s",
|
||||
my_progname, reinterpret_cast<const uchar *>(chunk.data));
|
||||
pthread_mutex_unlock(ctxt->mutex);
|
||||
break;
|
||||
}
|
||||
if (ds_rename(ctxt->ds_ctxt, chunk.path,
|
||||
reinterpret_cast<const char *>(chunk.data))) {
|
||||
msg("%s: error on file renaming: %s to %s", my_progname,
|
||||
reinterpret_cast<const char *>(chunk.data), chunk.path);
|
||||
pthread_mutex_unlock(ctxt->mutex);
|
||||
res = XB_STREAM_READ_ERROR;
|
||||
break;
|
||||
}
|
||||
pthread_mutex_unlock(ctxt->mutex);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (entry == NULL) {
|
||||
entry = file_entry_new(ctxt,
|
||||
chunk.path,
|
||||
chunk.pathlen);
|
||||
chunk.pathlen,
|
||||
chunk.flags);
|
||||
if (entry == NULL) {
|
||||
pthread_mutex_unlock(ctxt->mutex);
|
||||
break;
|
||||
@ -425,6 +467,18 @@ extract_worker_thread_func(void *arg)
|
||||
|
||||
pthread_mutex_unlock(ctxt->mutex);
|
||||
|
||||
if (chunk.type == XB_CHUNK_TYPE_SEEK) {
|
||||
if (ds_seek_set(entry->file, chunk.offset)) {
|
||||
msg("%s: my_seek() failed.", my_progname);
|
||||
pthread_mutex_unlock(&entry->mutex);
|
||||
res = XB_STREAM_READ_ERROR;
|
||||
break;
|
||||
}
|
||||
entry->offset = chunk.offset;
|
||||
pthread_mutex_unlock(&entry->mutex);
|
||||
continue;
|
||||
}
|
||||
|
||||
res = xb_stream_validate_checksum(&chunk);
|
||||
|
||||
if (res != XB_STREAM_READ_CHUNK) {
|
||||
|
@ -29,6 +29,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
|
||||
/* Chunk flags */
|
||||
/* Chunk can be ignored if unknown version/format */
|
||||
#define XB_STREAM_FLAG_IGNORABLE 0x01
|
||||
#define XB_STREAM_FLAG_REWRITE 0x02
|
||||
|
||||
/* Magic + flags + type + path len */
|
||||
#define CHUNK_HEADER_CONSTANT_LEN ((sizeof(XB_STREAM_CHUNK_MAGIC) - 1) + \
|
||||
@ -48,18 +49,21 @@ typedef enum {
|
||||
/************************************************************************
|
||||
Write interface. */
|
||||
|
||||
typedef ssize_t xb_stream_write_callback(xb_wstream_file_t *file,
|
||||
typedef ssize_t xb_stream_write_callback(
|
||||
void *userdata,
|
||||
const void *buf, size_t len);
|
||||
|
||||
xb_wstream_t *xb_stream_write_new(void);
|
||||
|
||||
xb_wstream_t *xb_stream_write_new(
|
||||
xb_stream_write_callback *write_callback, void *user_data);
|
||||
xb_wstream_file_t *xb_stream_write_open(xb_wstream_t *stream, const char *path,
|
||||
MY_STAT *mystat, void *userdata,
|
||||
xb_stream_write_callback *onwrite);
|
||||
const MY_STAT *mystat, bool rewrite);
|
||||
|
||||
int xb_stream_write_data(xb_wstream_file_t *file, const void *buf, size_t len);
|
||||
|
||||
int xb_stream_write_seek_set(xb_wstream_file_t *file, my_off_t offset);
|
||||
int xb_stream_write_remove(xb_wstream_t *stream, const char *path);
|
||||
int
|
||||
xb_stream_write_rename(
|
||||
xb_wstream_t *stream, const char *old_path, const char *new_path);
|
||||
int xb_stream_write_close(xb_wstream_file_t *file);
|
||||
|
||||
int xb_stream_write_done(xb_wstream_t *stream);
|
||||
@ -76,6 +80,9 @@ typedef enum {
|
||||
typedef enum {
|
||||
XB_CHUNK_TYPE_UNKNOWN = '\0',
|
||||
XB_CHUNK_TYPE_PAYLOAD = 'P',
|
||||
XB_CHUNK_TYPE_RENAME = 'R',
|
||||
XB_CHUNK_TYPE_REMOVE = 'D',
|
||||
XB_CHUNK_TYPE_SEEK = 'S',
|
||||
XB_CHUNK_TYPE_EOF = 'E'
|
||||
} xb_chunk_type_t;
|
||||
|
||||
|
@ -59,6 +59,9 @@ validate_chunk_type(uchar code)
|
||||
{
|
||||
switch ((xb_chunk_type_t) code) {
|
||||
case XB_CHUNK_TYPE_PAYLOAD:
|
||||
case XB_CHUNK_TYPE_RENAME:
|
||||
case XB_CHUNK_TYPE_REMOVE:
|
||||
case XB_CHUNK_TYPE_SEEK:
|
||||
case XB_CHUNK_TYPE_EOF:
|
||||
return (xb_chunk_type_t) code;
|
||||
default:
|
||||
@ -159,57 +162,91 @@ xb_stream_read_chunk(xb_rstream_t *stream, xb_rstream_chunk_t *chunk)
|
||||
}
|
||||
chunk->path[pathlen] = '\0';
|
||||
|
||||
if (chunk->type == XB_CHUNK_TYPE_EOF) {
|
||||
if (chunk->type == XB_CHUNK_TYPE_EOF ||
|
||||
chunk->type == XB_CHUNK_TYPE_REMOVE) {
|
||||
return XB_STREAM_READ_CHUNK;
|
||||
}
|
||||
|
||||
/* Payload length */
|
||||
F_READ(tmpbuf, 16);
|
||||
ullval = uint8korr(tmpbuf);
|
||||
if (ullval > (ulonglong) SIZE_T_MAX) {
|
||||
msg("xb_stream_read_chunk(): chunk length is too large at "
|
||||
"offset 0x%llx: 0x%llx.", (ulonglong) stream->offset,
|
||||
ullval);
|
||||
goto err;
|
||||
}
|
||||
chunk->length = (size_t) ullval;
|
||||
stream->offset += 8;
|
||||
|
||||
/* Payload offset */
|
||||
ullval = uint8korr(tmpbuf + 8);
|
||||
if (ullval > (ulonglong) MY_OFF_T_MAX) {
|
||||
msg("xb_stream_read_chunk(): chunk offset is too large at "
|
||||
"offset 0x%llx: 0x%llx.", (ulonglong) stream->offset,
|
||||
ullval);
|
||||
goto err;
|
||||
}
|
||||
chunk->offset = (my_off_t) ullval;
|
||||
stream->offset += 8;
|
||||
|
||||
/* Reallocate the buffer if needed */
|
||||
if (chunk->length > chunk->buflen) {
|
||||
chunk->data = my_realloc(PSI_NOT_INSTRUMENTED, chunk->data, chunk->length,
|
||||
MYF(MY_WME | MY_ALLOW_ZERO_PTR));
|
||||
if (chunk->data == NULL) {
|
||||
msg("xb_stream_read_chunk(): failed to increase buffer "
|
||||
"to %lu bytes.", (ulong) chunk->length);
|
||||
if (chunk->type == XB_CHUNK_TYPE_RENAME) {
|
||||
F_READ(tmpbuf, 4);
|
||||
size_t new_pathlen = uint4korr(tmpbuf);
|
||||
if (new_pathlen >= FN_REFLEN) {
|
||||
msg("xb_stream_read_chunk(): path length (%lu) for new name of 'rename'"
|
||||
" chunk is too large", (ulong) new_pathlen);
|
||||
goto err;
|
||||
}
|
||||
chunk->buflen = chunk->length;
|
||||
chunk->length = new_pathlen;
|
||||
stream->offset +=4;
|
||||
}
|
||||
else if (chunk->type == XB_CHUNK_TYPE_SEEK) {
|
||||
F_READ(tmpbuf, 8);
|
||||
chunk->offset = uint8korr(tmpbuf);
|
||||
stream->offset += 8;
|
||||
return XB_STREAM_READ_CHUNK;
|
||||
}
|
||||
else {
|
||||
/* Payload length */
|
||||
F_READ(tmpbuf, 16);
|
||||
ullval = uint8korr(tmpbuf);
|
||||
if (ullval > (ulonglong) SIZE_T_MAX) {
|
||||
msg("xb_stream_read_chunk(): chunk length is too large at "
|
||||
"offset 0x%llx: 0x%llx.", (ulonglong) stream->offset,
|
||||
ullval);
|
||||
goto err;
|
||||
}
|
||||
chunk->length = (size_t) ullval;
|
||||
stream->offset += 8;
|
||||
|
||||
/* Payload offset */
|
||||
ullval = uint8korr(tmpbuf + 8);
|
||||
if (ullval > (ulonglong) MY_OFF_T_MAX) {
|
||||
msg("xb_stream_read_chunk(): chunk offset is too large at "
|
||||
"offset 0x%llx: 0x%llx.", (ulonglong) stream->offset,
|
||||
ullval);
|
||||
goto err;
|
||||
}
|
||||
chunk->offset = (my_off_t) ullval;
|
||||
stream->offset += 8;
|
||||
}
|
||||
|
||||
/* Checksum */
|
||||
F_READ(tmpbuf, 4);
|
||||
chunk->checksum = uint4korr(tmpbuf);
|
||||
chunk->checksum_offset = stream->offset;
|
||||
/* Reallocate the buffer if needed, take into account trailing '\0' for
|
||||
new file name in the case of XB_CHUNK_TYPE_RENAME */
|
||||
if (chunk->length + 1 > chunk->buflen) {
|
||||
chunk->data = my_realloc(PSI_NOT_INSTRUMENTED, chunk->data,
|
||||
chunk->length + 1, MYF(MY_WME | MY_ALLOW_ZERO_PTR));
|
||||
if (chunk->data == NULL) {
|
||||
msg("xb_stream_read_chunk(): failed to increase buffer "
|
||||
"to %lu bytes.", (ulong) chunk->length + 1);
|
||||
goto err;
|
||||
}
|
||||
chunk->buflen = chunk->length + 1;
|
||||
}
|
||||
|
||||
/* Payload */
|
||||
if (chunk->length > 0) {
|
||||
if (chunk->type == XB_CHUNK_TYPE_RENAME) {
|
||||
if (chunk->length == 0) {
|
||||
msg("xb_stream_read_chunk(): failed to read new name for file to rename "
|
||||
": %s", chunk->path);
|
||||
goto err;
|
||||
}
|
||||
F_READ(chunk->data, chunk->length);
|
||||
stream->offset += chunk->length;
|
||||
reinterpret_cast<char *>(chunk->data)[chunk->length] = '\0';
|
||||
++chunk->length;
|
||||
}
|
||||
else {
|
||||
/* Checksum */
|
||||
F_READ(tmpbuf, 4);
|
||||
chunk->checksum = uint4korr(tmpbuf);
|
||||
chunk->checksum_offset = stream->offset;
|
||||
|
||||
stream->offset += 4;
|
||||
/* Payload */
|
||||
if (chunk->length > 0) {
|
||||
F_READ(chunk->data, chunk->length);
|
||||
stream->offset += chunk->length;
|
||||
}
|
||||
|
||||
stream->offset += 4;
|
||||
}
|
||||
|
||||
return XB_STREAM_READ_CHUNK;
|
||||
|
||||
|
@ -21,6 +21,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
#include <my_global.h>
#include <my_base.h>
#include <zlib.h>
#include <stdint.h>
#include "common.h"
#include "xbstream.h"

@ -29,6 +30,8 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA

struct xb_wstream_struct {
pthread_mutex_t mutex;
xb_stream_write_callback *write;
void *user_data;
};

struct xb_wstream_file_struct {
@ -39,8 +42,7 @@ struct xb_wstream_file_struct {
char *chunk_ptr;
size_t chunk_free;
my_off_t offset;
void *userdata;
xb_stream_write_callback *write;
bool rewrite;
};

static int xb_stream_flush(xb_wstream_file_t *file);
@ -50,7 +52,7 @@ static int xb_stream_write_eof(xb_wstream_file_t *file);

static
ssize_t
xb_stream_default_write_callback(xb_wstream_file_t *file __attribute__((unused)),
xb_stream_default_write_callback(
void *userdata __attribute__((unused)),
const void *buf, size_t len)
{
@ -60,21 +62,31 @@ xb_stream_default_write_callback(xb_wstream_file_t *file __attribute__((unused))
}

xb_wstream_t *
xb_stream_write_new(void)
xb_stream_write_new(
xb_stream_write_callback *write_callback, void *user_data)
{
xb_wstream_t *stream;

stream = (xb_wstream_t *) my_malloc(PSI_NOT_INSTRUMENTED, sizeof(xb_wstream_t), MYF(MY_FAE));
pthread_mutex_init(&stream->mutex, NULL);
if (write_callback) {
#ifdef _WIN32
setmode(fileno(stdout), _O_BINARY);
#endif
stream->write = write_callback;
stream->user_data = user_data;
}
else {
stream->write = xb_stream_default_write_callback;
stream->user_data = user_data;
}

return stream;;
}

xb_wstream_file_t *
xb_stream_write_open(xb_wstream_t *stream, const char *path,
MY_STAT *mystat __attribute__((unused)),
void *userdata,
xb_stream_write_callback *onwrite)
const MY_STAT *mystat __attribute__((unused)), bool rewrite)
{
xb_wstream_file_t *file;
size_t path_len;
@ -109,16 +121,7 @@ xb_stream_write_open(xb_wstream_t *stream, const char *path,
file->offset = 0;
file->chunk_ptr = file->chunk;
file->chunk_free = XB_STREAM_MIN_CHUNK_SIZE;
if (onwrite) {
#ifdef _WIN32
setmode(fileno(stdout), _O_BINARY);
#endif
file->userdata = userdata;
file->write = onwrite;
} else {
file->userdata = NULL;
file->write = xb_stream_default_write_callback;
}
file->rewrite = rewrite;

return file;
}
@ -202,7 +205,8 @@ xb_stream_write_chunk(xb_wstream_file_t *file, const void *buf, size_t len)
memcpy(ptr, XB_STREAM_CHUNK_MAGIC, sizeof(XB_STREAM_CHUNK_MAGIC) - 1);
ptr += sizeof(XB_STREAM_CHUNK_MAGIC) - 1;

*ptr++ = 0; /* Chunk flags */
*ptr++ =
file->rewrite ? XB_STREAM_FLAG_REWRITE : 0; /* Chunk flags */

*ptr++ = (uchar) XB_CHUNK_TYPE_PAYLOAD; /* Chunk type */

@ -227,11 +231,11 @@ xb_stream_write_chunk(xb_wstream_file_t *file, const void *buf, size_t len)

xb_ad(ptr <= tmpbuf + sizeof(tmpbuf));

if (file->write(file, file->userdata, tmpbuf, ptr-tmpbuf) == -1)
if (stream->write(stream->user_data, tmpbuf, ptr-tmpbuf) == -1)
goto err;

if (file->write(file, file->userdata, buf, len) == -1) /* Payload */
if (stream->write(stream->user_data, buf, len) == -1) /* Payload */
goto err;

file->offset+= len;
@ -247,6 +251,38 @@ err:
return 1;
}

int xb_stream_write_seek_set(xb_wstream_file_t *file, my_off_t offset)
{
/* Chunk magic + flags + chunk type + path_len + path + offset */
uchar tmpbuf[sizeof(XB_STREAM_CHUNK_MAGIC) - 1 + 1 + 1 + 4 +
FN_REFLEN + 8];
int error = 0;
xb_wstream_t *stream = file->stream;
uchar *ptr = tmpbuf;
/* Chunk magic */
memcpy(ptr, XB_STREAM_CHUNK_MAGIC, sizeof(XB_STREAM_CHUNK_MAGIC) - 1);
ptr += sizeof(XB_STREAM_CHUNK_MAGIC) - 1;
*ptr++ = 0; /* Chunk flags */
*ptr++ = (uchar) XB_CHUNK_TYPE_SEEK; /* Chunk type */
int4store(ptr, file->path_len); /* Path length */
ptr += 4;
memcpy(ptr, file->path, file->path_len); /* Path */
ptr += file->path_len;
int8store(ptr, static_cast<int64_t>(offset)); /* Offset */
ptr += 8;
if (xb_stream_flush(file))
return 1;
pthread_mutex_lock(&stream->mutex);
if (stream->write(stream->user_data, tmpbuf, ptr-tmpbuf) == -1)
error = 1;
if (!error)
file->offset = offset;
pthread_mutex_unlock(&stream->mutex);
if (xb_stream_flush(file))
return 1;
return error;
}

static
int
xb_stream_write_eof(xb_wstream_file_t *file)
@ -278,7 +314,7 @@ xb_stream_write_eof(xb_wstream_file_t *file)

xb_ad(ptr <= tmpbuf + sizeof(tmpbuf));

if (file->write(file, file->userdata, tmpbuf,
if (stream->write(stream->user_data, tmpbuf,
(ulonglong) (ptr - tmpbuf)) == -1)
goto err;

@ -291,3 +327,77 @@ err:

return 1;
}

int
xb_stream_write_remove(xb_wstream_t *stream, const char *path) {
/* Chunk magic + flags + chunk type + path_len + path */
uchar tmpbuf[sizeof(XB_STREAM_CHUNK_MAGIC) - 1 + 1 + 1 + 4 + FN_REFLEN];
uchar *ptr = tmpbuf;
/* Chunk magic */
memcpy(ptr, XB_STREAM_CHUNK_MAGIC, sizeof(XB_STREAM_CHUNK_MAGIC) - 1);
ptr += sizeof(XB_STREAM_CHUNK_MAGIC) - 1;

*ptr++ = 0; /* Chunk flags */

*ptr++ = (uchar) XB_CHUNK_TYPE_REMOVE; /* Chunk type */
size_t path_len = strlen(path);
int4store(ptr, path_len); /* Path length */
ptr += 4;

memcpy(ptr, path, path_len); /* Path */
ptr += path_len;

xb_ad(ptr <= tmpbuf + sizeof(tmpbuf));

pthread_mutex_lock(&stream->mutex);

ssize_t result = stream->write(stream->user_data, tmpbuf,
(ulonglong) (ptr - tmpbuf));

pthread_mutex_unlock(&stream->mutex);

return result < 0;

}

int
xb_stream_write_rename(
xb_wstream_t *stream, const char *old_path, const char *new_path) {
/* Chunk magic + flags + chunk type + path_len + path + path_len + path*/
uchar tmpbuf[sizeof(XB_STREAM_CHUNK_MAGIC) - 1 + 1 + 1 +
4 + FN_REFLEN + 4 + FN_REFLEN];
uchar *ptr = tmpbuf;
/* Chunk magic */
memcpy(ptr, XB_STREAM_CHUNK_MAGIC, sizeof(XB_STREAM_CHUNK_MAGIC) - 1);
ptr += sizeof(XB_STREAM_CHUNK_MAGIC) - 1;

*ptr++ = 0; /* Chunk flags */

*ptr++ = (uchar) XB_CHUNK_TYPE_RENAME; /* Chunk type */
size_t path_len = strlen(old_path);
int4store(ptr, path_len); /* Path length */
ptr += 4;

memcpy(ptr, old_path, path_len); /* Path */
ptr += path_len;

path_len = strlen(new_path);
int4store(ptr, path_len); /* Path length */
ptr += 4;

memcpy(ptr, new_path, path_len); /* Path */
ptr += path_len;

xb_ad(ptr <= tmpbuf + sizeof(tmpbuf));

pthread_mutex_lock(&stream->mutex);

ssize_t result = stream->write(stream->user_data, tmpbuf,
(ulonglong) (ptr - tmpbuf));

pthread_mutex_unlock(&stream->mutex);

return result < 0;
}

File diff suppressed because it is too large

@ -24,8 +24,15 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
#include <my_getopt.h>
#include "datasink.h"
#include "xbstream.h"
#include "changed_page_bitmap.h"
#include "fil0fil.h"
#include <set>
#include "handler.h"

#include <utility>
#include <vector>
#include <tuple>
#include <functional>

#define XB_TOOL_NAME "mariadb-backup"
#define XB_HISTORY_TABLE "mysql.mariadb_backup_history"
@ -84,8 +91,6 @@ extern my_bool xb_backup_rocksdb;

extern uint opt_protocol;

extern xb_page_bitmap *changed_page_bitmap;

extern char *xtrabackup_incremental;
extern my_bool xtrabackup_incremental_force_scan;

@ -112,7 +117,7 @@ extern my_bool xtrabackup_decrypt_decompress;
extern char *innobase_data_file_path;
extern longlong innobase_page_size;

extern int xtrabackup_parallel;
extern uint xtrabackup_parallel;

extern my_bool xb_close_files;
extern const char *xtrabackup_compress_alg;
@ -131,7 +136,6 @@ extern my_bool opt_galera_info;
extern my_bool opt_slave_info;
extern my_bool opt_no_lock;
extern my_bool opt_safe_slave_backup;
extern my_bool opt_rsync;
extern my_bool opt_force_non_empty_dirs;
extern my_bool opt_noversioncheck;
extern my_bool opt_no_backup_locks;
@ -288,15 +292,40 @@ fil_file_readdir_next_file(
os_file_stat_t* info); /*!< in/out: buffer where the
info is returned */

#ifndef DBUG_OFF
#include <fil0fil.h>
extern void dbug_mariabackup_event(const char *event,
const fil_space_t::name_type key);
const char *convert_dst(const char *dst);

#define DBUG_MARIABACKUP_EVENT(A, B) \
DBUG_EXECUTE_IF("mariabackup_events", dbug_mariabackup_event(A, B);)
#else
#define DBUG_MARIABACKUP_EVENT(A, B) /* empty */
#endif // DBUG_OFF
std::string get_table_version_from_image(const std::vector<uchar> &frm_image);
std::pair<bool, legacy_db_type>
get_table_engine_from_image(const std::vector<uchar> &frm_image);
std::string read_table_version_id(File file);

std::string convert_tablename_to_filepath(
const char *data_dir_path, const std::string &db, const std::string &table);

std::tuple<std::string, std::string, std::string>
convert_filepath_to_tablename(const char *filepath);

typedef std::string table_key_t;

inline table_key_t table_key(const std::string &db, const std::string &table) {
return std::string(db).append(".").append(table);
};

inline table_key_t table_key(const char *db, const char *table) {
return std::string(db).append(".").append(table);
};

typedef std::function<void(std::string, std::string, std::string)>
post_copy_table_hook_t;

my_bool
check_if_skip_table(
/******************/
const char* name); /*!< in: path to the table */

bool is_log_table(const char *dbname, const char *tablename);
bool is_stats_table(const char *dbname, const char *tablename);

extern my_bool xtrabackup_copy_back;
extern my_bool xtrabackup_move_back;
#endif /* XB_XTRABACKUP_H */

@ -1,86 +1,57 @@
IF(MSVC_INTEL)
PROJECT(wolfssl C ASM_MASM)
ELSEIF(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|amd64")
PROJECT(wolfssl C ASM)
PROJECT(wolfssl C ASM)
ELSE()
PROJECT(wolfssl C)
ENDIF()

IF(CMAKE_SIZEOF_VOID_P MATCHES 8)
IF(MSVC_INTEL)
IF(MSVC_INTEL AND NOT (CMAKE_C_COMPILER_ID MATCHES Clang))
SET(WOLFSSL_INTELASM ON)
SET(WOLFSSL_X86_64_BUILD 1)
SET(HAVE_INTEL_RDSEED 1)
SET(HAVE_INTEL_RDRAND 1)
ELSEIF(CMAKE_ASM_COMPILER_ID MATCHES "Clang" AND CMAKE_VERSION VERSION_LESS 3.16)

# WolfSSL 5.5.4 bug workaround below does not work, due to some CMake bug
ELSEIF(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|amd64")
SET(WOLFSSL_X86_64_BUILD 1)
IF(CMAKE_C_COMPILER_ID MATCHES GNU AND CMAKE_C_COMPILER_VERSION VERSION_LESS 4.9)
MESSAGE_ONCE(NO_INTEL_ASSEMBLY "Disable Intel assembly for WolfSSL - compiler is too old")
ELSEIF(WITH_MSAN)
MESSAGE_ONCE(MSAN_CANT_HANDLE_IT "Disable Intel assembly for WolfSSL - MSAN can't handle it")
ELSE()
IF(WITH_MSAN)
MESSAGE_ONCE(MSAN_CANT_HANDLE_IT
"Disable Intel assembly for WolfSSL - MSAN can't handle it")
ELSE()
MY_CHECK_C_COMPILER_FLAG(-maes)
MY_CHECK_C_COMPILER_FLAG(-msse4)
MY_CHECK_C_COMPILER_FLAG(-mpclmul)
IF(have_C__maes AND have_C__msse4 AND have_C__mpclmul)
SET(WOLFSSL_INTELASM ON)
MY_CHECK_C_COMPILER_FLAG(-maes)
MY_CHECK_C_COMPILER_FLAG(-msse4)
MY_CHECK_C_COMPILER_FLAG(-mpclmul)
IF(have_C__maes AND have_C__msse4 AND have_C__mpclmul)
SET(WOLFSSL_INTELASM ON)
MY_CHECK_C_COMPILER_FLAG(-mrdrnd)
MY_CHECK_C_COMPILER_FLAG(-mrdseed)
IF(have_C__mrdrnd)
SET(HAVE_INTEL_RDRAND ON)
ENDIF()
IF(have_C__mrdseed)
SET(HAVE_INTEL_RDSEED ON)
ENDIF()
ENDIF()
MY_CHECK_C_COMPILER_FLAG(-mrdrnd)
MY_CHECK_C_COMPILER_FLAG(-mrdseed)
IF(have_C__mrdrnd)
SET(HAVE_INTEL_RDRAND ON)
ENDIF()
IF(have_C__mrdseed)
SET(HAVE_INTEL_RDSEED ON)
ENDIF()
ENDIF()
ENDIF()
ENDIF()

SET(WOLFSSL_SRCDIR ${CMAKE_CURRENT_SOURCE_DIR}/wolfssl/src)
ADD_DEFINITIONS(${SSL_DEFINES})

SET(WOLFSSL_SOURCES
${WOLFSSL_SRCDIR}/crl.c
${WOLFSSL_SRCDIR}/internal.c
${WOLFSSL_SRCDIR}/keys.c
${WOLFSSL_SRCDIR}/tls.c
${WOLFSSL_SRCDIR}/wolfio.c
${WOLFSSL_SRCDIR}/ocsp.c
${WOLFSSL_SRCDIR}/ssl.c
${WOLFSSL_SRCDIR}/tls13.c)

ADD_DEFINITIONS(-DWOLFSSL_LIB -DBUILDING_WOLFSSL)

INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/wolfssl)
IF(MSVC)
# size_t to long truncation warning
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -wd4267 -wd4334 -wd4028 -wd4244")
ENDIF()

ADD_CONVENIENCE_LIBRARY(wolfssl ${WOLFSSL_SOURCES})

# Workaround linker crash with older Ubuntu binutils
# e.g aborting at ../../bfd/merge.c line 873 in _bfd_merged_section_offset
IF(CMAKE_SYSTEM_NAME MATCHES "Linux")
STRING(REPLACE "-g " "-g1 " CMAKE_C_FLAGS_RELWITHDEBINFO
${CMAKE_C_FLAGS_RELWITHDEBINFO})
STRING(REPLACE "-g " "-g1 " CMAKE_C_FLAGS_DEBUG
${CMAKE_C_FLAGS_DEBUG})
STRING(REPLACE "-ggdb3 " " " CMAKE_C_FLAGS_RELWITHDEBINFO
${CMAKE_C_FLAGS_RELWITHDEBINFO})
STRING(REPLACE "-ggdb3 " " " CMAKE_C_FLAGS_DEBUG
${CMAKE_C_FLAGS_DEBUG})
ENDIF()

SET(WOLFCRYPT_SRCDIR ${CMAKE_CURRENT_SOURCE_DIR}/wolfssl/wolfcrypt/src)
SET(WOLFCRYPT_SOURCES
ADD_DEFINITIONS(${SSL_DEFINES})
ADD_DEFINITIONS(-DWOLFSSL_LIB -DBUILDING_WOLFSSL)
ADD_DEFINITIONS(-DWOLFSSL_SP_4096)
INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/wolfssl)
INCLUDE_DIRECTORIES(${SSL_INCLUDE_DIRS})

add_library(wolfssl STATIC
${WOLFSSL_SRCDIR}/crl.c
${WOLFSSL_SRCDIR}/internal.c
${WOLFSSL_SRCDIR}/keys.c
${WOLFSSL_SRCDIR}/tls.c
${WOLFSSL_SRCDIR}/wolfio.c
${WOLFSSL_SRCDIR}/ocsp.c
${WOLFSSL_SRCDIR}/ssl.c
${WOLFSSL_SRCDIR}/tls13.c
${WOLFCRYPT_SRCDIR}/aes.c
${WOLFCRYPT_SRCDIR}/arc4.c
${WOLFCRYPT_SRCDIR}/asn.c
@ -110,69 +81,56 @@ ${WOLFCRYPT_SRCDIR}/wc_encrypt.c
${WOLFCRYPT_SRCDIR}/hash.c
${WOLFCRYPT_SRCDIR}/wolfmath.c
${WOLFCRYPT_SRCDIR}/kdf.c
${WOLFCRYPT_SRCDIR}/sp_int.c
${WOLFCRYPT_SRCDIR}/sp_c32.c
${WOLFCRYPT_SRCDIR}/sp_c64.c
)

# Use fastmath large number math library.
IF(NOT (MSVC AND CMAKE_C_COMPILER_ID MATCHES Clang))
# Can't use clang-cl with WOLFSSL_FASTMATH
# due to https://bugs.llvm.org/show_bug.cgi?id=25305
SET(WOLFSSL_FASTMATH 1)
ENDIF()

IF(WOLFSSL_FASTMATH)
SET(USE_FAST_MATH 1)
SET(TFM_TIMING_RESISTANT 1)
# FP_MAX_BITS is set high solely to satisfy ssl_8k_key.test
# WolfSSL will use more stack space with it
SET(FP_MAX_BITS 16384)
SET(WOLFCRYPT_SOURCES ${WOLFCRYPT_SOURCES} ${WOLFCRYPT_SRCDIR}/tfm.c)
IF((CMAKE_SIZEOF_VOID_P MATCHES 4) AND (CMAKE_SYSTEM_PROCESSOR MATCHES "86")
AND (NOT MSVC))
# Workaround https://github.com/wolfSSL/wolfssl/issues/4245
# On 32bit Intel, to satisfy inline assembly's wish for free registers
# 1. use -fomit-frame-pointer
# 2. With GCC 4, additionally use -fno-PIC, which works on x86
# (modern GCC has PIC optimizations, that make it unnecessary)
# The following assumes GCC or Clang
SET(TFM_COMPILE_FLAGS "-fomit-frame-pointer")
IF(CMAKE_C_COMPILER_VERSION VERSION_LESS "5")
SET(TFM_COMPILE_FLAGS "${TFM_COMPILE_FLAGS} -fno-PIC")
ENDIF()
SET_SOURCE_FILES_PROPERTIES(${WOLFCRYPT_SRCDIR}/tfm.c
PROPERTIES COMPILE_FLAGS ${TFM_COMPILE_FLAGS})
ENDIF()
ELSE()
SET(WOLFSSL_SP_MATH_ALL 1)
SET(WOLFCRYPT_SOURCES ${WOLFCRYPT_SOURCES} ${WOLFCRYPT_SRCDIR}/sp_int.c)
ENDIF()

IF(WOLFSSL_X86_64_BUILD)
LIST(APPEND WOLFCRYPT_SOURCES ${WOLFCRYPT_SRCDIR}/cpuid.c)
IF(MSVC)
SET(WOLFSSL_AESNI 1)
LIST(APPEND WOLFCRYPT_SOURCES
${WOLFCRYPT_SRCDIR}/aes_asm.asm
${WOLFCRYPT_SRCDIR}/aes_gcm_asm.asm)
IF(CMAKE_C_COMPILER_ID MATCHES Clang)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -maes -msse4.2 -mpclmul -mrdrnd -mrdseed")
ENDIF()
ELSEIF(WOLFSSL_INTELASM)
SET(WOLFSSL_AESNI 1)
SET(USE_INTEL_SPEEDUP 1)
LIST(APPEND WOLFCRYPT_SOURCES
# Optimizations, assembly
if(WOLFSSL_INTELASM)
set(WOLFSSL_X86_64_BUILD 1)
set(WOLFSSL_SP_X86_64 1)
set(WOLFSSL_SP_X86_64_ASM 1)
set(WOLFSSL_AESNI 1)
target_sources(wolfssl PRIVATE
${WOLFCRYPT_SRCDIR}/cpuid.c
${WOLFCRYPT_SRCDIR}/sp_x86_64.c
)
if(MSVC_INTEL)
target_sources(wolfssl PRIVATE
${WOLFCRYPT_SRCDIR}/aes_asm.asm
${WOLFCRYPT_SRCDIR}/aes_gcm_asm.asm
${WOLFCRYPT_SRCDIR}/sp_x86_64_asm.asm
)
target_compile_options(wolfssl PRIVATE
$<$<COMPILE_LANG_AND_ID:C,Clang>:-maes -msse4.2 -mpclmul -mrdrnd -mrdseed>
$<$<COMPILE_LANGUAGE:ASM_MASM>:/Zi>
)
else()
set(USE_INTEL_SPEEDUP 1)
target_sources(wolfssl PRIVATE
${WOLFCRYPT_SRCDIR}/aes_asm.S
${WOLFCRYPT_SRCDIR}/aes_gcm_asm.S
${WOLFCRYPT_SRCDIR}/chacha_asm.S
${WOLFCRYPT_SRCDIR}/poly1305_asm.S
${WOLFCRYPT_SRCDIR}/sha512_asm.S
${WOLFCRYPT_SRCDIR}/sha256_asm.S)
ADD_DEFINITIONS(-maes -msse4.2 -mpclmul)
# WolfSSL 5.5.4 bug - user_settings.h not included into aes_asm.S
SET_PROPERTY(SOURCE ${WOLFCRYPT_SRCDIR}/aes_asm.S APPEND PROPERTY COMPILE_OPTIONS "-DWOLFSSL_X86_64_BUILD")
ENDIF()
ENDIF()
${WOLFCRYPT_SRCDIR}/sha256_asm.S
${WOLFCRYPT_SRCDIR}/sp_x86_64_asm.S
)
target_compile_options(wolfssl PRIVATE -maes -msse4.2 -mpclmul)
# Workaround 5.5.4 bug (user_settings.h not included into aes_asm.S)
set_property(SOURCE ${WOLFCRYPT_SRCDIR}/aes_asm.S APPEND PROPERTY COMPILE_OPTIONS "-DWOLFSSL_X86_64_BUILD")
endif()
endif()

# Silence some warnings
if(MSVC)
# truncation warnings
target_compile_options(wolfssl PRIVATE $<$<COMPILE_LANGUAGE:C>:/wd4244>)
if(CMAKE_C_COMPILER_ID MATCHES Clang)
target_compile_options(wolfssl PRIVATE $<$<COMPILE_LANGUAGE:C>:-Wno-incompatible-function-pointer-types>)
endif()
endif()

CONFIGURE_FILE(user_settings.h.in user_settings.h)
INCLUDE_DIRECTORIES(${SSL_INCLUDE_DIRS})
ADD_CONVENIENCE_LIBRARY(wolfcrypt ${WOLFCRYPT_SOURCES})

@ -21,6 +21,7 @@
#define HAVE_AESGCM
#define HAVE_CHACHA
#define HAVE_POLY1305
#define HAVE_THREAD_LS
#define WOLFSSL_AES_COUNTER
#define NO_WOLFSSL_STUB
#define OPENSSL_ALL
@ -51,20 +52,19 @@
#define NO_RABBIT
#define NO_RC4

/*
FP_MAX_BITS is set high solely to satisfy ssl_8k_key.test
WolfSSL will use more stack space with it, with fastmath
*/
#cmakedefine FP_MAX_BITS 16384
#define RSA_MAX_SIZE 8192
#define WOLFSSL_SP_MATH_ALL
#define WOLFSSL_HAVE_SP_RSA
#ifndef WOLFSSL_SP_4096
#define WOLFSSL_SP_4096
#endif

#cmakedefine WOLFSSL_AESNI
#cmakedefine USE_FAST_MATH
#cmakedefine TFM_TIMING_RESISTANT
#cmakedefine HAVE_INTEL_RDSEED
#cmakedefine HAVE_INTEL_RDRAND
#cmakedefine USE_INTEL_SPEEDUP
#cmakedefine USE_FAST_MATH
#cmakedefine WOLFSSL_X86_64_BUILD
#cmakedefine WOLFSSL_SP_MATH_ALL
#cmakedefine WOLFSSL_SP_X86_64
#cmakedefine WOLFSSL_SP_X86_64_ASM

#endif /* WOLFSSL_USER_SETTINGS_H */

@ -49,6 +49,7 @@
#define HA_OPEN_MERGE_TABLE 2048U
#define HA_OPEN_FOR_CREATE 4096U
#define HA_OPEN_FOR_DROP (1U << 13) /* Open part of drop */
#define HA_OPEN_GLOBAL_TMP_TABLE (1U << 14) /* TMP table used by replication */

/*
Allow opening even if table is incompatible as this is for ALTER TABLE which
@ -377,6 +378,12 @@ enum ha_base_keytype {
#define HA_CREATE_INTERNAL_TABLE 256U
#define HA_PRESERVE_INSERT_ORDER 512U
#define HA_CREATE_NO_ROLLBACK 1024U
/*
A temporary table that can be used by different threads, eg. replication
threads. This flag ensures that memory is not allocated with THREAD_SPECIFIC,
as we do for other temporary tables.
*/
#define HA_CREATE_GLOBAL_TMP_TABLE 2048U

/* Flags used by start_bulk_insert */

@ -22,14 +22,15 @@
#include <m_string.h>
#include <my_pthread.h>

typedef uint32 my_bitmap_map;
typedef ulonglong my_bitmap_map;

typedef struct st_bitmap
{
my_bitmap_map *bitmap;
my_bitmap_map *last_word_ptr;
my_bitmap_map last_word_mask;
my_bitmap_map last_bit_mask;
uint32 n_bits; /* number of bits occupied by the above */
my_bool bitmap_allocated;
} MY_BITMAP;

#ifdef __cplusplus
@ -39,7 +40,7 @@ extern "C" {

/* Reset memory. Faster then doing a full bzero */
#define my_bitmap_clear(A) ((A)->bitmap= 0)

extern void create_last_word_mask(MY_BITMAP *map);
extern void create_last_bit_mask(MY_BITMAP *map);
extern my_bool my_bitmap_init(MY_BITMAP *map, my_bitmap_map *buf, uint n_bits);
extern my_bool bitmap_is_clear_all(const MY_BITMAP *map);
extern my_bool bitmap_is_prefix(const MY_BITMAP *map, uint prefix_size);
@ -53,12 +54,12 @@ extern my_bool bitmap_fast_test_and_set(MY_BITMAP *map, uint bitmap_bit);
extern my_bool bitmap_fast_test_and_clear(MY_BITMAP *map, uint bitmap_bit);
extern my_bool bitmap_union_is_set_all(const MY_BITMAP *map1,
const MY_BITMAP *map2);
extern my_bool bitmap_exists_intersection(const MY_BITMAP **bitmap_array,
extern my_bool bitmap_exists_intersection(MY_BITMAP **bitmap_array,
uint bitmap_count,
uint start_bit, uint end_bit);

extern uint bitmap_set_next(MY_BITMAP *map);
extern uint bitmap_get_first(const MY_BITMAP *map);
extern uint bitmap_get_first_clear(const MY_BITMAP *map);
extern uint bitmap_get_first_set(const MY_BITMAP *map);
extern uint bitmap_bits_set(const MY_BITMAP *map);
extern uint bitmap_get_next_set(const MY_BITMAP *map, uint bitmap_bit);
@ -71,54 +72,70 @@ extern void bitmap_union(MY_BITMAP *map, const MY_BITMAP *map2);
extern void bitmap_xor(MY_BITMAP *map, const MY_BITMAP *map2);
extern void bitmap_invert(MY_BITMAP *map);
extern void bitmap_copy(MY_BITMAP *map, const MY_BITMAP *map2);
/* Functions to export/import bitmaps to an architecture independent format */
extern void bitmap_export(uchar *to, MY_BITMAP *map);
extern void bitmap_import(MY_BITMAP *map, uchar *from);

#define my_bitmap_map_bytes sizeof(my_bitmap_map)
#define my_bitmap_map_bits (my_bitmap_map_bytes*8)
/* Size in bytes to store 'bits' number of bits */
#define bitmap_buffer_size(bits) (MY_ALIGN((bits), my_bitmap_map_bits)/8)
#define my_bitmap_buffer_size(map) bitmap_buffer_size((map)->n_bits)
#define no_bytes_in_export_map(map) (((map)->n_bits + 7)/8)
#define no_words_in_map(map) (((map)->n_bits + (my_bitmap_map_bits-1))/my_bitmap_map_bits)

/* Fast, not thread safe, bitmap functions */
#define bitmap_buffer_size(bits) (((bits)+31)/32)*4
#define no_bytes_in_map(map) (((map)->n_bits + 7)/8)
#define no_words_in_map(map) (((map)->n_bits + 31)/32)
#define bytes_word_aligned(bytes) (4*((bytes + 3)/4))
/* The following functions must be compatible with create_last_word_mask()! */
/* The following functions must be compatible with create_last_bit_mask()! */
static inline void
bitmap_set_bit(MY_BITMAP *map,uint bit)
{
uchar *b= (uchar*) map->bitmap + bit / 8;
DBUG_ASSERT(bit < map->n_bits);
*b= (uchar) (*b | 1U << (bit & 7));
map->bitmap[bit/my_bitmap_map_bits]|=
(1ULL << (bit & (my_bitmap_map_bits-1)));
}
static inline void
bitmap_flip_bit(MY_BITMAP *map,uint bit)
{
uchar *b= (uchar*) map->bitmap + bit / 8;
DBUG_ASSERT(bit < map->n_bits);
*b= (uchar) (*b ^ 1U << (bit & 7));
map->bitmap[bit/my_bitmap_map_bits]^=
(1ULL << (bit & (my_bitmap_map_bits-1)));
}
static inline void
bitmap_clear_bit(MY_BITMAP *map,uint bit)
{
uchar *b= (uchar*) map->bitmap + bit / 8;
DBUG_ASSERT(bit < map->n_bits);
*b= (uchar) (*b & ~(1U << (bit & 7)));
map->bitmap[bit/my_bitmap_map_bits]&=
~(1ULL << (bit & (my_bitmap_map_bits-1)));
}
static inline uint
bitmap_is_set(const MY_BITMAP *map,uint bit)
{
const uchar *b= (const uchar*) map->bitmap + bit / 8;
DBUG_ASSERT(bit < map->n_bits);
return !!(*b & (1U << (bit & 7)));
return (!!(map->bitmap[bit/my_bitmap_map_bits] &
(1ULL << (bit & (my_bitmap_map_bits-1)))));
}

/* Return true if bitmaps are equal */
static inline my_bool bitmap_cmp(const MY_BITMAP *map1, const MY_BITMAP *map2)
{
if (memcmp(map1->bitmap, map2->bitmap, 4*(no_words_in_map(map1)-1)) != 0)
return FALSE;
return ((*map1->last_word_ptr | map1->last_word_mask) ==
(*map2->last_word_ptr | map2->last_word_mask));
DBUG_ASSERT(map1->n_bits == map2->n_bits);
return (memcmp(map1->bitmap, map2->bitmap,
my_bitmap_buffer_size(map1)) == 0);
}

#define bitmap_clear_all(MAP) \
{ memset((MAP)->bitmap, 0, 4*no_words_in_map((MAP))); }
#define bitmap_set_all(MAP) \
(memset((MAP)->bitmap, 0xFF, 4*no_words_in_map((MAP))))
{ memset((MAP)->bitmap, 0, my_bitmap_buffer_size(MAP)); }

static inline void
bitmap_set_all(const MY_BITMAP *map)
{
if (map->n_bits)
{
memset(map->bitmap, 0xFF, my_bitmap_map_bytes * (no_words_in_map(map)-1));
DBUG_ASSERT(map->bitmap + no_words_in_map(map)-1 == map->last_word_ptr);
*map->last_word_ptr= ~map->last_bit_mask;
}
}

#ifdef __cplusplus
}

@ -974,6 +974,7 @@ typedef struct st_mysql_lex_string LEX_STRING;
#define SOCKET_ECONNRESET WSAECONNRESET
#define SOCKET_ENFILE ENFILE
#define SOCKET_EMFILE EMFILE
#define SOCKET_CLOSED EIO
#else /* Unix */
#define socket_errno errno
#define closesocket(A) close(A)
@ -983,6 +984,7 @@ typedef struct st_mysql_lex_string LEX_STRING;
#define SOCKET_EADDRINUSE EADDRINUSE
#define SOCKET_ETIMEDOUT ETIMEDOUT
#define SOCKET_ECONNRESET ECONNRESET
#define SOCKET_CLOSED EIO
#define SOCKET_ENFILE ENFILE
#define SOCKET_EMFILE EMFILE
#endif

@ -111,7 +111,7 @@ C_MODE_START
On AARCH64, we use the generic timer base register. We override clang
implementation for aarch64 as it access a PMU register which is not
guaranteed to be active.
On RISC-V, we use the rdcycle instruction to read from mcycle register.
On RISC-V, we use the rdtime instruction to read from mtime register.

Sadly, we have nothing for the Digital Alpha, MIPS, Motorola m68k,
HP PA-RISC or other non-mainstream (or obsolete) processors.
@ -211,15 +211,15 @@ static inline ulonglong my_timer_cycles(void)
}
#elif defined(__riscv)
#define MY_TIMER_ROUTINE_CYCLES MY_TIMER_ROUTINE_RISCV
/* Use RDCYCLE (and RDCYCLEH on riscv32) */
/* Use RDTIME (and RDTIMEH on riscv32) */
{
# if __riscv_xlen == 32
ulong result_lo, result_hi0, result_hi1;
/* Implemented in assembly because Clang insisted on branching. */
__asm __volatile__(
"rdcycleh %0\n"
"rdcycle %1\n"
"rdcycleh %2\n"
"rdtimeh %0\n"
"rdtime %1\n"
"rdtimeh %2\n"
"sub %0, %0, %2\n"
"seqz %0, %0\n"
"sub %0, zero, %0\n"
@ -228,7 +228,7 @@ static inline ulonglong my_timer_cycles(void)
return (static_cast<ulonglong>(result_hi1) << 32) | result_lo;
# else
ulonglong result;
__asm __volatile__("rdcycle %0" : "=r"(result));
__asm __volatile__("rdtime %0" : "=r"(result));
return result;
}
# endif

include/mysql/service_print_check_msg.h (new file, 44 lines)
@ -0,0 +1,44 @@
/* Copyright (c) 2019, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */

#pragma once

/**
@file include/mysql/service_print_check_msg.h
This service provides functions to write messages for check or repair
*/

#ifdef __cplusplus
extern "C" {
#endif

extern struct print_check_msg_service_st {
void (*print_check_msg)(MYSQL_THD, const char *db_name, const char *table_name,
const char *op, const char *msg_type, const char *message,
my_bool print_to_log);
} *print_check_msg_service;

#ifdef MYSQL_DYNAMIC_PLUGIN
# define print_check_msg_context(_THD) print_check_msg_service->print_check_msg
#else
extern void print_check_msg(MYSQL_THD, const char *db_name, const char *table_name,
const char *op, const char *msg_type, const char *message,
my_bool print_to_log);
#endif

#ifdef __cplusplus
}
#endif

@ -457,6 +457,7 @@ typedef struct st_net {
my_bool thread_specific_malloc;
unsigned char compress;
my_bool pkt_nr_can_be_reset;
my_bool using_proxy_protocol;
/*
Pointer to query object in query cache, do not equal NULL (0) for
queries in cache that have not stored its results yet

@ -44,6 +44,7 @@
#define VERSION_wsrep 0x0500
#define VERSION_json 0x0100
#define VERSION_thd_mdl 0x0100
#define VERSION_print_check_msg 0x0100
#define VERSION_sql_service 0x0102

#define VERSION_provider_bzip2 0x0100

@ -41,6 +41,13 @@ enum enum_vio_type
VIO_TYPE_SSL
/* see also vio_type_names[] */
};

enum enum_vio_state
{
VIO_STATE_NOT_INITIALIZED, VIO_STATE_ACTIVE, VIO_STATE_SHUTDOWN,
VIO_STATE_CLOSED
};

#define FIRST_VIO_TYPE VIO_CLOSED
#define LAST_VIO_TYPE VIO_TYPE_SSL

@ -244,6 +251,7 @@ struct st_vio
struct sockaddr_storage local; /* Local internet address */
struct sockaddr_storage remote; /* Remote internet address */
enum enum_vio_type type; /* Type of connection */
enum enum_vio_state state; /* State of the connection */
const char *desc; /* String description */
char *read_buffer; /* buffer for vio_read_buff */
char *read_pos; /* start of unfetched data in the

@ -23,6 +23,8 @@ void init_embedded_mysql(MYSQL *mysql, ulong client_flag);
void *create_embedded_thd(ulong client_flag);
int check_embedded_connection(MYSQL *mysql, const char *db);
void free_old_query(MYSQL *mysql);
THD *embedded_get_current_thd();
void embedded_set_current_thd(THD *thd);
extern MYSQL_METHODS embedded_methods;

/* This one is used by embedded library to gather returning data */

@ -111,7 +111,7 @@ emb_advanced_command(MYSQL *mysql, enum enum_server_command command,
MYSQL_STMT *stmt)
{
my_bool result= 1;
THD *thd=(THD *) mysql->thd;
THD *thd=(THD *) mysql->thd, *old_current_thd= current_thd;
NET *net= &mysql->net;
my_bool stmt_skip= stmt ? stmt->state != MYSQL_STMT_INIT_DONE : FALSE;

@ -122,6 +122,8 @@ emb_advanced_command(MYSQL *mysql, enum enum_server_command command,
else
{
free_embedded_thd(mysql);
if (old_current_thd == thd)
old_current_thd= 0;
thd= 0;
}
}
@ -179,6 +181,8 @@ emb_advanced_command(MYSQL *mysql, enum enum_server_command command,

end:
thd->reset_globals();
if (old_current_thd)
old_current_thd->store_globals();
return result;
}

@ -265,6 +269,7 @@ static my_bool emb_read_prepare_result(MYSQL *mysql, MYSQL_STMT *stmt)
mysql->server_status|= SERVER_STATUS_IN_TRANS;

stmt->fields= mysql->fields;
free_root(&stmt->mem_root, MYF(0));
stmt->mem_root= res->alloc;
mysql->fields= NULL;
my_free(res);
@ -374,6 +379,7 @@ int emb_read_binary_rows(MYSQL_STMT *stmt)
set_stmt_errmsg(stmt, &stmt->mysql->net);
return 1;
}
free_root(&stmt->result.alloc, MYF(0));
stmt->result= *data;
my_free(data);
set_stmt_errmsg(stmt, &stmt->mysql->net);
@ -432,12 +438,15 @@ int emb_unbuffered_fetch(MYSQL *mysql, char **row)

static void free_embedded_thd(MYSQL *mysql)
{
THD *thd= (THD*)mysql->thd;
THD *thd= (THD*)mysql->thd, *org_current_thd= current_thd;
server_threads.erase(thd);
thd->clear_data_list();
thd->store_globals();
delete thd;
set_current_thd(nullptr);
if (thd == org_current_thd)
set_current_thd(nullptr);
else
set_current_thd(org_current_thd);
mysql->thd=0;
}

@ -727,6 +736,17 @@ void *create_embedded_thd(ulong client_flag)
}

THD *embedded_get_current_thd()
{
return current_thd;
}

void embedded_set_current_thd(THD *thd)
{
set_current_thd(thd);
}

#ifdef NO_EMBEDDED_ACCESS_CHECKS
static void
emb_transfer_connect_attrs(MYSQL *mysql)

@ -78,7 +78,7 @@ mysql_real_connect(MYSQL *mysql,const char *host, const char *user,
uint port, const char *unix_socket,ulong client_flag)
{
char name_buff[USERNAME_LENGTH];

THD *org_current_thd= embedded_get_current_thd();
DBUG_ENTER("mysql_real_connect");
DBUG_PRINT("enter",("host: %s db: %s user: %s (libmysqld)",
host ? host : "(Null)",
@ -200,6 +200,7 @@ mysql_real_connect(MYSQL *mysql,const char *host, const char *user,
}
}
}
embedded_set_current_thd(org_current_thd);

DBUG_PRINT("exit",("Mysql handler: %p", mysql));
DBUG_RETURN(mysql);
@ -216,6 +217,7 @@ error:
mysql_close(mysql);
mysql->free_me=free_me;
}
embedded_set_current_thd(org_current_thd);
DBUG_RETURN(0);
}

@ -25,6 +25,7 @@ SET(MYSQLSERVICES_SOURCES
my_crypt_service.c
my_md5_service.c
my_print_error_service.c
print_check_msg_service.c
my_sha1_service.c
my_sha2_service.c
my_snprintf_service.c

libservices/print_check_msg_service.c (new file, 18 lines)
@ -0,0 +1,18 @@
/* Copyright (c) 2024, MariaDB Plc

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/

#include <service_versions.h>
SERVICE_VERSION print_check_msg_context= (void*) VERSION_print_check_msg;

@ -25,7 +25,7 @@ IF(NOT WITHOUT_SERVER)
INSTALL_MANPAGES(Server
wsrep_sst_rsync.1 wsrep_sst_common.1 wsrep_sst_mariabackup.1
wsrep_sst_mysqldump.1 wsrep_sst_rsync_wan.1 galera_recovery.1
galera_new_cluster.1)
galera_new_cluster.1 wsrep_sst_backup.1)
ENDIF()
ENDIF()
INSTALL_MANPAGES(Client

@ -1,6 +1,6 @@
'\" t
.\"
.TH "\fBMY_PRINT_DEFAULTS\fR" "1" "15 May 2020" "MariaDB 10.11" "MariaDB Database System"
.TH "\fBMY_PRINT_DEFAULTS\fR" "1" "18 December 2023" "MariaDB 10.11" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@ -146,6 +146,22 @@ In addition to the groups named on the command line, read groups that have the g
.sp -1
.IP \(bu 2.3
.\}
.\" my_print_defaults: --mariadbd option
.\" mariadbd option: my_print_defaults
\fB\-\-mariadbd\fR
.sp
Read the same set of groups that the mariadbd binary does.
.RE
.sp
.RS 4
.ie n \{\
\h'-04'\(bu\h'+03'\c
.\}
.el \{\
.sp -1
.IP \(bu 2.3
.\}

.\" my_print_defaults: --mysqld option
.\" mysqld option: my_print_defaults
\fB\-\-mysqld\fR

man/wsrep_sst_backup.1 (new file, 16 lines)
@ -0,0 +1,16 @@
'\" t
.\"
.TH "\FBWSREP_SST_BACKUP\FR" "1" "22 May 2022" "MariaDB 10\&.3" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
.\" disable hyphenation
.nh
.\" disable justification (adjust text to left margin only)
.ad l
.SH NAME
wsrep_sst_backup \- backup helper script for the MariaDB Galera Cluster
.SH DESCRIPTION
Use: See source code of script\.
.PP
For more information, please refer to the MariaDB Knowledge Base, available online at https://mariadb.com/kb/

mysql-test/include/aria_log_control_load.inc (new file, 11 lines)
@ -0,0 +1,11 @@
#
# This file loads aria_log_control file into a user variable @aria_log_control.
# Set $ARIA_DATADIR before including this file
#

--disable_query_log
--copy_file $ARIA_DATADIR/aria_log_control $MYSQLTEST_VARDIR/aria_log_control_tmp
--chmod 0777 $MYSQLTEST_VARDIR/aria_log_control_tmp
--eval SET @aria_log_control=(SELECT LOAD_FILE('$MYSQLTEST_VARDIR/aria_log_control_tmp'))
--remove_file $MYSQLTEST_VARDIR/aria_log_control_tmp
--enable_query_log

@ -32,7 +32,6 @@ if ($tmp)
--echo Relay_Master_Log_File #
--echo Slave_IO_Running No
--echo Slave_SQL_Running No
--echo Replicate_Rewrite_DB #
--echo Replicate_Do_DB #
--echo Replicate_Ignore_DB #
--echo Replicate_Do_Table #
@ -74,13 +73,22 @@ if ($tmp)
--echo Slave_DDL_Groups #
--echo Slave_Non_Transactional_Groups #
--echo Slave_Transactional_Groups #
--echo Replicate_Rewrite_DB #
}
if (!$tmp) {
# Note: after WL#5177, fields 13-18 shall not be filtered-out.
--replace_column 4 # 5 # 6 # 7 # 8 # 9 # 10 # 13 # 14 # 15 # 16 # 17 # 18 # 19 # 23 # 24 # 25 # 26 # 27 # 41 # 42 # 43 # 45 # 52 # 53 # 54 #
--replace_column 4 # 5 # 6 # 7 # 8 # 9 # 10 # 13 # 14 # 15 # 16 # 17 # 18 # 22 # 23 # 24 # 25 # 26 # 40 # 41 # 42 # 44 # 51 # 52 # 53 # 54 #
query_vertical
SHOW SLAVE STATUS;
}
#
# Note, we must never, _ever_, add extra rows to this output of SHOW SLAVE
# STATUS, except at the very end, as this breaks backwards compatibility
# with applications or scripts that parse the output. This also means that
# we cannot add _any_ new rows in a GA version if a different row was
# already added in a later MariaDB version, as this would make it impossible
# to merge the change up while preserving the order of rows.
#

#
# Ensure that we don't get warnings from mysql.proc (used by check_mysqld)

@ -103,7 +103,6 @@ connection con2;

# The following query should hang because con1 is locking the record
update t2 set a=2 where b = 0;
select * from t2;
--send
update t1 set x=2 where id = 0;
--sleep 2

@ -22,7 +22,6 @@ select * from t1;
connection con1;
begin work;
insert into t1 values (5);
select * from t1;
# Lock wait timeout set to 2 seconds in <THIS TEST>-master.opt; this
# statement will time out; in 5.0.13+, it will not roll back transaction.
--error ER_LOCK_WAIT_TIMEOUT

mysql-test/include/rpl_clone_slave_using_mariadb-backup.inc (new file, 297 lines)
@ -0,0 +1,297 @@
if ($cnf == "galera2_to_mariadb")
{
--let MASTER_MYPORT= $NODE_MYPORT_1
--connect master, 127.0.0.1, root, , test, $NODE_MYPORT_1
--connect slave, 127.0.0.1, root, , test, $NODE_MYPORT_3
--disable_query_log
--replace_result $MASTER_MYPORT ###
--eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$MASTER_MYPORT, MASTER_USE_GTID=NO;
--enable_query_log
START SLAVE;
--source include/wait_for_slave_to_start.inc

--let XTRABACKUP_BACKUP_OPTIONS=--no-defaults --user=root --host='127.0.0.1' --port=$NODE_MYPORT_3
--let XTRABACKUP_COPY_BACK_OPTIONS= --no-defaults
}

if ($cnf == "mariadb_to_mariadb")
{
--let XTRABACKUP_BACKUP_OPTIONS=--defaults-file=$MYSQLTEST_VARDIR/my.cnf --defaults-group-suffix=.2
--let XTRABACKUP_COPY_BACK_OPTIONS=--defaults-file=$MYSQLTEST_VARDIR/my.cnf --defaults-group-suffix=.2
}

--connection master
--let $MYSQLD_DATADIR_MASTER= `select @@datadir`
--connection slave
--let $MYSQLD_DATADIR_SLAVE= `select @@datadir`

# This test covers the filename:pos based synchronization
# between the master and the slave.
# If we ever need to test a GTID based synchronization,
# it should be done in a separate test.

--echo ##############################################################
--echo ### Initial block with some transactions

--echo ### Slave: Make sure replication is not using GTID
--connection slave
--let $value= query_get_value(SHOW SLAVE STATUS, "Using_Gtid", 1)
--echo # Using_Gtid=$value

--echo ### Master: Create and populate t1
--connection master
CREATE TABLE t1(a TEXT) ENGINE=InnoDB;
START TRANSACTION;
INSERT INTO t1 VALUES ('tr#00:stmt#00 - slave run#0, before backup');
INSERT INTO t1 VALUES ('tr#00:stmt#01 - slave run#0, before backup');
INSERT INTO t1 VALUES ('tr#00:stmt#02 - slave run#0, before backup');
COMMIT;
--sync_slave_with_master

--echo ##############################################################
--echo ### Run the last transaction before mariadb-backup --backup
--echo ### Remember SHOW MASTER STATUS and @@gtid_binlog_pos
--echo ### before and after the transaction.

--echo ### Master: Remember MASTER STATUS and @@gtid_binlog_pos before tr#01
--connection master
--let $master_before_tr01_show_master_status_file=query_get_value(SHOW MASTER STATUS, File, 1)
--let $master_before_tr01_show_master_status_position=query_get_value(SHOW MASTER STATUS, Position, 1)
--let $master_before_tr01_gtid_binlog_pos=`SELECT @@global.gtid_binlog_pos`

--echo ### Slave: Remember MASTER STATUS and @@gtid_binlog_pos before tr#01
--connection slave
--let $slave_before_tr01_show_master_status_file=query_get_value(SHOW MASTER STATUS, File, 1)
--let $slave_before_tr01_show_master_status_position=query_get_value(SHOW MASTER STATUS, Position, 1)
--let $slave_before_tr01_gtid_binlog_pos=`SELECT @@global.gtid_binlog_pos`

--echo ### Master: Run the actual last transaction before the backup
--connection master
START TRANSACTION;
INSERT INTO t1 VALUES ('tr#01:stmt#00 - slave run#0, before backup');
INSERT INTO t1 VALUES ('tr#01:stmt#01 - slave run#0, before backup');
INSERT INTO t1 VALUES ('tr#01:stmt#02 - slave run#0, before backup');
COMMIT;
--sync_slave_with_master

--echo ### Master: Remember MASTER STATUS and @@gtid_binlog_pos after tr#01
--connection master
--let $master_after_tr01_show_master_status_file=query_get_value(SHOW MASTER STATUS, File, 1)
--let $master_after_tr01_show_master_status_position=query_get_value(SHOW MASTER STATUS, Position, 1)
--let $master_after_tr01_gtid_binlog_pos=`SELECT @@global.gtid_binlog_pos`

--echo ### Slave: Remember MASTER STATUS and @@gtid_binlog_pos after tr#01
--connection slave
--let $slave_after_tr01_show_master_status_file=query_get_value(SHOW MASTER STATUS, File, 1)
--let $slave_after_tr01_show_master_status_position=query_get_value(SHOW MASTER STATUS, Position, 1)
--let $slave_after_tr01_gtid_binlog_pos=`SELECT @@global.gtid_binlog_pos`

--echo ##############################################################
--echo ### Running `mariadb-backup --backup,--prepare` and checking
--echo ### that mariadb_backup_slave_info and mariadb_backup_binlog_info are OK

--echo ### Slave: Create a backup
--let $backup_slave=$MYSQLTEST_VARDIR/tmp/backup-slave
--disable_result_log
--exec $XTRABACKUP $XTRABACKUP_BACKUP_OPTIONS --slave-info --backup --target-dir=$backup_slave
--enable_result_log

--echo ### Slave: Prepare the backup
--exec $XTRABACKUP --prepare --target-dir=$backup_slave

--echo ### Slave: xtrabackup files:
--echo ############################ mariadb_backup_slave_info
--replace_result $master_after_tr01_show_master_status_file master_after_tr01_show_master_status_file $master_after_tr01_show_master_status_position master_after_tr01_show_master_status_position
--cat_file $backup_slave/mariadb_backup_slave_info
--echo ############################ mariadb_backup_binlog_info
--replace_result $slave_after_tr01_show_master_status_file slave_after_tr01_show_master_status_file $slave_after_tr01_show_master_status_position slave_after_tr01_show_master_status_position $slave_after_tr01_gtid_binlog_pos slave_after_tr01_gtid_binlog_pos
--cat_file $backup_slave/mariadb_backup_binlog_info
--echo ############################

--echo ##############################################################
--echo ### Run more transactions after the backup:
--echo ### - while the slave is still running, then
--echo ### - while the slave is shut down

--echo ### Master: Run another transaction while the slave is still running
--connection master
START TRANSACTION;
INSERT INTO t1 VALUES ('tr#02:stmt#00 - slave run#0, after backup');
INSERT INTO t1 VALUES ('tr#02:stmt#01 - slave run#0, after backup');
INSERT INTO t1 VALUES ('tr#02:stmt@02 - slave run#0, after backup');
COMMIT;
--sync_slave_with_master

--echo ### Master: Remember MASTER STATUS and @@gtid_binlog_pos after tr#02
--connection master
--let $master_after_tr02_show_master_status_file=query_get_value(SHOW MASTER STATUS, File, 1)
--let $master_after_tr02_show_master_status_position=query_get_value(SHOW MASTER STATUS, Position, 1)
--let $master_after_tr02_gtid_binlog_pos=`SELECT @@global.gtid_binlog_pos`

--echo ### Slave: Remember MASTER STATUS and @@gtid_binlog_pos after tr#02
--connection slave
--let $slave_after_tr02_show_master_status_file=query_get_value(SHOW MASTER STATUS, File, 1)
--let $slave_after_tr02_show_master_status_position=query_get_value(SHOW MASTER STATUS, Position, 1)
--let $slave_after_tr02_gtid_binlog_pos=`SELECT @@global.gtid_binlog_pos`

--echo ### Master: Checking SHOW BINLOG EVENTS

--connection master
--vertical_results
### The BEGIN event
--replace_column 4 # 5 #
--replace_result $master_after_tr01_show_master_status_file master_after_tr01_show_master_status_file $master_after_tr01_show_master_status_position master_after_tr01_show_master_status_position $master_after_tr02_gtid_binlog_pos master_after_tr02_gtid_binlog_pos
--eval SHOW BINLOG EVENTS IN '$master_after_tr01_show_master_status_file' FROM $master_after_tr01_show_master_status_position LIMIT 0,1
### The INSERT event
--replace_column 2 # 4 # 5 #
--replace_result $master_after_tr01_show_master_status_file master_after_tr01_show_master_status_file $master_after_tr01_show_master_status_position master_after_tr01_show_master_status_position
# Hide the difference between row and stmt binary logging
--replace_regex /use `test`; // /(Query|Annotate_rows)/Query_or_Annotate_rows/
--eval SHOW BINLOG EVENTS IN '$master_after_tr01_show_master_status_file' FROM $master_after_tr01_show_master_status_position LIMIT 1,1
--horizontal_results

--echo ### Slave: Checking SHOW BINLOG EVENTS
--connection slave
--vertical_results
### The BEGIN event
--replace_column 2 # 5 #
--replace_result $slave_after_tr01_show_master_status_file slave_after_tr01_show_master_status_file $slave_after_tr01_show_master_status_position slave_after_tr01_show_master_status_position $slave_after_tr02_gtid_binlog_pos slave_after_tr02_gtid_binlog_pos
--eval SHOW BINLOG EVENTS IN '$slave_after_tr01_show_master_status_file' FROM $slave_after_tr01_show_master_status_position LIMIT 0,1
### The INSERT event
--replace_column 2 # 4 # 5 #
--replace_result $slave_after_tr01_show_master_status_file slave_after_tr01_show_master_status_file $slave_after_tr01_show_master_status_position slave_after_tr01_show_master_status_position $slave_after_tr02_gtid_binlog_pos slave_after_tr02_gtid_binlog_pos
# Hide the difference between row and stmt binary logging
--replace_regex /use `test`; // /(Query|Annotate_rows)/Query_or_Annotate_rows/
--eval SHOW BINLOG EVENTS IN '$slave_after_tr01_show_master_status_file' FROM $slave_after_tr01_show_master_status_position LIMIT 1,1
--horizontal_results

--echo ### Slave: Stop replication
--connection slave
STOP SLAVE;
--source include/wait_for_slave_to_stop.inc
RESET SLAVE;

--echo ### Slave: Shutdown the server

if ($cnf == "mariadb_to_mariadb")
{
--let $rpl_server_number= 2
--source include/rpl_stop_server.inc
}

if ($cnf == "galera2_to_mariadb")
{
--connection slave
--source $MYSQL_TEST_DIR/include/shutdown_mysqld.inc
}

--echo ### Master: Run a transaction while the slave is shut down
--connection master
START TRANSACTION;
INSERT INTO t1 VALUES ('tr#03:stmt#00 - after slave run#0, slave is shut down, after backup');
INSERT INTO t1 VALUES ('tr#03:stmt#01 - after slave run#0, slave is shut down, after backup');
INSERT INTO t1 VALUES ('tr#03:stmt#02 - after slave run#0, slave is shut down, after backup');
COMMIT;

--echo ##############################################################
--echo ### Emulate starting a new virgin slave

--echo ### Slave: Remove the data directory
--rmdir $MYSQLD_DATADIR_SLAVE

--echo ### Slave: Copy back the backup
--exec $XTRABACKUP $XTRABACKUP_COPY_BACK_OPTIONS --copy-back --datadir=$MYSQLD_DATADIR_SLAVE --target-dir=$backup_slave

--echo ### Slave: Restart the server
if ($cnf == "mariadb_to_mariadb")
{
--let $rpl_server_number= 2
--source include/rpl_start_server.inc
--source include/wait_until_connected_again.inc
}

if ($cnf == "galera2_to_mariadb")
{
--connection slave
--source $MYSQL_TEST_DIR/include/start_mysqld.inc
}

--echo ### Slave: Display the restored data before START SLAVE
--connection slave
SELECT * FROM t1 ORDER BY a;

--echo ### Slave: Execute the CHANGE MASTER statement to set up the host and port
--replace_result $MASTER_MYPORT ###
--eval CHANGE MASTER '' TO MASTER_USER='root', MASTER_HOST='127.0.0.1', MASTER_PORT=$MASTER_MYPORT, MASTER_CONNECT_RETRY=1

--echo ### Slave: Execute the CHANGE MASTER statement from mariadb_backup_slave_info
--replace_result $master_after_tr01_show_master_status_file master_after_tr01_show_master_status_file $master_after_tr01_show_master_status_position master_after_tr01_show_master_status_position
--source $backup_slave/mariadb_backup_slave_info

--echo ### Slave: Execute START SLAVE
--source include/start_slave.inc

--echo ### Master: Wait for the slave to apply all master events
--connection master
--sync_slave_with_master slave

--echo ### Slave: Make sure replication is not using GTID after the slave restart
--connection slave
--let $value= query_get_value(SHOW SLAVE STATUS, "Using_Gtid", 1)
--echo # Using_Gtid=$value

--echo ### Slave: Display the restored data after START SLAVE
--connection slave
SELECT * FROM t1 ORDER BY a;

--echo ##############################################################
--echo ### Continue master transactions, check the new slave replicates well.

--echo ### Master: Run a transaction after restarting replication
--connection master
START TRANSACTION;
INSERT INTO t1 VALUES ('tr#04:stmt#00 - slave run#1');
INSERT INTO t1 VALUES ('tr#04:stmt#01 - slave run#1');
INSERT INTO t1 VALUES ('tr#04:stmt#02 - slave run#1');
COMMIT;
--sync_slave_with_master

--echo ### Slave: Display the restored data + new transactions
--connection slave
SELECT * FROM t1 ORDER BY a;

--echo ##############################################################
--echo ### Cleanup

--echo ### Removing the backup directory
--rmdir $backup_slave

--connection master
DROP TABLE t1;
--sync_slave_with_master

if ($cnf == "mariadb_to_mariadb")
{
--source include/rpl_end.inc
}

if ($cnf == "galera2_to_mariadb")
{
STOP SLAVE;
--source include/wait_for_slave_to_stop.inc
RESET SLAVE ALL;

--connection master
set global wsrep_on=OFF;
RESET MASTER;
set global wsrep_on=ON;
}

@ -9,9 +9,6 @@
|
||||
#
|
||||
# The environment variables SEARCH_FILE and SEARCH_PATTERN must be set
|
||||
# before sourcing this routine.
|
||||
# SEARCH_TYPE can also be set to either NULL (default) or _gm_
# NULL is equivalent to using m/SEARCH_PATTERN/gs
# _gm_ is equivalent to using m/SEARCH_RANGE/gm
|
||||
#
|
||||
# Optionally, SEARCH_RANGE can be set to the max number of bytes of the file
|
||||
# to search. If negative, it will search that many bytes at the end of the
|
||||
@ -25,6 +22,7 @@
|
||||
# Supported formats:
|
||||
# - (default) : "FOUND n /pattern/ in FILE " or "NOT FOUND ..."
|
||||
# - "matches" : Each match is printed, on a separate line
|
||||
# - "count" : "FOUND n matches in FILE" or "NOT FOUND ..." (omit pattern)
|
||||
#
|
||||
# In case of
|
||||
# - SEARCH_FILE and/or SEARCH_PATTERN is not set
|
||||
@ -51,15 +49,12 @@
|
||||
# Created: 2011-11-11 mleich
|
||||
#
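As a quick illustration of the interface documented in the header above, a caller only needs to export the SEARCH_* variables and source the include. This is a minimal sketch; the error-log path and pattern are hypothetical examples, not values used elsewhere in this patch:
--let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err
--let SEARCH_PATTERN= Aborted connection
--let SEARCH_OUTPUT= count
--source include/search_pattern_in_file.inc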
|
||||
|
||||
--error 0,1
|
||||
perl;
|
||||
use strict;
|
||||
die "SEARCH_FILE not set" unless $ENV{SEARCH_FILE};
|
||||
my @search_files= glob($ENV{SEARCH_FILE});
|
||||
my $search_pattern= $ENV{SEARCH_PATTERN} or die "SEARCH_PATTERN not set";
|
||||
my $search_range= $ENV{SEARCH_RANGE};
|
||||
my $silent= $ENV{SEARCH_SILENT};
|
||||
my $search_result= 0;
|
||||
my $content;
|
||||
foreach my $search_file (@search_files) {
|
||||
open(FILE, '<', $search_file) || die("Can't open file $search_file: $!");
|
||||
@ -83,48 +78,23 @@ perl;
|
||||
close(FILE);
|
||||
$content.= $file_content;
|
||||
}
|
||||
my @matches;
|
||||
if (not defined($ENV{SEARCH_TYPE}))
|
||||
{
|
||||
@matches=($content =~ /$search_pattern/gs);
|
||||
}
|
||||
elsif($ENV{SEARCH_TYPE} == "_gm_")
|
||||
{
|
||||
@matches=($content =~ /$search_pattern/gm);
|
||||
}
|
||||
my $res;
|
||||
if (@matches)
|
||||
{
|
||||
$res="FOUND " . scalar(@matches);
|
||||
$search_result= 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
$res= "NOT FOUND";
|
||||
}
|
||||
my @matches= ($content =~ /$search_pattern/gs);
|
||||
my $res=@matches ? "FOUND " . scalar(@matches) : "NOT FOUND";
|
||||
|
||||
$ENV{SEARCH_FILE} =~ s{^.*?([^/\\]+)$}{$1};
|
||||
|
||||
if (!$silent || $search_result)
|
||||
{
|
||||
if ($ENV{SEARCH_OUTPUT} eq "matches")
|
||||
{
|
||||
foreach (@matches)
|
||||
{
|
||||
print $_ . "\n";
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
print "$res /$search_pattern/ in $ENV{SEARCH_FILE}\n";
|
||||
if ($ENV{SEARCH_OUTPUT} eq "matches") {
|
||||
foreach (@matches) {
|
||||
print $_ . "\n";
|
||||
}
|
||||
}
|
||||
die "$ENV{SEARCH_ABORT}\n"
|
||||
if $ENV{SEARCH_ABORT} && $res =~ /^$ENV{SEARCH_ABORT}/;
|
||||
exit($search_result != 1);
|
||||
elsif ($ENV{SEARCH_OUTPUT} eq "count")
|
||||
{
|
||||
print "$res matches in $ENV{SEARCH_FILE}\n";
|
||||
}
|
||||
elsif ($ENV{SEARCH_ABORT} and $res =~ /^$ENV{SEARCH_ABORT}/) {
|
||||
die "$res /$search_pattern/ in $ENV{SEARCH_FILE}\n";
|
||||
} else {
|
||||
print "$res /$search_pattern/ in $ENV{SEARCH_FILE}\n";
|
||||
}
|
||||
EOF
|
||||
|
||||
let $SEARCH_RESULT= 1; # Found pattern
|
||||
if ($errno)
|
||||
{
|
||||
let $SEARCH_RESULT= 0; # Did not find pattern
|
||||
}
|
||||
|
@ -25,23 +25,23 @@ if (!$_timeout)
|
||||
}
|
||||
|
||||
let $_timeout_counter=`SELECT $_timeout * 10`;
|
||||
let SEARCH_SILENT=1;
|
||||
|
||||
let SEARCH_ABORT=NOT FOUND;
|
||||
let $_continue= 1;
|
||||
disable_abort_on_error;
|
||||
while ($_continue)
|
||||
{
|
||||
source include/search_pattern_in_file.inc;
|
||||
if ($SEARCH_RESULT)
|
||||
if (!$errno)
|
||||
{
|
||||
# Found match
|
||||
let $_continue= 0;
|
||||
}
|
||||
if (!$SEARCH_RESULT)
|
||||
if ($errno)
|
||||
{
|
||||
dec $_timeout_counter;
|
||||
if ($_timeout_counter == 1)
|
||||
{
|
||||
let $SEARCH_SILENT= 0;
|
||||
enable_abort_on_error;
|
||||
}
|
||||
if (!$_timeout_counter)
|
||||
{
|
||||
@ -49,8 +49,7 @@ while ($_continue)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let SEARCH_SILENT=0;
|
||||
enable_abort_on_error;
|
||||
|
||||
--source include/end_include_file.inc
|
||||
--let $keep_include_silent=$wait_save_keep_include_silent
|
||||
|
@ -11,7 +11,7 @@ let $counter= 5000;
|
||||
let $mysql_errno= 9999;
|
||||
while ($mysql_errno)
|
||||
{
|
||||
--error 0,ER_ACCESS_DENIED_ERROR,ER_SERVER_SHUTDOWN,ER_CONNECTION_KILLED,ER_LOCK_WAIT_TIMEOUT,2002,2006,2013,HA_ERR_NO_ENCRYPTION
|
||||
--error 0,ER_ACCESS_DENIED_ERROR,ER_SERVER_SHUTDOWN,ER_CONNECTION_KILLED,ER_LOCK_WAIT_TIMEOUT,2002,2006,2013,HA_ERR_NO_ENCRYPTION,2026
|
||||
select 1;
|
||||
|
||||
dec $counter;
|
||||
|
@ -87,12 +87,16 @@ sub flush_out {
|
||||
$out_line = "";
|
||||
}
|
||||
|
||||
use if $^O eq "MSWin32", "threads::shared";
|
||||
my $flush_lock :shared;
|
||||
|
||||
# Print to stdout
|
||||
sub print_out {
|
||||
if(IS_WIN32PERL) {
|
||||
$out_line .= $_[0];
|
||||
# Flush buffered output on new lines.
|
||||
if (rindex($_[0], "\n") != -1) {
|
||||
lock($flush_lock);
|
||||
flush_out();
|
||||
}
|
||||
} else {
|
||||
|
@ -3102,6 +3102,14 @@ CREATE TEMPORARY TABLE t2 LIKE t1;
|
||||
DROP TEMPORARY TABLE t2;
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-33313 Incorrect error message for "ALTER TABLE ... DROP CONSTRAINT ..., DROP col, DROP col"
|
||||
#
|
||||
create table t2(id int primary key) engine=innodb;
|
||||
create table t1(id int primary key, t2_id int, constraint t1_fk_t2_id foreign key(t2_id) references t2(id)) engine=innodb;
|
||||
alter table t1 drop constraint t1_fk_t2_id, drop t2_id, drop t2_id;
|
||||
ERROR 42000: Can't DROP COLUMN `t2_id`; check that it exists
|
||||
drop table t1, t2;
|
||||
#
|
||||
# End of 10.6 tests
|
||||
#
|
||||
#
|
||||
|
@ -2392,6 +2392,15 @@ CREATE TEMPORARY TABLE t2 LIKE t1;
|
||||
DROP TEMPORARY TABLE t2;
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-33313 Incorrect error message for "ALTER TABLE ... DROP CONSTRAINT ..., DROP col, DROP col"
|
||||
--echo #
|
||||
create table t2(id int primary key) engine=innodb;
|
||||
create table t1(id int primary key, t2_id int, constraint t1_fk_t2_id foreign key(t2_id) references t2(id)) engine=innodb;
|
||||
--error ER_CANT_DROP_FIELD_OR_KEY
|
||||
alter table t1 drop constraint t1_fk_t2_id, drop t2_id, drop t2_id;
|
||||
drop table t1, t2;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.6 tests
|
||||
--echo #
|
||||
|
@ -74,7 +74,7 @@ SELECT 'bug' as '' FROM INFORMATION_SCHEMA.ENGINES WHERE engine='innodb'
|
||||
--echo # MDEV-13063 Server crashes in intern_plugin_lock or assertion `plugin_ptr->ref_count == 1' fails in plugin_init
|
||||
--echo #
|
||||
--error 1
|
||||
--exec $MYSQLD_BOOTSTRAP_CMD --myisam_recover_options=NONE
|
||||
--exec $MYSQLD_BOOTSTRAP_CMD --myisam_recover_options=NONE 2>/dev/null
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-19349 mysql_install_db: segfault at tmp_file_prefix check
|
||||
|
@ -12,7 +12,6 @@ INSERT INTO t1 VALUES (1,REPEAT('a',100)),(2,REPEAT('v',200)),(3,REPEAT('r',300)
|
||||
INSERT INTO t1 VALUES (5,REPEAT('k',500)),(6,'April'),(7,7),(8,""),(9,"M"),(10,DEFAULT);
|
||||
ALTER TABLE t1 ANALYZE PARTITION p1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status Engine-independent statistics collected
|
||||
test.t1 analyze status OK
|
||||
ALTER TABLE t1 CHECK PARTITION p2;
|
||||
Table Op Msg_type Msg_text
|
||||
|
@ -780,3 +780,27 @@ string
|
||||
#
|
||||
# End of 10.2 tests
|
||||
#
|
||||
#
|
||||
# MDEV-33318 ORDER BY COLLATE improperly applied to non-character columns
|
||||
#
|
||||
set names utf8;
|
||||
create table t1 (ts datetime);
|
||||
insert t1 values ('2024-01-26 21:37:54'), ('2024-01-26 21:37:54'),
|
||||
('2024-01-26 21:37:54'), ('2024-01-26 21:37:54'),
|
||||
('2024-01-26 21:37:58'), ('2024-01-26 21:37:58'),
|
||||
('2024-01-26 21:37:58'), ('2024-01-26 21:38:02'),
|
||||
('2024-01-26 21:38:02'), ('2024-01-26 21:38:02');
|
||||
select * from t1 order by ts collate utf8_bin;
|
||||
ts
|
||||
2024-01-26 21:37:54
|
||||
2024-01-26 21:37:54
|
||||
2024-01-26 21:37:54
|
||||
2024-01-26 21:37:54
|
||||
2024-01-26 21:37:58
|
||||
2024-01-26 21:37:58
|
||||
2024-01-26 21:37:58
|
||||
2024-01-26 21:38:02
|
||||
2024-01-26 21:38:02
|
||||
2024-01-26 21:38:02
|
||||
drop table t1;
|
||||
# End of 10.6 tests
|
||||
|
@ -357,3 +357,18 @@ SELECT COLUMN_GET(COLUMN_CREATE(0, 'string'),0 AS CHAR CHARACTER SET latin1 COLL
|
||||
--echo #
|
||||
--echo # End of 10.2 tests
|
||||
--echo #
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-33318 ORDER BY COLLATE improperly applied to non-character columns
|
||||
--echo #
|
||||
set names utf8;
|
||||
create table t1 (ts datetime);
|
||||
insert t1 values ('2024-01-26 21:37:54'), ('2024-01-26 21:37:54'),
|
||||
('2024-01-26 21:37:54'), ('2024-01-26 21:37:54'),
|
||||
('2024-01-26 21:37:58'), ('2024-01-26 21:37:58'),
|
||||
('2024-01-26 21:37:58'), ('2024-01-26 21:38:02'),
|
||||
('2024-01-26 21:38:02'), ('2024-01-26 21:38:02');
|
||||
select * from t1 order by ts collate utf8_bin;
|
||||
drop table t1;
|
||||
|
||||
--echo # End of 10.6 tests
|
||||
|
@ -6517,5 +6517,25 @@ SELECT 1 COLLATE latin1_swedish_ci;
|
||||
ERROR 42000: COLLATION 'latin1_swedish_ci' is not valid for CHARACTER SET 'ucs2'
|
||||
SET NAMES utf8;
|
||||
#
|
||||
# MDEV-33772 Bad SEPARATOR value in GROUP_CONCAT on character set conversion
|
||||
#
|
||||
SET NAMES utf8mb3, @@collation_connection=ucs2_general_ci;
|
||||
CREATE TABLE t1 (c VARCHAR(10)) CHARACTER SET ucs2;
|
||||
INSERT INTO t1 VALUES ('a'),('A');
|
||||
CREATE OR REPLACE VIEW v1 AS
|
||||
SELECT COUNT(*) AS cnt, GROUP_CONCAT(c) AS c1 FROM t1 GROUP BY c;
|
||||
SELECT * FROM v1;
|
||||
cnt c1
|
||||
2 a,A
|
||||
SELECT HEX(c1) FROM v1;
|
||||
HEX(c1)
|
||||
0061002C0041
|
||||
SHOW CREATE VIEW v1;
|
||||
View Create View character_set_client collation_connection
|
||||
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select count(0) AS `cnt`,group_concat(`t1`.`c` separator ',') AS `c1` from `t1` group by `t1`.`c` utf8mb3 ucs2_general_ci
|
||||
DROP VIEW v1;
|
||||
DROP TABLE t1;
|
||||
SET NAMES utf8mb3;
|
||||
#
|
||||
# End of 10.5 tests
|
||||
#
|
||||
|
@ -1193,6 +1193,23 @@ SELECT HEX(1 COLLATE ucs2_bin);
|
||||
SELECT 1 COLLATE latin1_swedish_ci;
|
||||
SET NAMES utf8;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-33772 Bad SEPARATOR value in GROUP_CONCAT on character set conversion
|
||||
--echo #
|
||||
|
||||
SET NAMES utf8mb3, @@collation_connection=ucs2_general_ci;
|
||||
CREATE TABLE t1 (c VARCHAR(10)) CHARACTER SET ucs2;
|
||||
INSERT INTO t1 VALUES ('a'),('A');
|
||||
CREATE OR REPLACE VIEW v1 AS
|
||||
SELECT COUNT(*) AS cnt, GROUP_CONCAT(c) AS c1 FROM t1 GROUP BY c;
|
||||
SELECT * FROM v1;
|
||||
SELECT HEX(c1) FROM v1;
|
||||
SHOW CREATE VIEW v1;
|
||||
DROP VIEW v1;
|
||||
DROP TABLE t1;
|
||||
SET NAMES utf8mb3;
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.5 tests
|
||||
--echo #
|
||||
|
@ -89,11 +89,6 @@ id x
|
||||
300 300
|
||||
connection con2;
|
||||
update t2 set a=2 where b = 0;
|
||||
select * from t2;
|
||||
b a
|
||||
0 2
|
||||
1 20
|
||||
2 30
|
||||
update t1 set x=2 where id = 0;
|
||||
connection con1;
|
||||
update t1 set x=1 where id = 0;
|
||||
|
@ -4354,8 +4354,74 @@ a
|
||||
drop table t1, t2;
|
||||
drop view v1;
|
||||
drop procedure aproc;
|
||||
#
|
||||
# MDEV-31305: Aggregation over materialized derived table
|
||||
#
|
||||
CREATE VIEW v AS
|
||||
SELECT seq1.seq AS dim1, seq2.seq AS dim2, seq3.seq AS dim3,
|
||||
FLOOR(RAND(13) * 5) AS p
|
||||
FROM seq_100_to_105 seq1
|
||||
JOIN seq_10_to_15 seq2
|
||||
JOIN seq_1_to_5 seq3;
|
||||
SELECT v.*, SUM(p) from v;
|
||||
dim1 dim2 dim3 p SUM(p)
|
||||
100 10 1 2 371
|
||||
SELECT d.*, SUM(p)
|
||||
FROM (
|
||||
SELECT seq1.seq AS dim1, seq2.seq AS dim2, seq3.seq AS dim3,
|
||||
FLOOR(RAND(13) * 5) AS p
|
||||
FROM seq_100_to_105 seq1
|
||||
JOIN seq_10_to_15 seq2
|
||||
JOIN seq_1_to_5 seq3
|
||||
) d;
|
||||
dim1 dim2 dim3 p SUM(p)
|
||||
100 10 1 2 371
|
||||
WITH demo AS
|
||||
(
|
||||
SELECT seq1.seq AS dim1, seq2.seq AS dim2, seq3.seq AS dim3,
|
||||
FLOOR(RAND(13) * 5) AS p
|
||||
FROM seq_100_to_105 seq1
|
||||
JOIN seq_10_to_15 seq2
|
||||
JOIN seq_1_to_5 seq3
|
||||
)
|
||||
SELECT d.*, SUM(p) FROM demo d;
|
||||
dim1 dim2 dim3 p SUM(p)
|
||||
100 10 1 2 371
|
||||
DROP VIEW v;
|
||||
# End of 10.4 tests
|
||||
#
|
||||
# MDEV-31277: 2-nd execution of PS to select from materialized view
|
||||
# specified as left join whose inner table is mergeable
|
||||
# derived containing a constant column
|
||||
#
|
||||
create table t1 (
|
||||
Election int(10) unsigned NOT NULL
|
||||
) engine=MyISAM;
|
||||
insert into t1 (Election) values (1), (4);
|
||||
create table t2 (
|
||||
VoteID int(10),
|
||||
ElectionID int(10),
|
||||
UserID int(10)
|
||||
);
|
||||
insert into t2 (ElectionID, UserID) values (2, 30), (3, 30);
|
||||
create view v1 as select * from t1
|
||||
left join ( select 'Y' AS Voted, ElectionID from t2 ) AS T
|
||||
on T.ElectionID = t1.Election
|
||||
limit 9;
|
||||
prepare stmt1 from "select * from v1";
|
||||
execute stmt1;
|
||||
Election Voted ElectionID
|
||||
1 NULL NULL
|
||||
4 NULL NULL
|
||||
execute stmt1;
|
||||
Election Voted ElectionID
|
||||
1 NULL NULL
|
||||
4 NULL NULL
|
||||
deallocate prepare stmt1;
|
||||
drop view v1;
|
||||
drop table t1, t2;
|
||||
# End of 10.5 tests
|
||||
#
|
||||
# MDEV-31143: view with ORDER BY used in query with rownum() in WHERE
|
||||
#
|
||||
create table t1 (id int primary key);
|
||||
|
@ -2803,8 +2803,80 @@ drop table t1, t2;
|
||||
drop view v1;
|
||||
drop procedure aproc;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-31305: Aggregation over materialized derived table
|
||||
--echo #
|
||||
|
||||
--source include/have_sequence.inc
|
||||
|
||||
CREATE VIEW v AS
|
||||
SELECT seq1.seq AS dim1, seq2.seq AS dim2, seq3.seq AS dim3,
|
||||
FLOOR(RAND(13) * 5) AS p
|
||||
FROM seq_100_to_105 seq1
|
||||
JOIN seq_10_to_15 seq2
|
||||
JOIN seq_1_to_5 seq3;
|
||||
|
||||
SELECT v.*, SUM(p) from v;
|
||||
|
||||
SELECT d.*, SUM(p)
|
||||
FROM (
|
||||
SELECT seq1.seq AS dim1, seq2.seq AS dim2, seq3.seq AS dim3,
|
||||
FLOOR(RAND(13) * 5) AS p
|
||||
FROM seq_100_to_105 seq1
|
||||
JOIN seq_10_to_15 seq2
|
||||
JOIN seq_1_to_5 seq3
|
||||
) d;
|
||||
|
||||
WITH demo AS
|
||||
(
|
||||
SELECT seq1.seq AS dim1, seq2.seq AS dim2, seq3.seq AS dim3,
|
||||
FLOOR(RAND(13) * 5) AS p
|
||||
FROM seq_100_to_105 seq1
|
||||
JOIN seq_10_to_15 seq2
|
||||
JOIN seq_1_to_5 seq3
|
||||
)
|
||||
SELECT d.*, SUM(p) FROM demo d;
|
||||
|
||||
DROP VIEW v;
|
||||
|
||||
--echo # End of 10.4 tests
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-31277: 2-nd execution of PS to select from materialized view
|
||||
--echo # specified as left join whose inner table is mergeable
|
||||
--echo # derived containing a constant column
|
||||
--echo #
|
||||
|
||||
create table t1 (
|
||||
Election int(10) unsigned NOT NULL
|
||||
) engine=MyISAM;
|
||||
|
||||
insert into t1 (Election) values (1), (4);
|
||||
|
||||
create table t2 (
|
||||
VoteID int(10),
|
||||
ElectionID int(10),
|
||||
UserID int(10)
|
||||
);
|
||||
|
||||
insert into t2 (ElectionID, UserID) values (2, 30), (3, 30);
|
||||
create view v1 as select * from t1
|
||||
left join ( select 'Y' AS Voted, ElectionID from t2 ) AS T
|
||||
on T.ElectionID = t1.Election
|
||||
limit 9;
|
||||
|
||||
prepare stmt1 from "select * from v1";
|
||||
|
||||
execute stmt1;
|
||||
execute stmt1;
|
||||
|
||||
deallocate prepare stmt1;
|
||||
|
||||
drop view v1;
|
||||
drop table t1, t2;
|
||||
|
||||
--echo # End of 10.5 tests
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-31143: view with ORDER BY used in query with rownum() in WHERE
|
||||
--echo #
|
||||
|
322
mysql-test/main/distinct_notembedded.result
Normal file
@ -0,0 +1,322 @@
|
||||
#
|
||||
# MDEV-30660 COUNT DISTINCT seems unnecessarily slow when run on a PK
|
||||
#
|
||||
set @save_optimizer_trace = @@optimizer_trace;
|
||||
SET optimizer_trace='enabled=on';
|
||||
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b INT NOT NULL);
|
||||
INSERT INTO t1 VALUES (1,1), (2,1), (3,1);
|
||||
# Optimization is applied (aggregator=simple):
|
||||
SELECT COUNT(DISTINCT a) FROM t1;
|
||||
COUNT(DISTINCT a)
|
||||
3
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t1.a)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
SELECT AVG(DISTINCT a), SUM(DISTINCT b) FROM t1;
|
||||
AVG(DISTINCT a) SUM(DISTINCT b)
|
||||
2.0000 1
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "avg(distinct t1.a)",
|
||||
"aggregator_type": "simple"
|
||||
},
|
||||
{
|
||||
"function": "sum(distinct t1.b)",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
# Only `a` is unique but it's enough to eliminate DISTINCT:
|
||||
SELECT COUNT(DISTINCT b, a) FROM t1;
|
||||
COUNT(DISTINCT b, a)
|
||||
3
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t1.b,t1.a)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
SELECT COUNT(DISTINCT a, a + b) FROM t1;
|
||||
COUNT(DISTINCT a, a + b)
|
||||
3
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t1.a,t1.a + t1.b)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
SELECT SUM(DISTINCT a), AVG(DISTINCT a), COUNT(DISTINCT a) FROM t1 WHERE a > 1;
|
||||
SUM(DISTINCT a) AVG(DISTINCT a) COUNT(DISTINCT a)
|
||||
5 2.5000 2
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "sum(distinct t1.a)",
|
||||
"aggregator_type": "simple"
|
||||
},
|
||||
{
|
||||
"function": "avg(distinct t1.a)",
|
||||
"aggregator_type": "simple"
|
||||
},
|
||||
{
|
||||
"function": "count(distinct t1.a)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
# Optimization is not applied 'cause function argument is not a field
|
||||
# (aggregator=distinct):
|
||||
SELECT SUM(DISTINCT a + b) FROM t1;
|
||||
SUM(DISTINCT a + b)
|
||||
9
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "sum(distinct t1.a + t1.b)",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
SELECT COUNT(DISTINCT b) FROM t1;
|
||||
COUNT(DISTINCT b)
|
||||
1
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t1.b)",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
SELECT AVG(DISTINCT b / a) FROM t1;
|
||||
AVG(DISTINCT b / a)
|
||||
0.61110000
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "avg(distinct t1.b / t1.a)",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
EXPLAIN SELECT COUNT(DISTINCT (SELECT a)) FROM t1;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 PRIMARY t1 index NULL PRIMARY 4 NULL 3 Using index
|
||||
2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL No tables used
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct (/* select#2 */ select t1.a))",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
CREATE TABLE t2 (a INT);
|
||||
INSERT INTO t2 VALUES (1), (2);
|
||||
# Optimization is not applied 'cause there is more than one table
|
||||
SELECT COUNT(DISTINCT t1.a) FROM t1, t2;
|
||||
COUNT(DISTINCT t1.a)
|
||||
3
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t1.a)",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
SELECT AVG(DISTINCT t1.a) FROM t1, t2;
|
||||
AVG(DISTINCT t1.a)
|
||||
2.0000
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "avg(distinct t1.a)",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
# Const tables, optimization is applied
|
||||
SELECT COUNT(DISTINCT a) FROM t1, (SELECT 1) AS t2;
|
||||
COUNT(DISTINCT a)
|
||||
3
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t1.a)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
SELECT AVG(DISTINCT t1.a) FROM (SELECT 1 AS a) AS t2, t1, (SELECT 2 AS a) AS t3;
|
||||
AVG(DISTINCT t1.a)
|
||||
2.0000
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "avg(distinct t1.a)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
SELECT COUNT(DISTINCT a) FROM t1, (SELECT 1 UNION SELECT 2) AS t2;
|
||||
COUNT(DISTINCT a)
|
||||
3
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t1.a)",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
# Unique index on two columns
|
||||
CREATE TABLE t3 (a INT NOT NULL, b INT NOT NULL);
|
||||
INSERT INTO t3 VALUES (1,1), (1,2), (1,3), (2,1), (2,2), (3,1), (3,2);
|
||||
CREATE UNIQUE INDEX t3_a_b ON t3 (a, b);
|
||||
# Optimization is applied:
|
||||
SELECT COUNT(DISTINCT a, b) FROM t3;
|
||||
COUNT(DISTINCT a, b)
|
||||
7
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t3.a,t3.b)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
SELECT COUNT(DISTINCT b, a) FROM t3;
|
||||
COUNT(DISTINCT b, a)
|
||||
7
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t3.b,t3.a)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
SELECT COUNT(DISTINCT b, a) FROM t3 WHERE a < 3;
|
||||
COUNT(DISTINCT b, a)
|
||||
5
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t3.b,t3.a)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
# Optimization is applied to one of the functions:
|
||||
SELECT COUNT(DISTINCT b), SUM(DISTINCT a), SUM(DISTINCT a + b) FROM t3 GROUP BY a;
|
||||
COUNT(DISTINCT b) SUM(DISTINCT a) SUM(DISTINCT a + b)
|
||||
3 1 9
|
||||
2 2 7
|
||||
2 3 9
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t3.b)",
|
||||
"aggregator_type": "simple"
|
||||
},
|
||||
{
|
||||
"function": "sum(distinct t3.a)",
|
||||
"aggregator_type": "distinct"
|
||||
},
|
||||
{
|
||||
"function": "sum(distinct t3.a + t3.b)",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
# Can't apply optimization 'cause GROUP BY argument is not a field:
|
||||
SELECT COUNT(DISTINCT b) FROM t3 GROUP BY a+b;
|
||||
COUNT(DISTINCT b)
|
||||
1
|
||||
2
|
||||
3
|
||||
1
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t3.b)",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
# Test merged view
|
||||
CREATE VIEW v1 AS SELECT * FROM t1;
|
||||
# Optimization is applied
|
||||
SELECT COUNT(DISTINCT a, b) FROM v1;
|
||||
COUNT(DISTINCT a, b)
|
||||
3
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t1.a,t1.b)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
# GROUP_CONCAT implements non-standard distinct aggregator
|
||||
SELECT GROUP_CONCAT(b) FROM t1;
|
||||
GROUP_CONCAT(b)
|
||||
1,1,1
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "group_concat(t1.b separator ',')",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
SELECT GROUP_CONCAT(DISTINCT b) FROM t1;
|
||||
GROUP_CONCAT(DISTINCT b)
|
||||
1
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "group_concat(distinct t1.b separator ',')",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
DROP TABLE t1, t2, t3;
|
||||
DROP VIEW v1;
|
||||
SET optimizer_trace = @save_optimizer_trace;
|
||||
#
|
||||
# end of 10.5 tests
|
||||
#
|
109
mysql-test/main/distinct_notembedded.test
Normal file
@ -0,0 +1,109 @@
|
||||
# Embedded doesn't have optimizer trace:
|
||||
--source include/not_embedded.inc
|
||||
--source include/have_sequence.inc
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-30660 COUNT DISTINCT seems unnecessarily slow when run on a PK
|
||||
--echo #
|
||||
|
||||
set @save_optimizer_trace = @@optimizer_trace;
|
||||
SET optimizer_trace='enabled=on';
|
||||
let $trace=
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '\$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
|
||||
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b INT NOT NULL);
|
||||
INSERT INTO t1 VALUES (1,1), (2,1), (3,1);
|
||||
|
||||
--echo # Optimization is applied (aggregator=simple):
|
||||
SELECT COUNT(DISTINCT a) FROM t1;
|
||||
eval $trace;
|
||||
|
||||
SELECT AVG(DISTINCT a), SUM(DISTINCT b) FROM t1;
|
||||
eval $trace;
|
||||
|
||||
--echo # Only `a` is unique but it's enough to eliminate DISTINCT:
|
||||
SELECT COUNT(DISTINCT b, a) FROM t1;
|
||||
eval $trace;
|
||||
|
||||
SELECT COUNT(DISTINCT a, a + b) FROM t1;
|
||||
eval $trace;
|
||||
|
||||
SELECT SUM(DISTINCT a), AVG(DISTINCT a), COUNT(DISTINCT a) FROM t1 WHERE a > 1;
|
||||
eval $trace;
|
||||
|
||||
--echo # Optimization is not applied 'cause function argument is not a field
|
||||
--echo # (aggregator=distinct):
|
||||
SELECT SUM(DISTINCT a + b) FROM t1;
|
||||
eval $trace;
|
||||
|
||||
SELECT COUNT(DISTINCT b) FROM t1;
|
||||
eval $trace;
|
||||
|
||||
SELECT AVG(DISTINCT b / a) FROM t1;
|
||||
eval $trace;
|
||||
|
||||
EXPLAIN SELECT COUNT(DISTINCT (SELECT a)) FROM t1;
|
||||
eval $trace;
|
||||
|
||||
CREATE TABLE t2 (a INT);
|
||||
INSERT INTO t2 VALUES (1), (2);
|
||||
|
||||
--echo # Optimization is not applied 'cause there is more than one table
|
||||
SELECT COUNT(DISTINCT t1.a) FROM t1, t2;
|
||||
eval $trace;
|
||||
|
||||
SELECT AVG(DISTINCT t1.a) FROM t1, t2;
|
||||
eval $trace;
|
||||
|
||||
--echo # Const tables, optimization is applied
|
||||
SELECT COUNT(DISTINCT a) FROM t1, (SELECT 1) AS t2;
|
||||
eval $trace;
|
||||
|
||||
SELECT AVG(DISTINCT t1.a) FROM (SELECT 1 AS a) AS t2, t1, (SELECT 2 AS a) AS t3;
|
||||
eval $trace;
|
||||
|
||||
SELECT COUNT(DISTINCT a) FROM t1, (SELECT 1 UNION SELECT 2) AS t2;
|
||||
eval $trace;
|
||||
|
||||
--echo # Unique index on two columns
|
||||
CREATE TABLE t3 (a INT NOT NULL, b INT NOT NULL);
|
||||
INSERT INTO t3 VALUES (1,1), (1,2), (1,3), (2,1), (2,2), (3,1), (3,2);
|
||||
CREATE UNIQUE INDEX t3_a_b ON t3 (a, b);
|
||||
--echo # Optimization is applied:
|
||||
SELECT COUNT(DISTINCT a, b) FROM t3;
|
||||
eval $trace;
|
||||
|
||||
SELECT COUNT(DISTINCT b, a) FROM t3;
|
||||
eval $trace;
|
||||
|
||||
SELECT COUNT(DISTINCT b, a) FROM t3 WHERE a < 3;
|
||||
eval $trace;
|
||||
|
||||
--echo # Optimization is applied to one of the functions:
|
||||
SELECT COUNT(DISTINCT b), SUM(DISTINCT a), SUM(DISTINCT a + b) FROM t3 GROUP BY a;
|
||||
eval $trace;
|
||||
|
||||
--echo # Can't apply optimization 'cause GROUP BY argument is not a field:
|
||||
SELECT COUNT(DISTINCT b) FROM t3 GROUP BY a+b;
|
||||
eval $trace;
|
||||
|
||||
--echo # Test merged view
|
||||
CREATE VIEW v1 AS SELECT * FROM t1;
|
||||
--echo # Optimization is applied
|
||||
SELECT COUNT(DISTINCT a, b) FROM v1;
|
||||
eval $trace;
|
||||
|
||||
--echo # GROUP_CONCAT implements non-standard distinct aggregator
|
||||
SELECT GROUP_CONCAT(b) FROM t1;
|
||||
eval $trace;
|
||||
|
||||
SELECT GROUP_CONCAT(DISTINCT b) FROM t1;
|
||||
eval $trace;
|
||||
|
||||
DROP TABLE t1, t2, t3;
|
||||
DROP VIEW v1;
|
||||
SET optimizer_trace = @save_optimizer_trace;
|
||||
--echo #
|
||||
--echo # end of 10.5 tests
|
||||
--echo #
|
@ -64,7 +64,7 @@ SET sql_mode=@mode;
|
||||
# Test literal concat
|
||||
#
|
||||
SELECT 'a' 'b';
|
||||
a
|
||||
ab
|
||||
ab
|
||||
SELECT 'a' '';
|
||||
a
|
||||
@ -76,13 +76,13 @@ SELECT '' '';
|
||||
NULL
|
||||
NULL
|
||||
SELECT '' 'b' 'c';
|
||||
b
|
||||
bc
|
||||
bc
|
||||
SELECT '' '' 'c';
|
||||
c
|
||||
c
|
||||
SELECT 'a' '' 'c';
|
||||
a
|
||||
ac
|
||||
ac
|
||||
SELECT 'a' '' '';
|
||||
a
|
||||
@ -208,3 +208,22 @@ t1 CREATE TABLE `t1` (
|
||||
KEY `a` (`a`,`b`)
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
|
||||
drop table t1;
|
||||
#
|
||||
# MDEV-33460 select '123' 'x'; unexpected result
|
||||
#
|
||||
SELECT '';
|
||||
NULL
|
||||
NULL
|
||||
SELECT '' 'b' 'c';
|
||||
bc
|
||||
bc
|
||||
SELECT '' '' 'c';
|
||||
c
|
||||
c
|
||||
SELECT 'a' '' 'c';
|
||||
ac
|
||||
ac
|
||||
SELECT 'a' '' '';
|
||||
a
|
||||
a
|
||||
# End of 10.5 test
|
||||
|
@ -25,3 +25,15 @@ flush tables;
|
||||
update t1 set a = 2;
|
||||
show create table t1;
|
||||
drop table t1;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-33460 select '123' 'x'; unexpected result
|
||||
--echo #
|
||||
|
||||
SELECT '';
|
||||
SELECT '' 'b' 'c';
|
||||
SELECT '' '' 'c';
|
||||
SELECT 'a' '' 'c';
|
||||
SELECT 'a' '' '';
|
||||
|
||||
--echo # End of 10.5 test
|
||||
|
@ -590,3 +590,885 @@ Warning 1292 Truncated incorrect time value: '01:02:03/'
|
||||
Warning 1292 Truncated incorrect time value: '01:02:03/'
|
||||
Warning 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '01:02:03/'
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# Start of 10.5 tests
|
||||
#
|
||||
#
|
||||
# MDEV-33496 Out of range error in AVG(YEAR(datetime)) due to a wrong data type
|
||||
#
|
||||
CREATE FUNCTION select01() RETURNS TEXT RETURN 'SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?';
|
||||
CREATE FUNCTION select02() RETURNS TEXT RETURN 'SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)';
|
||||
CREATE TABLE t1 (a DATETIME(6));
|
||||
INSERT INTO t1 VALUES ('2001-12-31 10:20:30.999999');
|
||||
CREATE FUNCTION params(expr TEXT, count INT) RETURNS TEXT
|
||||
BEGIN
|
||||
RETURN CONCAT(expr, REPEAT(CONCAT(', ', expr), count-1));
|
||||
END;
|
||||
$$
|
||||
CREATE PROCEDURE show_drop()
|
||||
BEGIN
|
||||
SELECT TABLE_NAME, COLUMN_TYPE, COLUMN_NAME
|
||||
FROM INFORMATION_SCHEMA.COLUMNS
|
||||
WHERE TABLE_SCHEMA='test'
|
||||
AND TABLE_NAME IN ('t1e_nm','t2e_nm','t1f_nm','t2f_nm',
|
||||
't1e_ps','t1f_ps','t2e_ps','t2f_ps')
|
||||
ORDER BY LEFT(TABLE_NAME, 2), ORDINAL_POSITION, TABLE_NAME;
|
||||
FOR rec IN (SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES
|
||||
WHERE TABLE_SCHEMA='test'
|
||||
AND TABLE_NAME IN ('t1e_nm','t2e_nm','t1f_nm','t2f_nm',
|
||||
't1e_ps','t1f_ps','t2e_ps','t2f_ps'))
|
||||
DO
|
||||
EXECUTE IMMEDIATE CONCAT('DROP TABLE ', rec.TABLE_NAME);
|
||||
END FOR;
|
||||
END;
|
||||
$$
|
||||
CREATE PROCEDURE p1(unit VARCHAR(32))
|
||||
BEGIN
|
||||
DECLARE do_extract BOOL DEFAULT unit NOT IN('DAYOFYEAR');
|
||||
DECLARE query01 TEXT DEFAULT
|
||||
CONCAT('CREATE TABLE t2 AS ', select01(), ' FROM t1');
|
||||
DECLARE query02 TEXT DEFAULT
|
||||
CONCAT('CREATE TABLE t2 AS ', select02(), ' FROM t1');
|
||||
IF (do_extract)
|
||||
THEN
|
||||
EXECUTE IMMEDIATE REPLACE(REPLACE(query01,'t2','t1e_nm'),'?', CONCAT('EXTRACT(',unit,' FROM a)'));
|
||||
EXECUTE IMMEDIATE REPLACE(REPLACE(query02,'t2','t2e_nm'),'?', CONCAT('EXTRACT(',unit,' FROM a)'));
|
||||
END IF;
|
||||
EXECUTE IMMEDIATE REPLACE(REPLACE(query01,'t2','t1f_nm'),'?', CONCAT(unit,'(a)'));
|
||||
EXECUTE IMMEDIATE REPLACE(REPLACE(query02,'t2','t2f_nm'),'?', CONCAT(unit,'(a)'));
|
||||
END;
|
||||
$$
|
||||
|
||||
|
||||
# EXTRACT(YEAR FROM expr) and YEAR(expr) are equivalent
|
||||
CALL p1('YEAR');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
CALL show_drop;
|
||||
TABLE_NAME COLUMN_TYPE COLUMN_NAME
|
||||
t1e_nm int(5) EXTRACT(YEAR FROM a)
|
||||
t1e_ps int(5) ?
|
||||
t1f_nm int(5) YEAR(a)
|
||||
t1f_ps int(5) ?
|
||||
t1e_nm int(4) unsigned CAST(EXTRACT(YEAR FROM a) AS UNSIGNED)
|
||||
t1e_ps int(4) unsigned CAST(? AS UNSIGNED)
|
||||
t1f_nm int(4) unsigned CAST(YEAR(a) AS UNSIGNED)
|
||||
t1f_ps int(4) unsigned CAST(? AS UNSIGNED)
|
||||
t1e_nm int(5) CAST(EXTRACT(YEAR FROM a) AS SIGNED)
|
||||
t1e_ps int(5) CAST(? AS SIGNED)
|
||||
t1f_nm int(5) CAST(YEAR(a) AS SIGNED)
|
||||
t1f_ps int(5) CAST(? AS SIGNED)
|
||||
t1e_nm int(5) ABS(EXTRACT(YEAR FROM a))
|
||||
t1e_ps int(5) ABS(?)
|
||||
t1f_nm int(5) ABS(YEAR(a))
|
||||
t1f_ps int(5) ABS(?)
|
||||
t1e_nm int(5) ROUND(EXTRACT(YEAR FROM a))
|
||||
t1e_ps int(5) ROUND(?)
|
||||
t1f_nm int(5) ROUND(YEAR(a))
|
||||
t1f_ps int(5) ROUND(?)
|
||||
t1e_nm int(5) -EXTRACT(YEAR FROM a)
|
||||
t1e_ps int(5) -?
|
||||
t1f_nm int(5) -YEAR(a)
|
||||
t1f_ps int(5) -?
|
||||
t1e_nm int(6) ROUND(EXTRACT(YEAR FROM a),-1)
|
||||
t1e_ps int(6) ROUND(?,-1)
|
||||
t1f_nm int(6) ROUND(YEAR(a),-1)
|
||||
t1f_ps int(6) ROUND(?,-1)
|
||||
t1e_nm int(6) EXTRACT(YEAR FROM a)+0
|
||||
t1e_ps int(6) ?+0
|
||||
t1f_nm int(6) YEAR(a)+0
|
||||
t1f_ps int(6) ?+0
|
||||
t1e_nm decimal(6,1) EXTRACT(YEAR FROM a)+0.0
|
||||
t1e_ps decimal(6,1) ?+0.0
|
||||
t1f_nm decimal(6,1) YEAR(a)+0.0
|
||||
t1f_ps decimal(6,1) ?+0.0
|
||||
t1e_nm varchar(4) CONCAT(EXTRACT(YEAR FROM a))
|
||||
t1e_ps varchar(4) CONCAT(?)
|
||||
t1f_nm varchar(4) CONCAT(YEAR(a))
|
||||
t1f_ps varchar(4) CONCAT(?)
|
||||
t1e_nm int(5) LEAST(EXTRACT(YEAR FROM a),EXTRACT(YEAR FROM a))
|
||||
t1e_ps int(5) LEAST(?,?)
|
||||
t1f_nm int(5) LEAST(YEAR(a),YEAR(a))
|
||||
t1f_ps int(5) LEAST(?,?)
|
||||
t1e_nm int(5) COALESCE(EXTRACT(YEAR FROM a))
|
||||
t1e_ps int(5) COALESCE(?)
|
||||
t1f_nm int(5) COALESCE(YEAR(a))
|
||||
t1f_ps int(5) COALESCE(?)
|
||||
t1e_nm int(5) COALESCE(EXTRACT(YEAR FROM a),CAST(1 AS SIGNED))
|
||||
t1e_ps int(5) COALESCE(?,CAST(1 AS SIGNED))
|
||||
t1f_nm int(5) COALESCE(YEAR(a),CAST(1 AS SIGNED))
|
||||
t1f_ps int(5) COALESCE(?,CAST(1 AS SIGNED))
|
||||
t1e_nm decimal(4,0) COALESCE(EXTRACT(YEAR FROM a),CAST(1 AS UNSIGNED))
|
||||
t1e_ps decimal(4,0) COALESCE(?,CAST(1 AS UNSIGNED))
|
||||
t1f_nm decimal(4,0) COALESCE(YEAR(a),CAST(1 AS UNSIGNED))
|
||||
t1f_ps decimal(4,0) COALESCE(?,CAST(1 AS UNSIGNED))
|
||||
t1e_nm int(5) @a:=EXTRACT(YEAR FROM a)
|
||||
t1e_ps int(5) @a:=?
|
||||
t1f_nm int(5) @a:=YEAR(a)
|
||||
t1f_ps int(5) @a:=?
|
||||
t2e_nm decimal(8,4) AVG(EXTRACT(YEAR FROM a))
|
||||
t2e_ps decimal(8,4) AVG(?)
|
||||
t2f_nm decimal(8,4) AVG(YEAR(a))
|
||||
t2f_ps decimal(8,4) AVG(?)
|
||||
t2e_nm bigint(5) MIN(EXTRACT(YEAR FROM a))
|
||||
t2e_ps bigint(5) MIN(?)
|
||||
t2f_nm bigint(5) MIN(YEAR(a))
|
||||
t2f_ps bigint(5) MIN(?)
|
||||
t2e_nm bigint(5) MAX(EXTRACT(YEAR FROM a))
|
||||
t2e_ps bigint(5) MAX(?)
|
||||
t2f_nm bigint(5) MAX(YEAR(a))
|
||||
t2f_ps bigint(5) MAX(?)
|
||||
t2e_nm mediumtext GROUP_CONCAT(EXTRACT(YEAR FROM a))
|
||||
t2e_ps mediumtext GROUP_CONCAT(?)
|
||||
t2f_nm mediumtext GROUP_CONCAT(YEAR(a))
|
||||
t2f_ps mediumtext GROUP_CONCAT(?)
|
||||
|
||||
|
||||
# EXTRACT(QUARTER FROM expr) and QUARTER(expr) are equivalent
|
||||
CALL p1('QUARTER');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
CALL show_drop;
|
||||
TABLE_NAME COLUMN_TYPE COLUMN_NAME
|
||||
t1e_nm int(2) EXTRACT(QUARTER FROM a)
|
||||
t1e_ps int(2) ?
|
||||
t1f_nm int(2) QUARTER(a)
|
||||
t1f_ps int(2) ?
|
||||
t1e_nm int(1) unsigned CAST(EXTRACT(QUARTER FROM a) AS UNSIGNED)
|
||||
t1e_ps int(1) unsigned CAST(? AS UNSIGNED)
|
||||
t1f_nm int(1) unsigned CAST(QUARTER(a) AS UNSIGNED)
|
||||
t1f_ps int(1) unsigned CAST(? AS UNSIGNED)
|
||||
t1e_nm int(2) CAST(EXTRACT(QUARTER FROM a) AS SIGNED)
|
||||
t1e_ps int(2) CAST(? AS SIGNED)
|
||||
t1f_nm int(2) CAST(QUARTER(a) AS SIGNED)
|
||||
t1f_ps int(2) CAST(? AS SIGNED)
|
||||
t1e_nm int(2) ABS(EXTRACT(QUARTER FROM a))
|
||||
t1e_ps int(2) ABS(?)
|
||||
t1f_nm int(2) ABS(QUARTER(a))
|
||||
t1f_ps int(2) ABS(?)
|
||||
t1e_nm int(2) ROUND(EXTRACT(QUARTER FROM a))
|
||||
t1e_ps int(2) ROUND(?)
|
||||
t1f_nm int(2) ROUND(QUARTER(a))
|
||||
t1f_ps int(2) ROUND(?)
|
||||
t1e_nm int(2) -EXTRACT(QUARTER FROM a)
|
||||
t1e_ps int(2) -?
|
||||
t1f_nm int(2) -QUARTER(a)
|
||||
t1f_ps int(2) -?
|
||||
t1e_nm int(3) ROUND(EXTRACT(QUARTER FROM a),-1)
|
||||
t1e_ps int(3) ROUND(?,-1)
|
||||
t1f_nm int(3) ROUND(QUARTER(a),-1)
|
||||
t1f_ps int(3) ROUND(?,-1)
|
||||
t1e_nm int(3) EXTRACT(QUARTER FROM a)+0
|
||||
t1e_ps int(3) ?+0
|
||||
t1f_nm int(3) QUARTER(a)+0
|
||||
t1f_ps int(3) ?+0
|
||||
t1e_nm decimal(3,1) EXTRACT(QUARTER FROM a)+0.0
|
||||
t1e_ps decimal(3,1) ?+0.0
|
||||
t1f_nm decimal(3,1) QUARTER(a)+0.0
|
||||
t1f_ps decimal(3,1) ?+0.0
|
||||
t1e_nm varchar(1) CONCAT(EXTRACT(QUARTER FROM a))
|
||||
t1e_ps varchar(1) CONCAT(?)
|
||||
t1f_nm varchar(1) CONCAT(QUARTER(a))
|
||||
t1f_ps varchar(1) CONCAT(?)
|
||||
t1e_nm int(2) LEAST(EXTRACT(QUARTER FROM a),EXTRACT(QUARTER FROM a))
|
||||
t1e_ps int(2) LEAST(?,?)
|
||||
t1f_nm int(2) LEAST(QUARTER(a),QUARTER(a))
|
||||
t1f_ps int(2) LEAST(?,?)
|
||||
t1e_nm int(2) COALESCE(EXTRACT(QUARTER FROM a))
|
||||
t1e_ps int(2) COALESCE(?)
|
||||
t1f_nm int(2) COALESCE(QUARTER(a))
|
||||
t1f_ps int(2) COALESCE(?)
|
||||
t1e_nm int(2) COALESCE(EXTRACT(QUARTER FROM a),CAST(1 AS SIGNED))
|
||||
t1e_ps int(2) COALESCE(?,CAST(1 AS SIGNED))
|
||||
t1f_nm int(2) COALESCE(QUARTER(a),CAST(1 AS SIGNED))
|
||||
t1f_ps int(2) COALESCE(?,CAST(1 AS SIGNED))
|
||||
t1e_nm decimal(1,0) COALESCE(EXTRACT(QUARTER FROM a),CAST(1 AS UNSIGNED))
|
||||
t1e_ps decimal(1,0) COALESCE(?,CAST(1 AS UNSIGNED))
|
||||
t1f_nm decimal(1,0) COALESCE(QUARTER(a),CAST(1 AS UNSIGNED))
|
||||
t1f_ps decimal(1,0) COALESCE(?,CAST(1 AS UNSIGNED))
|
||||
t1e_nm int(2) @a:=EXTRACT(QUARTER FROM a)
|
||||
t1e_ps int(2) @a:=?
|
||||
t1f_nm int(2) @a:=QUARTER(a)
|
||||
t1f_ps int(2) @a:=?
|
||||
t2e_nm decimal(5,4) AVG(EXTRACT(QUARTER FROM a))
|
||||
t2e_ps decimal(5,4) AVG(?)
|
||||
t2f_nm decimal(5,4) AVG(QUARTER(a))
|
||||
t2f_ps decimal(5,4) AVG(?)
|
||||
t2e_nm bigint(2) MIN(EXTRACT(QUARTER FROM a))
|
||||
t2e_ps bigint(2) MIN(?)
|
||||
t2f_nm bigint(2) MIN(QUARTER(a))
|
||||
t2f_ps bigint(2) MIN(?)
|
||||
t2e_nm bigint(2) MAX(EXTRACT(QUARTER FROM a))
|
||||
t2e_ps bigint(2) MAX(?)
|
||||
t2f_nm bigint(2) MAX(QUARTER(a))
|
||||
t2f_ps bigint(2) MAX(?)
|
||||
t2e_nm mediumtext GROUP_CONCAT(EXTRACT(QUARTER FROM a))
|
||||
t2e_ps mediumtext GROUP_CONCAT(?)
|
||||
t2f_nm mediumtext GROUP_CONCAT(QUARTER(a))
|
||||
t2f_ps mediumtext GROUP_CONCAT(?)
|
||||
|
||||
|
||||
# EXTRACT(MONTH FROM expr) and MONTH(expr) are equivalent
|
||||
CALL p1('MONTH');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
CALL show_drop;
|
||||
TABLE_NAME COLUMN_TYPE COLUMN_NAME
|
||||
t1e_nm int(3) EXTRACT(MONTH FROM a)
|
||||
t1e_ps int(3) ?
|
||||
t1f_nm int(3) MONTH(a)
|
||||
t1f_ps int(3) ?
|
||||
t1e_nm int(2) unsigned CAST(EXTRACT(MONTH FROM a) AS UNSIGNED)
|
||||
t1e_ps int(2) unsigned CAST(? AS UNSIGNED)
|
||||
t1f_nm int(2) unsigned CAST(MONTH(a) AS UNSIGNED)
|
||||
t1f_ps int(2) unsigned CAST(? AS UNSIGNED)
|
||||
t1e_nm int(3) CAST(EXTRACT(MONTH FROM a) AS SIGNED)
|
||||
t1e_ps int(3) CAST(? AS SIGNED)
|
||||
t1f_nm int(3) CAST(MONTH(a) AS SIGNED)
|
||||
t1f_ps int(3) CAST(? AS SIGNED)
|
||||
t1e_nm int(3) ABS(EXTRACT(MONTH FROM a))
|
||||
t1e_ps int(3) ABS(?)
|
||||
t1f_nm int(3) ABS(MONTH(a))
|
||||
t1f_ps int(3) ABS(?)
|
||||
t1e_nm int(3) ROUND(EXTRACT(MONTH FROM a))
|
||||
t1e_ps int(3) ROUND(?)
|
||||
t1f_nm int(3) ROUND(MONTH(a))
|
||||
t1f_ps int(3) ROUND(?)
|
||||
t1e_nm int(3) -EXTRACT(MONTH FROM a)
|
||||
t1e_ps int(3) -?
|
||||
t1f_nm int(3) -MONTH(a)
|
||||
t1f_ps int(3) -?
|
||||
t1e_nm int(4) ROUND(EXTRACT(MONTH FROM a),-1)
|
||||
t1e_ps int(4) ROUND(?,-1)
|
||||
t1f_nm int(4) ROUND(MONTH(a),-1)
|
||||
t1f_ps int(4) ROUND(?,-1)
|
||||
t1e_nm int(4) EXTRACT(MONTH FROM a)+0
|
||||
t1e_ps int(4) ?+0
|
||||
t1f_nm int(4) MONTH(a)+0
|
||||
t1f_ps int(4) ?+0
|
||||
t1e_nm decimal(4,1) EXTRACT(MONTH FROM a)+0.0
|
||||
t1e_ps decimal(4,1) ?+0.0
|
||||
t1f_nm decimal(4,1) MONTH(a)+0.0
|
||||
t1f_ps decimal(4,1) ?+0.0
|
||||
t1e_nm varchar(2) CONCAT(EXTRACT(MONTH FROM a))
|
||||
t1e_ps varchar(2) CONCAT(?)
|
||||
t1f_nm varchar(2) CONCAT(MONTH(a))
|
||||
t1f_ps varchar(2) CONCAT(?)
|
||||
t1e_nm int(3) LEAST(EXTRACT(MONTH FROM a),EXTRACT(MONTH FROM a))
|
||||
t1e_ps int(3) LEAST(?,?)
|
||||
t1f_nm int(3) LEAST(MONTH(a),MONTH(a))
|
||||
t1f_ps int(3) LEAST(?,?)
|
||||
t1e_nm int(3) COALESCE(EXTRACT(MONTH FROM a))
|
||||
t1e_ps int(3) COALESCE(?)
|
||||
t1f_nm int(3) COALESCE(MONTH(a))
|
||||
t1f_ps int(3) COALESCE(?)
|
||||
t1e_nm int(3) COALESCE(EXTRACT(MONTH FROM a),CAST(1 AS SIGNED))
|
||||
t1e_ps int(3) COALESCE(?,CAST(1 AS SIGNED))
|
||||
t1f_nm int(3) COALESCE(MONTH(a),CAST(1 AS SIGNED))
|
||||
t1f_ps int(3) COALESCE(?,CAST(1 AS SIGNED))
|
||||
t1e_nm decimal(2,0) COALESCE(EXTRACT(MONTH FROM a),CAST(1 AS UNSIGNED))
|
||||
t1e_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED))
|
||||
t1f_nm decimal(2,0) COALESCE(MONTH(a),CAST(1 AS UNSIGNED))
|
||||
t1f_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED))
|
||||
t1e_nm int(3) @a:=EXTRACT(MONTH FROM a)
|
||||
t1e_ps int(3) @a:=?
|
||||
t1f_nm int(3) @a:=MONTH(a)
|
||||
t1f_ps int(3) @a:=?
|
||||
t2e_nm decimal(6,4) AVG(EXTRACT(MONTH FROM a))
|
||||
t2e_ps decimal(6,4) AVG(?)
|
||||
t2f_nm decimal(6,4) AVG(MONTH(a))
|
||||
t2f_ps decimal(6,4) AVG(?)
|
||||
t2e_nm bigint(3) MIN(EXTRACT(MONTH FROM a))
|
||||
t2e_ps bigint(3) MIN(?)
|
||||
t2f_nm bigint(3) MIN(MONTH(a))
|
||||
t2f_ps bigint(3) MIN(?)
|
||||
t2e_nm bigint(3) MAX(EXTRACT(MONTH FROM a))
|
||||
t2e_ps bigint(3) MAX(?)
|
||||
t2f_nm bigint(3) MAX(MONTH(a))
|
||||
t2f_ps bigint(3) MAX(?)
|
||||
t2e_nm mediumtext GROUP_CONCAT(EXTRACT(MONTH FROM a))
|
||||
t2e_ps mediumtext GROUP_CONCAT(?)
|
||||
t2f_nm mediumtext GROUP_CONCAT(MONTH(a))
|
||||
t2f_ps mediumtext GROUP_CONCAT(?)
|
||||
|
||||
|
||||
# EXTRACT(WEEK FROM expr) and WEEK(expr) are equivalent
|
||||
CALL p1('WEEK');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
CALL show_drop;
|
||||
TABLE_NAME COLUMN_TYPE COLUMN_NAME
|
||||
t1e_nm int(3) EXTRACT(WEEK FROM a)
|
||||
t1e_ps int(3) ?
|
||||
t1f_nm int(3) WEEK(a)
|
||||
t1f_ps int(3) ?
|
||||
t1e_nm int(2) unsigned CAST(EXTRACT(WEEK FROM a) AS UNSIGNED)
|
||||
t1e_ps int(2) unsigned CAST(? AS UNSIGNED)
|
||||
t1f_nm int(2) unsigned CAST(WEEK(a) AS UNSIGNED)
|
||||
t1f_ps int(2) unsigned CAST(? AS UNSIGNED)
|
||||
t1e_nm int(3) CAST(EXTRACT(WEEK FROM a) AS SIGNED)
|
||||
t1e_ps int(3) CAST(? AS SIGNED)
|
||||
t1f_nm int(3) CAST(WEEK(a) AS SIGNED)
|
||||
t1f_ps int(3) CAST(? AS SIGNED)
|
||||
t1e_nm int(3) ABS(EXTRACT(WEEK FROM a))
|
||||
t1e_ps int(3) ABS(?)
|
||||
t1f_nm int(3) ABS(WEEK(a))
|
||||
t1f_ps int(3) ABS(?)
|
||||
t1e_nm int(3) ROUND(EXTRACT(WEEK FROM a))
|
||||
t1e_ps int(3) ROUND(?)
|
||||
t1f_nm int(3) ROUND(WEEK(a))
|
||||
t1f_ps int(3) ROUND(?)
|
||||
t1e_nm int(3) -EXTRACT(WEEK FROM a)
|
||||
t1e_ps int(3) -?
|
||||
t1f_nm int(3) -WEEK(a)
|
||||
t1f_ps int(3) -?
|
||||
t1e_nm int(4) ROUND(EXTRACT(WEEK FROM a),-1)
|
||||
t1e_ps int(4) ROUND(?,-1)
|
||||
t1f_nm int(4) ROUND(WEEK(a),-1)
|
||||
t1f_ps int(4) ROUND(?,-1)
|
||||
t1e_nm int(4) EXTRACT(WEEK FROM a)+0
|
||||
t1e_ps int(4) ?+0
|
||||
t1f_nm int(4) WEEK(a)+0
|
||||
t1f_ps int(4) ?+0
|
||||
t1e_nm decimal(4,1) EXTRACT(WEEK FROM a)+0.0
|
||||
t1e_ps decimal(4,1) ?+0.0
|
||||
t1f_nm decimal(4,1) WEEK(a)+0.0
|
||||
t1f_ps decimal(4,1) ?+0.0
|
||||
t1e_nm varchar(2) CONCAT(EXTRACT(WEEK FROM a))
|
||||
t1e_ps varchar(2) CONCAT(?)
|
||||
t1f_nm varchar(2) CONCAT(WEEK(a))
|
||||
t1f_ps varchar(2) CONCAT(?)
|
||||
t1e_nm int(3) LEAST(EXTRACT(WEEK FROM a),EXTRACT(WEEK FROM a))
|
||||
t1e_ps int(3) LEAST(?,?)
|
||||
t1f_nm int(3) LEAST(WEEK(a),WEEK(a))
|
||||
t1f_ps int(3) LEAST(?,?)
|
||||
t1e_nm int(3) COALESCE(EXTRACT(WEEK FROM a))
|
||||
t1e_ps int(3) COALESCE(?)
|
||||
t1f_nm int(3) COALESCE(WEEK(a))
|
||||
t1f_ps int(3) COALESCE(?)
|
||||
t1e_nm int(3) COALESCE(EXTRACT(WEEK FROM a),CAST(1 AS SIGNED))
|
||||
t1e_ps int(3) COALESCE(?,CAST(1 AS SIGNED))
|
||||
t1f_nm int(3) COALESCE(WEEK(a),CAST(1 AS SIGNED))
|
||||
t1f_ps int(3) COALESCE(?,CAST(1 AS SIGNED))
|
||||
t1e_nm decimal(2,0) COALESCE(EXTRACT(WEEK FROM a),CAST(1 AS UNSIGNED))
|
||||
t1e_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED))
|
||||
t1f_nm decimal(2,0) COALESCE(WEEK(a),CAST(1 AS UNSIGNED))
|
||||
t1f_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED))
|
||||
t1e_nm int(3) @a:=EXTRACT(WEEK FROM a)
|
||||
t1e_ps int(3) @a:=?
|
||||
t1f_nm int(3) @a:=WEEK(a)
|
||||
t1f_ps int(3) @a:=?
|
||||
t2e_nm decimal(6,4) AVG(EXTRACT(WEEK FROM a))
|
||||
t2e_ps decimal(6,4) AVG(?)
|
||||
t2f_nm decimal(6,4) AVG(WEEK(a))
|
||||
t2f_ps decimal(6,4) AVG(?)
|
||||
t2e_nm bigint(3) MIN(EXTRACT(WEEK FROM a))
|
||||
t2e_ps bigint(3) MIN(?)
|
||||
t2f_nm bigint(3) MIN(WEEK(a))
|
||||
t2f_ps bigint(3) MIN(?)
|
||||
t2e_nm bigint(3) MAX(EXTRACT(WEEK FROM a))
|
||||
t2e_ps bigint(3) MAX(?)
|
||||
t2f_nm bigint(3) MAX(WEEK(a))
|
||||
t2f_ps bigint(3) MAX(?)
|
||||
t2e_nm mediumtext GROUP_CONCAT(EXTRACT(WEEK FROM a))
|
||||
t2e_ps mediumtext GROUP_CONCAT(?)
|
||||
t2f_nm mediumtext GROUP_CONCAT(WEEK(a))
|
||||
t2f_ps mediumtext GROUP_CONCAT(?)
# EXTRACT(DAY FROM expr) returns hours/24 and includes the sign for TIME
|
||||
# DAY(expr) returns the DD part of CAST(expr AS DATETIME)
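# Annotation (not part of the recorded output): the two forms only diverge
# for TIME input. A sketch, with expected values hedged per the notes above:
#
#   SELECT EXTRACT(DAY FROM TIME'-48:30:00');  -- hours/24 with sign kept: -2
#   SELECT DAY(TIME'-48:30:00');               -- DD of CAST(TIME'-48:30:00' AS DATETIME),
#                                              -- so the value depends on CURRENT_DATE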
|
||||
CALL p1('DAY');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
CALL show_drop;
|
||||
TABLE_NAME COLUMN_TYPE COLUMN_NAME
|
||||
t1e_nm int(3) EXTRACT(DAY FROM a)
|
||||
t1e_ps int(3) ?
|
||||
t1f_nm int(3) DAY(a)
|
||||
t1f_ps int(3) ?
|
||||
t1e_nm bigint(20) unsigned CAST(EXTRACT(DAY FROM a) AS UNSIGNED)
|
||||
t1e_ps bigint(20) unsigned CAST(? AS UNSIGNED)
|
||||
t1f_nm int(2) unsigned CAST(DAY(a) AS UNSIGNED)
|
||||
t1f_ps int(2) unsigned CAST(? AS UNSIGNED)
|
||||
t1e_nm int(3) CAST(EXTRACT(DAY FROM a) AS SIGNED)
|
||||
t1e_ps int(3) CAST(? AS SIGNED)
|
||||
t1f_nm int(3) CAST(DAY(a) AS SIGNED)
|
||||
t1f_ps int(3) CAST(? AS SIGNED)
|
||||
t1e_nm int(3) ABS(EXTRACT(DAY FROM a))
|
||||
t1e_ps int(3) ABS(?)
|
||||
t1f_nm int(3) ABS(DAY(a))
|
||||
t1f_ps int(3) ABS(?)
|
||||
t1e_nm int(3) ROUND(EXTRACT(DAY FROM a))
|
||||
t1e_ps int(3) ROUND(?)
|
||||
t1f_nm int(3) ROUND(DAY(a))
|
||||
t1f_ps int(3) ROUND(?)
|
||||
t1e_nm int(4) -EXTRACT(DAY FROM a)
|
||||
t1e_ps int(4) -?
|
||||
t1f_nm int(3) -DAY(a)
|
||||
t1f_ps int(3) -?
|
||||
t1e_nm int(4) ROUND(EXTRACT(DAY FROM a),-1)
|
||||
t1e_ps int(4) ROUND(?,-1)
|
||||
t1f_nm int(4) ROUND(DAY(a),-1)
|
||||
t1f_ps int(4) ROUND(?,-1)
|
||||
t1e_nm int(4) EXTRACT(DAY FROM a)+0
|
||||
t1e_ps int(4) ?+0
|
||||
t1f_nm int(4) DAY(a)+0
|
||||
t1f_ps int(4) ?+0
|
||||
t1e_nm decimal(4,1) EXTRACT(DAY FROM a)+0.0
|
||||
t1e_ps decimal(4,1) ?+0.0
|
||||
t1f_nm decimal(4,1) DAY(a)+0.0
|
||||
t1f_ps decimal(4,1) ?+0.0
|
||||
t1e_nm varchar(3) CONCAT(EXTRACT(DAY FROM a))
|
||||
t1e_ps varchar(3) CONCAT(?)
|
||||
t1f_nm varchar(2) CONCAT(DAY(a))
|
||||
t1f_ps varchar(2) CONCAT(?)
|
||||
t1e_nm int(3) LEAST(EXTRACT(DAY FROM a),EXTRACT(DAY FROM a))
|
||||
t1e_ps int(3) LEAST(?,?)
|
||||
t1f_nm int(3) LEAST(DAY(a),DAY(a))
|
||||
t1f_ps int(3) LEAST(?,?)
|
||||
t1e_nm int(3) COALESCE(EXTRACT(DAY FROM a))
|
||||
t1e_ps int(3) COALESCE(?)
|
||||
t1f_nm int(3) COALESCE(DAY(a))
|
||||
t1f_ps int(3) COALESCE(?)
|
||||
t1e_nm int(3) COALESCE(EXTRACT(DAY FROM a),CAST(1 AS SIGNED))
|
||||
t1e_ps int(3) COALESCE(?,CAST(1 AS SIGNED))
|
||||
t1f_nm int(3) COALESCE(DAY(a),CAST(1 AS SIGNED))
|
||||
t1f_ps int(3) COALESCE(?,CAST(1 AS SIGNED))
|
||||
t1e_nm decimal(2,0) COALESCE(EXTRACT(DAY FROM a),CAST(1 AS UNSIGNED))
|
||||
t1e_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED))
|
||||
t1f_nm decimal(2,0) COALESCE(DAY(a),CAST(1 AS UNSIGNED))
|
||||
t1f_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED))
|
||||
t1e_nm int(3) @a:=EXTRACT(DAY FROM a)
|
||||
t1e_ps int(3) @a:=?
|
||||
t1f_nm int(3) @a:=DAY(a)
|
||||
t1f_ps int(3) @a:=?
|
||||
t2e_nm decimal(6,4) AVG(EXTRACT(DAY FROM a))
|
||||
t2e_ps decimal(6,4) AVG(?)
|
||||
t2f_nm decimal(6,4) AVG(DAY(a))
|
||||
t2f_ps decimal(6,4) AVG(?)
|
||||
t2e_nm bigint(3) MIN(EXTRACT(DAY FROM a))
|
||||
t2e_ps bigint(3) MIN(?)
|
||||
t2f_nm bigint(3) MIN(DAY(a))
|
||||
t2f_ps bigint(3) MIN(?)
|
||||
t2e_nm bigint(3) MAX(EXTRACT(DAY FROM a))
|
||||
t2e_ps bigint(3) MAX(?)
|
||||
t2f_nm bigint(3) MAX(DAY(a))
|
||||
t2f_ps bigint(3) MAX(?)
|
||||
t2e_nm mediumtext GROUP_CONCAT(EXTRACT(DAY FROM a))
|
||||
t2e_ps mediumtext GROUP_CONCAT(?)
|
||||
t2f_nm mediumtext GROUP_CONCAT(DAY(a))
|
||||
t2f_ps mediumtext GROUP_CONCAT(?)
# EXTRACT(HOUR FROM expr) returns hours%24 and includes the sign for TIME
|
||||
# HOUR(expr) returns the hh part of CAST(expr AS DATETIME)
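# Annotation (not part of the recorded output): a sketch of the difference
# for an out-of-range TIME, with expected values hedged per the notes above:
#
#   SELECT EXTRACT(HOUR FROM TIME'-100:20:30');  -- hours%24 with sign kept: -4
#   SELECT HOUR(TIME'-100:20:30');               -- hh of CAST(... AS DATETIME),
#                                                -- so it depends on CURRENT_DATE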
|
||||
CALL p1('HOUR');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
CALL show_drop;
|
||||
TABLE_NAME COLUMN_TYPE COLUMN_NAME
|
||||
t1e_nm int(3) EXTRACT(HOUR FROM a)
|
||||
t1e_ps int(3) ?
|
||||
t1f_nm int(3) HOUR(a)
|
||||
t1f_ps int(3) ?
|
||||
t1e_nm bigint(20) unsigned CAST(EXTRACT(HOUR FROM a) AS UNSIGNED)
|
||||
t1e_ps bigint(20) unsigned CAST(? AS UNSIGNED)
|
||||
t1f_nm int(2) unsigned CAST(HOUR(a) AS UNSIGNED)
|
||||
t1f_ps int(2) unsigned CAST(? AS UNSIGNED)
|
||||
t1e_nm int(3) CAST(EXTRACT(HOUR FROM a) AS SIGNED)
|
||||
t1e_ps int(3) CAST(? AS SIGNED)
|
||||
t1f_nm int(3) CAST(HOUR(a) AS SIGNED)
|
||||
t1f_ps int(3) CAST(? AS SIGNED)
|
||||
t1e_nm int(3) ABS(EXTRACT(HOUR FROM a))
|
||||
t1e_ps int(3) ABS(?)
|
||||
t1f_nm int(3) ABS(HOUR(a))
|
||||
t1f_ps int(3) ABS(?)
|
||||
t1e_nm int(3) ROUND(EXTRACT(HOUR FROM a))
|
||||
t1e_ps int(3) ROUND(?)
|
||||
t1f_nm int(3) ROUND(HOUR(a))
|
||||
t1f_ps int(3) ROUND(?)
|
||||
t1e_nm int(4) -EXTRACT(HOUR FROM a)
|
||||
t1e_ps int(4) -?
|
||||
t1f_nm int(3) -HOUR(a)
|
||||
t1f_ps int(3) -?
|
||||
t1e_nm int(4) ROUND(EXTRACT(HOUR FROM a),-1)
|
||||
t1e_ps int(4) ROUND(?,-1)
|
||||
t1f_nm int(4) ROUND(HOUR(a),-1)
|
||||
t1f_ps int(4) ROUND(?,-1)
|
||||
t1e_nm int(4) EXTRACT(HOUR FROM a)+0
|
||||
t1e_ps int(4) ?+0
|
||||
t1f_nm int(4) HOUR(a)+0
|
||||
t1f_ps int(4) ?+0
|
||||
t1e_nm decimal(4,1) EXTRACT(HOUR FROM a)+0.0
|
||||
t1e_ps decimal(4,1) ?+0.0
|
||||
t1f_nm decimal(4,1) HOUR(a)+0.0
|
||||
t1f_ps decimal(4,1) ?+0.0
|
||||
t1e_nm varchar(3) CONCAT(EXTRACT(HOUR FROM a))
|
||||
t1e_ps varchar(3) CONCAT(?)
|
||||
t1f_nm varchar(2) CONCAT(HOUR(a))
|
||||
t1f_ps varchar(2) CONCAT(?)
|
||||
t1e_nm int(3) LEAST(EXTRACT(HOUR FROM a),EXTRACT(HOUR FROM a))
|
||||
t1e_ps int(3) LEAST(?,?)
|
||||
t1f_nm int(3) LEAST(HOUR(a),HOUR(a))
|
||||
t1f_ps int(3) LEAST(?,?)
|
||||
t1e_nm int(3) COALESCE(EXTRACT(HOUR FROM a))
|
||||
t1e_ps int(3) COALESCE(?)
|
||||
t1f_nm int(3) COALESCE(HOUR(a))
|
||||
t1f_ps int(3) COALESCE(?)
|
||||
t1e_nm int(3) COALESCE(EXTRACT(HOUR FROM a),CAST(1 AS SIGNED))
|
||||
t1e_ps int(3) COALESCE(?,CAST(1 AS SIGNED))
|
||||
t1f_nm int(3) COALESCE(HOUR(a),CAST(1 AS SIGNED))
|
||||
t1f_ps int(3) COALESCE(?,CAST(1 AS SIGNED))
|
||||
t1e_nm decimal(2,0) COALESCE(EXTRACT(HOUR FROM a),CAST(1 AS UNSIGNED))
|
||||
t1e_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED))
|
||||
t1f_nm decimal(2,0) COALESCE(HOUR(a),CAST(1 AS UNSIGNED))
|
||||
t1f_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED))
|
||||
t1e_nm int(3) @a:=EXTRACT(HOUR FROM a)
|
||||
t1e_ps int(3) @a:=?
|
||||
t1f_nm int(3) @a:=HOUR(a)
|
||||
t1f_ps int(3) @a:=?
|
||||
t2e_nm decimal(6,4) AVG(EXTRACT(HOUR FROM a))
|
||||
t2e_ps decimal(6,4) AVG(?)
|
||||
t2f_nm decimal(6,4) AVG(HOUR(a))
|
||||
t2f_ps decimal(6,4) AVG(?)
|
||||
t2e_nm bigint(3) MIN(EXTRACT(HOUR FROM a))
|
||||
t2e_ps bigint(3) MIN(?)
|
||||
t2f_nm bigint(3) MIN(HOUR(a))
|
||||
t2f_ps bigint(3) MIN(?)
|
||||
t2e_nm bigint(3) MAX(EXTRACT(HOUR FROM a))
|
||||
t2e_ps bigint(3) MAX(?)
|
||||
t2f_nm bigint(3) MAX(HOUR(a))
|
||||
t2f_ps bigint(3) MAX(?)
|
||||
t2e_nm mediumtext GROUP_CONCAT(EXTRACT(HOUR FROM a))
|
||||
t2e_ps mediumtext GROUP_CONCAT(?)
|
||||
t2f_nm mediumtext GROUP_CONCAT(HOUR(a))
|
||||
t2f_ps mediumtext GROUP_CONCAT(?)
# EXTRACT(MINUTE FROM expr) includes the sign for TIME
|
||||
# MINUTE(expr) returns the absolute value
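# Annotation (not part of the recorded output): the same sign-vs-absolute
# split repeats for SECOND and MICROSECOND below. A sketch:
#
#   SELECT EXTRACT(MINUTE FROM TIME'-10:20:30');  -- sign kept: -20
#   SELECT MINUTE(TIME'-10:20:30');               -- absolute value: 20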
|
||||
CALL p1('MINUTE');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
CALL show_drop;
|
||||
TABLE_NAME COLUMN_TYPE COLUMN_NAME
|
||||
t1e_nm int(3) EXTRACT(MINUTE FROM a)
|
||||
t1e_ps int(3) ?
|
||||
t1f_nm int(3) MINUTE(a)
|
||||
t1f_ps int(3) ?
|
||||
t1e_nm bigint(20) unsigned CAST(EXTRACT(MINUTE FROM a) AS UNSIGNED)
|
||||
t1e_ps bigint(20) unsigned CAST(? AS UNSIGNED)
|
||||
t1f_nm int(2) unsigned CAST(MINUTE(a) AS UNSIGNED)
|
||||
t1f_ps int(2) unsigned CAST(? AS UNSIGNED)
|
||||
t1e_nm int(3) CAST(EXTRACT(MINUTE FROM a) AS SIGNED)
|
||||
t1e_ps int(3) CAST(? AS SIGNED)
|
||||
t1f_nm int(3) CAST(MINUTE(a) AS SIGNED)
|
||||
t1f_ps int(3) CAST(? AS SIGNED)
|
||||
t1e_nm int(3) ABS(EXTRACT(MINUTE FROM a))
|
||||
t1e_ps int(3) ABS(?)
|
||||
t1f_nm int(3) ABS(MINUTE(a))
|
||||
t1f_ps int(3) ABS(?)
|
||||
t1e_nm int(3) ROUND(EXTRACT(MINUTE FROM a))
|
||||
t1e_ps int(3) ROUND(?)
|
||||
t1f_nm int(3) ROUND(MINUTE(a))
|
||||
t1f_ps int(3) ROUND(?)
|
||||
t1e_nm int(4) -EXTRACT(MINUTE FROM a)
|
||||
t1e_ps int(4) -?
|
||||
t1f_nm int(3) -MINUTE(a)
|
||||
t1f_ps int(3) -?
|
||||
t1e_nm int(4) ROUND(EXTRACT(MINUTE FROM a),-1)
|
||||
t1e_ps int(4) ROUND(?,-1)
|
||||
t1f_nm int(4) ROUND(MINUTE(a),-1)
|
||||
t1f_ps int(4) ROUND(?,-1)
|
||||
t1e_nm int(4) EXTRACT(MINUTE FROM a)+0
|
||||
t1e_ps int(4) ?+0
|
||||
t1f_nm int(4) MINUTE(a)+0
|
||||
t1f_ps int(4) ?+0
|
||||
t1e_nm decimal(4,1) EXTRACT(MINUTE FROM a)+0.0
|
||||
t1e_ps decimal(4,1) ?+0.0
|
||||
t1f_nm decimal(4,1) MINUTE(a)+0.0
|
||||
t1f_ps decimal(4,1) ?+0.0
|
||||
t1e_nm varchar(3) CONCAT(EXTRACT(MINUTE FROM a))
|
||||
t1e_ps varchar(3) CONCAT(?)
|
||||
t1f_nm varchar(2) CONCAT(MINUTE(a))
|
||||
t1f_ps varchar(2) CONCAT(?)
|
||||
t1e_nm int(3) LEAST(EXTRACT(MINUTE FROM a),EXTRACT(MINUTE FROM a))
|
||||
t1e_ps int(3) LEAST(?,?)
|
||||
t1f_nm int(3) LEAST(MINUTE(a),MINUTE(a))
|
||||
t1f_ps int(3) LEAST(?,?)
|
||||
t1e_nm int(3) COALESCE(EXTRACT(MINUTE FROM a))
|
||||
t1e_ps int(3) COALESCE(?)
|
||||
t1f_nm int(3) COALESCE(MINUTE(a))
|
||||
t1f_ps int(3) COALESCE(?)
|
||||
t1e_nm int(3) COALESCE(EXTRACT(MINUTE FROM a),CAST(1 AS SIGNED))
|
||||
t1e_ps int(3) COALESCE(?,CAST(1 AS SIGNED))
|
||||
t1f_nm int(3) COALESCE(MINUTE(a),CAST(1 AS SIGNED))
|
||||
t1f_ps int(3) COALESCE(?,CAST(1 AS SIGNED))
|
||||
t1e_nm decimal(2,0) COALESCE(EXTRACT(MINUTE FROM a),CAST(1 AS UNSIGNED))
|
||||
t1e_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED))
|
||||
t1f_nm decimal(2,0) COALESCE(MINUTE(a),CAST(1 AS UNSIGNED))
|
||||
t1f_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED))
|
||||
t1e_nm int(3) @a:=EXTRACT(MINUTE FROM a)
|
||||
t1e_ps int(3) @a:=?
|
||||
t1f_nm int(3) @a:=MINUTE(a)
|
||||
t1f_ps int(3) @a:=?
|
||||
t2e_nm decimal(6,4) AVG(EXTRACT(MINUTE FROM a))
|
||||
t2e_ps decimal(6,4) AVG(?)
|
||||
t2f_nm decimal(6,4) AVG(MINUTE(a))
|
||||
t2f_ps decimal(6,4) AVG(?)
|
||||
t2e_nm bigint(3) MIN(EXTRACT(MINUTE FROM a))
|
||||
t2e_ps bigint(3) MIN(?)
|
||||
t2f_nm bigint(3) MIN(MINUTE(a))
|
||||
t2f_ps bigint(3) MIN(?)
|
||||
t2e_nm bigint(3) MAX(EXTRACT(MINUTE FROM a))
|
||||
t2e_ps bigint(3) MAX(?)
|
||||
t2f_nm bigint(3) MAX(MINUTE(a))
|
||||
t2f_ps bigint(3) MAX(?)
|
||||
t2e_nm mediumtext GROUP_CONCAT(EXTRACT(MINUTE FROM a))
|
||||
t2e_ps mediumtext GROUP_CONCAT(?)
|
||||
t2f_nm mediumtext GROUP_CONCAT(MINUTE(a))
|
||||
t2f_ps mediumtext GROUP_CONCAT(?)
# EXTRACT(SECOND FROM expr) includes the sign for TIME
|
||||
# SECOND(expr) returns the absolute value
|
||||
CALL p1('SECOND');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
CALL show_drop;
|
||||
TABLE_NAME COLUMN_TYPE COLUMN_NAME
|
||||
t1e_nm int(3) EXTRACT(SECOND FROM a)
|
||||
t1e_ps int(3) ?
|
||||
t1f_nm int(3) SECOND(a)
|
||||
t1f_ps int(3) ?
|
||||
t1e_nm bigint(20) unsigned CAST(EXTRACT(SECOND FROM a) AS UNSIGNED)
|
||||
t1e_ps bigint(20) unsigned CAST(? AS UNSIGNED)
|
||||
t1f_nm int(2) unsigned CAST(SECOND(a) AS UNSIGNED)
|
||||
t1f_ps int(2) unsigned CAST(? AS UNSIGNED)
|
||||
t1e_nm int(3) CAST(EXTRACT(SECOND FROM a) AS SIGNED)
|
||||
t1e_ps int(3) CAST(? AS SIGNED)
|
||||
t1f_nm int(3) CAST(SECOND(a) AS SIGNED)
|
||||
t1f_ps int(3) CAST(? AS SIGNED)
|
||||
t1e_nm int(3) ABS(EXTRACT(SECOND FROM a))
|
||||
t1e_ps int(3) ABS(?)
|
||||
t1f_nm int(3) ABS(SECOND(a))
|
||||
t1f_ps int(3) ABS(?)
|
||||
t1e_nm int(3) ROUND(EXTRACT(SECOND FROM a))
|
||||
t1e_ps int(3) ROUND(?)
|
||||
t1f_nm int(3) ROUND(SECOND(a))
|
||||
t1f_ps int(3) ROUND(?)
|
||||
t1e_nm int(4) -EXTRACT(SECOND FROM a)
|
||||
t1e_ps int(4) -?
|
||||
t1f_nm int(3) -SECOND(a)
|
||||
t1f_ps int(3) -?
|
||||
t1e_nm int(4) ROUND(EXTRACT(SECOND FROM a),-1)
|
||||
t1e_ps int(4) ROUND(?,-1)
|
||||
t1f_nm int(4) ROUND(SECOND(a),-1)
|
||||
t1f_ps int(4) ROUND(?,-1)
|
||||
t1e_nm int(4) EXTRACT(SECOND FROM a)+0
|
||||
t1e_ps int(4) ?+0
|
||||
t1f_nm int(4) SECOND(a)+0
|
||||
t1f_ps int(4) ?+0
|
||||
t1e_nm decimal(4,1) EXTRACT(SECOND FROM a)+0.0
|
||||
t1e_ps decimal(4,1) ?+0.0
|
||||
t1f_nm decimal(4,1) SECOND(a)+0.0
|
||||
t1f_ps decimal(4,1) ?+0.0
|
||||
t1e_nm varchar(3) CONCAT(EXTRACT(SECOND FROM a))
|
||||
t1e_ps varchar(3) CONCAT(?)
|
||||
t1f_nm varchar(2) CONCAT(SECOND(a))
|
||||
t1f_ps varchar(2) CONCAT(?)
|
||||
t1e_nm int(3) LEAST(EXTRACT(SECOND FROM a),EXTRACT(SECOND FROM a))
|
||||
t1e_ps int(3) LEAST(?,?)
|
||||
t1f_nm int(3) LEAST(SECOND(a),SECOND(a))
|
||||
t1f_ps int(3) LEAST(?,?)
|
||||
t1e_nm int(3) COALESCE(EXTRACT(SECOND FROM a))
|
||||
t1e_ps int(3) COALESCE(?)
|
||||
t1f_nm int(3) COALESCE(SECOND(a))
|
||||
t1f_ps int(3) COALESCE(?)
|
||||
t1e_nm int(3) COALESCE(EXTRACT(SECOND FROM a),CAST(1 AS SIGNED))
|
||||
t1e_ps int(3) COALESCE(?,CAST(1 AS SIGNED))
|
||||
t1f_nm int(3) COALESCE(SECOND(a),CAST(1 AS SIGNED))
|
||||
t1f_ps int(3) COALESCE(?,CAST(1 AS SIGNED))
|
||||
t1e_nm decimal(2,0) COALESCE(EXTRACT(SECOND FROM a),CAST(1 AS UNSIGNED))
|
||||
t1e_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED))
|
||||
t1f_nm decimal(2,0) COALESCE(SECOND(a),CAST(1 AS UNSIGNED))
|
||||
t1f_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED))
|
||||
t1e_nm int(3) @a:=EXTRACT(SECOND FROM a)
|
||||
t1e_ps int(3) @a:=?
|
||||
t1f_nm int(3) @a:=SECOND(a)
|
||||
t1f_ps int(3) @a:=?
|
||||
t2e_nm decimal(6,4) AVG(EXTRACT(SECOND FROM a))
|
||||
t2e_ps decimal(6,4) AVG(?)
|
||||
t2f_nm decimal(6,4) AVG(SECOND(a))
|
||||
t2f_ps decimal(6,4) AVG(?)
|
||||
t2e_nm bigint(3) MIN(EXTRACT(SECOND FROM a))
|
||||
t2e_ps bigint(3) MIN(?)
|
||||
t2f_nm bigint(3) MIN(SECOND(a))
|
||||
t2f_ps bigint(3) MIN(?)
|
||||
t2e_nm bigint(3) MAX(EXTRACT(SECOND FROM a))
|
||||
t2e_ps bigint(3) MAX(?)
|
||||
t2f_nm bigint(3) MAX(SECOND(a))
|
||||
t2f_ps bigint(3) MAX(?)
|
||||
t2e_nm mediumtext GROUP_CONCAT(EXTRACT(SECOND FROM a))
|
||||
t2e_ps mediumtext GROUP_CONCAT(?)
|
||||
t2f_nm mediumtext GROUP_CONCAT(SECOND(a))
|
||||
t2f_ps mediumtext GROUP_CONCAT(?)
# EXTRACT(MICROSECOND FROM expr) includes the sign for TIME
|
||||
# MICROSECOND(expr) returns the absolute value
|
||||
CALL p1('MICROSECOND');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
CALL show_drop;
|
||||
TABLE_NAME COLUMN_TYPE COLUMN_NAME
|
||||
t1e_nm int(7) EXTRACT(MICROSECOND FROM a)
|
||||
t1e_ps int(7) ?
|
||||
t1f_nm int(7) MICROSECOND(a)
|
||||
t1f_ps int(7) ?
|
||||
t1e_nm bigint(20) unsigned CAST(EXTRACT(MICROSECOND FROM a) AS UNSIGNED)
|
||||
t1e_ps bigint(20) unsigned CAST(? AS UNSIGNED)
|
||||
t1f_nm int(6) unsigned CAST(MICROSECOND(a) AS UNSIGNED)
|
||||
t1f_ps int(6) unsigned CAST(? AS UNSIGNED)
|
||||
t1e_nm int(7) CAST(EXTRACT(MICROSECOND FROM a) AS SIGNED)
|
||||
t1e_ps int(7) CAST(? AS SIGNED)
|
||||
t1f_nm int(7) CAST(MICROSECOND(a) AS SIGNED)
|
||||
t1f_ps int(7) CAST(? AS SIGNED)
|
||||
t1e_nm int(7) ABS(EXTRACT(MICROSECOND FROM a))
|
||||
t1e_ps int(7) ABS(?)
|
||||
t1f_nm int(7) ABS(MICROSECOND(a))
|
||||
t1f_ps int(7) ABS(?)
|
||||
t1e_nm int(7) ROUND(EXTRACT(MICROSECOND FROM a))
|
||||
t1e_ps int(7) ROUND(?)
|
||||
t1f_nm int(7) ROUND(MICROSECOND(a))
|
||||
t1f_ps int(7) ROUND(?)
|
||||
t1e_nm int(8) -EXTRACT(MICROSECOND FROM a)
|
||||
t1e_ps int(8) -?
|
||||
t1f_nm int(7) -MICROSECOND(a)
|
||||
t1f_ps int(7) -?
|
||||
t1e_nm int(8) ROUND(EXTRACT(MICROSECOND FROM a),-1)
|
||||
t1e_ps int(8) ROUND(?,-1)
|
||||
t1f_nm int(8) ROUND(MICROSECOND(a),-1)
|
||||
t1f_ps int(8) ROUND(?,-1)
|
||||
t1e_nm int(8) EXTRACT(MICROSECOND FROM a)+0
|
||||
t1e_ps int(8) ?+0
|
||||
t1f_nm int(8) MICROSECOND(a)+0
|
||||
t1f_ps int(8) ?+0
|
||||
t1e_nm decimal(8,1) EXTRACT(MICROSECOND FROM a)+0.0
|
||||
t1e_ps decimal(8,1) ?+0.0
|
||||
t1f_nm decimal(8,1) MICROSECOND(a)+0.0
|
||||
t1f_ps decimal(8,1) ?+0.0
|
||||
t1e_nm varchar(7) CONCAT(EXTRACT(MICROSECOND FROM a))
|
||||
t1e_ps varchar(7) CONCAT(?)
|
||||
t1f_nm varchar(6) CONCAT(MICROSECOND(a))
|
||||
t1f_ps varchar(6) CONCAT(?)
|
||||
t1e_nm int(7) LEAST(EXTRACT(MICROSECOND FROM a),EXTRACT(MICROSECOND FROM a))
|
||||
t1e_ps int(7) LEAST(?,?)
|
||||
t1f_nm int(7) LEAST(MICROSECOND(a),MICROSECOND(a))
|
||||
t1f_ps int(7) LEAST(?,?)
|
||||
t1e_nm int(7) COALESCE(EXTRACT(MICROSECOND FROM a))
|
||||
t1e_ps int(7) COALESCE(?)
|
||||
t1f_nm int(7) COALESCE(MICROSECOND(a))
|
||||
t1f_ps int(7) COALESCE(?)
|
||||
t1e_nm int(7) COALESCE(EXTRACT(MICROSECOND FROM a),CAST(1 AS SIGNED))
|
||||
t1e_ps int(7) COALESCE(?,CAST(1 AS SIGNED))
|
||||
t1f_nm int(7) COALESCE(MICROSECOND(a),CAST(1 AS SIGNED))
|
||||
t1f_ps int(7) COALESCE(?,CAST(1 AS SIGNED))
|
||||
t1e_nm decimal(6,0) COALESCE(EXTRACT(MICROSECOND FROM a),CAST(1 AS UNSIGNED))
|
||||
t1e_ps decimal(6,0) COALESCE(?,CAST(1 AS UNSIGNED))
|
||||
t1f_nm decimal(6,0) COALESCE(MICROSECOND(a),CAST(1 AS UNSIGNED))
|
||||
t1f_ps decimal(6,0) COALESCE(?,CAST(1 AS UNSIGNED))
|
||||
t1e_nm int(7) @a:=EXTRACT(MICROSECOND FROM a)
|
||||
t1e_ps int(7) @a:=?
|
||||
t1f_nm int(7) @a:=MICROSECOND(a)
|
||||
t1f_ps int(7) @a:=?
|
||||
t2e_nm decimal(10,4) AVG(EXTRACT(MICROSECOND FROM a))
|
||||
t2e_ps decimal(10,4) AVG(?)
|
||||
t2f_nm decimal(10,4) AVG(MICROSECOND(a))
|
||||
t2f_ps decimal(10,4) AVG(?)
|
||||
t2e_nm bigint(7) MIN(EXTRACT(MICROSECOND FROM a))
|
||||
t2e_ps bigint(7) MIN(?)
|
||||
t2f_nm bigint(7) MIN(MICROSECOND(a))
|
||||
t2f_ps bigint(7) MIN(?)
|
||||
t2e_nm bigint(7) MAX(EXTRACT(MICROSECOND FROM a))
|
||||
t2e_ps bigint(7) MAX(?)
|
||||
t2f_nm bigint(7) MAX(MICROSECOND(a))
|
||||
t2f_ps bigint(7) MAX(?)
|
||||
t2e_nm mediumtext GROUP_CONCAT(EXTRACT(MICROSECOND FROM a))
|
||||
t2e_ps mediumtext GROUP_CONCAT(?)
|
||||
t2f_nm mediumtext GROUP_CONCAT(MICROSECOND(a))
|
||||
t2f_ps mediumtext GROUP_CONCAT(?)
# DAYOFYEAR
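# Annotation: EXTRACT() has no DAYOFYEAR unit, so p1() creates only the
# t1f_*/t2f_* tables for this call and the EXTRACT-based t1e_*/t2e_* columns
# are absent from the listing below.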
|
||||
CALL p1('DAYOFYEAR');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999');
|
||||
CALL show_drop;
|
||||
TABLE_NAME COLUMN_TYPE COLUMN_NAME
|
||||
t1f_nm int(4) DAYOFYEAR(a)
|
||||
t1f_ps int(4) ?
|
||||
t1f_nm int(3) unsigned CAST(DAYOFYEAR(a) AS UNSIGNED)
|
||||
t1f_ps int(3) unsigned CAST(? AS UNSIGNED)
|
||||
t1f_nm int(4) CAST(DAYOFYEAR(a) AS SIGNED)
|
||||
t1f_ps int(4) CAST(? AS SIGNED)
|
||||
t1f_nm int(4) ABS(DAYOFYEAR(a))
|
||||
t1f_ps int(4) ABS(?)
|
||||
t1f_nm int(4) ROUND(DAYOFYEAR(a))
|
||||
t1f_ps int(4) ROUND(?)
|
||||
t1f_nm int(4) -DAYOFYEAR(a)
|
||||
t1f_ps int(4) -?
|
||||
t1f_nm int(5) ROUND(DAYOFYEAR(a),-1)
|
||||
t1f_ps int(5) ROUND(?,-1)
|
||||
t1f_nm int(5) DAYOFYEAR(a)+0
|
||||
t1f_ps int(5) ?+0
|
||||
t1f_nm decimal(5,1) DAYOFYEAR(a)+0.0
|
||||
t1f_ps decimal(5,1) ?+0.0
|
||||
t1f_nm varchar(3) CONCAT(DAYOFYEAR(a))
|
||||
t1f_ps varchar(3) CONCAT(?)
|
||||
t1f_nm int(4) LEAST(DAYOFYEAR(a),DAYOFYEAR(a))
|
||||
t1f_ps int(4) LEAST(?,?)
|
||||
t1f_nm int(4) COALESCE(DAYOFYEAR(a))
|
||||
t1f_ps int(4) COALESCE(?)
|
||||
t1f_nm int(4) COALESCE(DAYOFYEAR(a),CAST(1 AS SIGNED))
|
||||
t1f_ps int(4) COALESCE(?,CAST(1 AS SIGNED))
|
||||
t1f_nm decimal(3,0) COALESCE(DAYOFYEAR(a),CAST(1 AS UNSIGNED))
|
||||
t1f_ps decimal(3,0) COALESCE(?,CAST(1 AS UNSIGNED))
|
||||
t1f_nm int(4) @a:=DAYOFYEAR(a)
|
||||
t1f_ps int(4) @a:=?
|
||||
t2f_nm decimal(7,4) AVG(DAYOFYEAR(a))
|
||||
t2f_ps decimal(7,4) AVG(?)
|
||||
t2f_nm bigint(4) MIN(DAYOFYEAR(a))
|
||||
t2f_ps bigint(4) MIN(?)
|
||||
t2f_nm bigint(4) MAX(DAYOFYEAR(a))
|
||||
t2f_ps bigint(4) MAX(?)
|
||||
t2f_nm mediumtext GROUP_CONCAT(DAYOFYEAR(a))
|
||||
t2f_ps mediumtext GROUP_CONCAT(?)
|
||||
DROP TABLE t1;
|
||||
DROP PROCEDURE p1;
|
||||
DROP PROCEDURE show_drop;
|
||||
DROP FUNCTION params;
|
||||
DROP FUNCTION select01;
|
||||
DROP FUNCTION select02;
|
||||
#
|
||||
# End of 10.5 tests
|
||||
#
|
||||
|
@ -263,3 +263,254 @@ SELECT
|
||||
FROM t1;
|
||||
DROP TABLE t1;
|
||||
--enable_view_protocol
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # Start of 10.5 tests
|
||||
--echo #
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-33496 Out of range error in AVG(YEAR(datetime)) due to a wrong data type
|
||||
--echo #
|
||||
|
||||
let select01=SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?;
|
||||
let pcount01=16;
|
||||
let select02=SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?);
|
||||
let pcount02=4;
|
||||
let ts=TIMESTAMP'2001-12-13 10:20:30.999999';
|
||||
|
||||
eval CREATE FUNCTION select01() RETURNS TEXT RETURN '$select01';
|
||||
eval CREATE FUNCTION select02() RETURNS TEXT RETURN '$select02';
|
||||
|
||||
CREATE TABLE t1 (a DATETIME(6));
|
||||
INSERT INTO t1 VALUES ('2001-12-31 10:20:30.999999');
|
||||
|
||||
DELIMITER $$;
|
||||
CREATE FUNCTION params(expr TEXT, count INT) RETURNS TEXT
|
||||
BEGIN
|
||||
RETURN CONCAT(expr, REPEAT(CONCAT(', ', expr), count-1));
|
||||
END;
|
||||
$$
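# Usage sketch (annotation only): params() simply repeats an expression into
# a comma-separated list, which is how the USING argument lists are sized to
# match the '?' placeholders in select01/select02, e.g.
#
#   SELECT params('YEAR(a)', 3);
#   -- YEAR(a), YEAR(a), YEAR(a)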
|
||||
CREATE PROCEDURE show_drop()
|
||||
BEGIN
|
||||
SELECT TABLE_NAME, COLUMN_TYPE, COLUMN_NAME
|
||||
FROM INFORMATION_SCHEMA.COLUMNS
|
||||
WHERE TABLE_SCHEMA='test'
|
||||
AND TABLE_NAME IN ('t1e_nm','t2e_nm','t1f_nm','t2f_nm',
|
||||
't1e_ps','t1f_ps','t2e_ps','t2f_ps')
|
||||
ORDER BY LEFT(TABLE_NAME, 2), ORDINAL_POSITION, TABLE_NAME;
|
||||
|
||||
FOR rec IN (SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES
|
||||
WHERE TABLE_SCHEMA='test'
|
||||
AND TABLE_NAME IN ('t1e_nm','t2e_nm','t1f_nm','t2f_nm',
|
||||
't1e_ps','t1f_ps','t2e_ps','t2f_ps'))
|
||||
DO
|
||||
EXECUTE IMMEDIATE CONCAT('DROP TABLE ', rec.TABLE_NAME);
|
||||
END FOR;
|
||||
END;
|
||||
$$
|
||||
CREATE PROCEDURE p1(unit VARCHAR(32))
|
||||
BEGIN
|
||||
DECLARE do_extract BOOL DEFAULT unit NOT IN('DAYOFYEAR');
|
||||
|
||||
DECLARE query01 TEXT DEFAULT
|
||||
CONCAT('CREATE TABLE t2 AS ', select01(), ' FROM t1');
|
||||
|
||||
DECLARE query02 TEXT DEFAULT
|
||||
CONCAT('CREATE TABLE t2 AS ', select02(), ' FROM t1');
|
||||
|
||||
IF (do_extract)
|
||||
THEN
|
||||
EXECUTE IMMEDIATE REPLACE(REPLACE(query01,'t2','t1e_nm'),'?', CONCAT('EXTRACT(',unit,' FROM a)'));
|
||||
EXECUTE IMMEDIATE REPLACE(REPLACE(query02,'t2','t2e_nm'),'?', CONCAT('EXTRACT(',unit,' FROM a)'));
|
||||
END IF;
|
||||
EXECUTE IMMEDIATE REPLACE(REPLACE(query01,'t2','t1f_nm'),'?', CONCAT(unit,'(a)'));
|
||||
EXECUTE IMMEDIATE REPLACE(REPLACE(query02,'t2','t2f_nm'),'?', CONCAT(unit,'(a)'));
|
||||
END;
|
||||
$$
|
||||
DELIMITER ;$$
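# Annotation: for the non-prepared variants p1() works by plain text
# substitution, so a call such as CALL p1('WEEK') effectively runs roughly
#
#   CREATE TABLE t1e_nm AS SELECT EXTRACT(WEEK FROM a),
#          CAST(EXTRACT(WEEK FROM a) AS UNSIGNED), ... FROM t1;
#   CREATE TABLE t1f_nm AS SELECT WEEK(a), CAST(WEEK(a) AS UNSIGNED), ... FROM t1;
#
# while the *_ps tables bind the same expressions as prepared-statement
# parameters, so show_drop() can compare the resulting column types.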
|
||||
|
||||
|
||||
--echo
|
||||
--echo
|
||||
--echo # EXTRACT(YEAR FROM expr) and YEAR(expr) are equivalent
|
||||
|
||||
CALL p1('YEAR');
|
||||
let extr=EXTRACT(YEAR FROM $ts);
|
||||
let func=YEAR($ts);
|
||||
let extr01=`SELECT params("$extr", $pcount01) AS p`;
|
||||
let func01=`SELECT params("$func", $pcount01) AS p`;
|
||||
let extr02=`SELECT params("$extr", $pcount02) AS p`;
|
||||
let func02=`SELECT params("$func", $pcount02) AS p`;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS $select01' USING $extr01;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS $select01' USING $func01;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS $select02' USING $extr02;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS $select02' USING $func02;
|
||||
CALL show_drop;
|
||||
|
||||
|
||||
--echo
|
||||
--echo
|
||||
--echo # EXTRACT(QUARTER FROM expr) and QUARTER(expr) are equivalent
|
||||
|
||||
CALL p1('QUARTER');
|
||||
let extr=EXTRACT(QUARTER FROM $ts);
|
||||
let func=QUARTER($ts);
|
||||
let extr01=`SELECT params("$extr", $pcount01) AS p`;
|
||||
let func01=`SELECT params("$func", $pcount01) AS p`;
|
||||
let extr02=`SELECT params("$extr", $pcount02) AS p`;
|
||||
let func02=`SELECT params("$func", $pcount02) AS p`;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS $select01' USING $extr01;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS $select01' USING $func01;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS $select02' USING $extr02;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS $select02' USING $func02;
|
||||
CALL show_drop;
|
||||
|
||||
--echo
|
||||
--echo
|
||||
--echo # EXTRACT(MONTH FROM expr) and MONTH(expr) are equivalent
|
||||
|
||||
CALL p1('MONTH');
|
||||
let extr=EXTRACT(MONTH FROM $ts);
|
||||
let func=MONTH($ts);
|
||||
let extr01=`SELECT params("$extr", $pcount01) AS p`;
|
||||
let func01=`SELECT params("$func", $pcount01) AS p`;
|
||||
let extr02=`SELECT params("$extr", $pcount02) AS p`;
|
||||
let func02=`SELECT params("$func", $pcount02) AS p`;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS $select01' USING $extr01;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS $select01' USING $func01;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS $select02' USING $extr02;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS $select02' USING $func02;
|
||||
CALL show_drop;
|
||||
|
||||
--echo
|
||||
--echo
|
||||
--echo # EXTRACT(WEEK FROM expr) and WEEK(expr) are equivalent
|
||||
|
||||
CALL p1('WEEK');
|
||||
let extr=EXTRACT(WEEK FROM $ts);
|
||||
let func=WEEK($ts);
|
||||
let extr01=`SELECT params("$extr", $pcount01) AS p`;
|
||||
let func01=`SELECT params("$func", $pcount01) AS p`;
|
||||
let extr02=`SELECT params("$extr", $pcount02) AS p`;
|
||||
let func02=`SELECT params("$func", $pcount02) AS p`;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS $select01' USING $extr01;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS $select01' USING $func01;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS $select02' USING $extr02;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS $select02' USING $func02;
|
||||
CALL show_drop;
|
||||
|
||||
--echo
|
||||
--echo
|
||||
--echo # EXTRACT(DAY FROM expr) returns hours/24 and includes the sign for TIME
|
||||
--echo # DAY(expr) returns the DD part of CAST(expr AS DATETIME)
|
||||
|
||||
CALL p1('DAY');
|
||||
let extr=EXTRACT(DAY FROM $ts);
|
||||
let func=DAY($ts);
|
||||
let extr01=`SELECT params("$extr", $pcount01) AS p`;
|
||||
let func01=`SELECT params("$func", $pcount01) AS p`;
|
||||
let extr02=`SELECT params("$extr", $pcount02) AS p`;
|
||||
let func02=`SELECT params("$func", $pcount02) AS p`;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS $select01' USING $extr01;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS $select01' USING $func01;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS $select02' USING $extr02;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS $select02' USING $func02;
|
||||
CALL show_drop;
|
||||
|
||||
--echo
|
||||
--echo
|
||||
--echo # EXTRACT(HOUR FROM expr) returns hours%24 and includes the sign for TIME
|
||||
--echo # HOUR(expr) returns the hh part of CAST(expr AS DATETIME)
|
||||
|
||||
CALL p1('HOUR');
|
||||
let extr=EXTRACT(HOUR FROM $ts);
|
||||
let func=HOUR($ts);
|
||||
let extr01=`SELECT params("$extr", $pcount01) AS p`;
|
||||
let func01=`SELECT params("$func", $pcount01) AS p`;
|
||||
let extr02=`SELECT params("$extr", $pcount02) AS p`;
|
||||
let func02=`SELECT params("$func", $pcount02) AS p`;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS $select01' USING $extr01;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS $select01' USING $func01;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS $select02' USING $extr02;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS $select02' USING $func02;
|
||||
CALL show_drop;
|
||||
|
||||
--echo
|
||||
--echo
|
||||
--echo # EXTRACT(MINUTE FROM expr) includes the sign for TIME
|
||||
--echo # MINUTE(expr) returns the absolute value
|
||||
|
||||
CALL p1('MINUTE');
|
||||
let extr=EXTRACT(MINUTE FROM $ts);
|
||||
let func=MINUTE($ts);
|
||||
let extr01=`SELECT params("$extr", $pcount01) AS p`;
|
||||
let func01=`SELECT params("$func", $pcount01) AS p`;
|
||||
let extr02=`SELECT params("$extr", $pcount02) AS p`;
|
||||
let func02=`SELECT params("$func", $pcount02) AS p`;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS $select01' USING $extr01;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS $select01' USING $func01;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS $select02' USING $extr02;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS $select02' USING $func02;
|
||||
CALL show_drop;
|
||||
|
||||
--echo
|
||||
--echo
|
||||
--echo # EXTRACT(SECOND FROM expr) includes the sign for TIME
|
||||
--echo # SECOND(expr) returns the absolute value
|
||||
|
||||
CALL p1('SECOND');
|
||||
let extr=EXTRACT(SECOND FROM $ts);
|
||||
let func=SECOND($ts);
|
||||
let extr01=`SELECT params("$extr", $pcount01) AS p`;
|
||||
let func01=`SELECT params("$func", $pcount01) AS p`;
|
||||
let extr02=`SELECT params("$extr", $pcount02) AS p`;
|
||||
let func02=`SELECT params("$func", $pcount02) AS p`;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS $select01' USING $extr01;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS $select01' USING $func01;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS $select02' USING $extr02;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS $select02' USING $func02;
|
||||
CALL show_drop;
|
||||
|
||||
--echo
|
||||
--echo
|
||||
--echo # EXTRACT(MICROSECOND FROM expr) includes the sign for TIME
|
||||
--echo # MICROSECOND(expr) returns the absolute value
|
||||
|
||||
CALL p1('MICROSECOND');
|
||||
let extr=EXTRACT(MICROSECOND FROM $ts);
|
||||
let func=MICROSECOND($ts);
|
||||
let extr01=`SELECT params("$extr", $pcount01) AS p`;
|
||||
let func01=`SELECT params("$func", $pcount01) AS p`;
|
||||
let extr02=`SELECT params("$extr", $pcount02) AS p`;
|
||||
let func02=`SELECT params("$func", $pcount02) AS p`;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS $select01' USING $extr01;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS $select01' USING $func01;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS $select02' USING $extr02;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS $select02' USING $func02;
|
||||
CALL show_drop;
|
||||
|
||||
--echo
|
||||
--echo
|
||||
--echo # DAYOFYEAR
|
||||
|
||||
CALL p1('DAYOFYEAR');
|
||||
let func=DAYOFYEAR($ts);
|
||||
let func01=`SELECT params("$func", $pcount01) AS p`;
|
||||
let func02=`SELECT params("$func", $pcount02) AS p`;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS $select01' USING $func01;
|
||||
eval EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS $select02' USING $func02;
|
||||
CALL show_drop;
|
||||
|
||||
|
||||
DROP TABLE t1;
|
||||
DROP PROCEDURE p1;
|
||||
DROP PROCEDURE show_drop;
|
||||
DROP FUNCTION params;
|
||||
|
||||
DROP FUNCTION select01;
|
||||
DROP FUNCTION select02;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.5 tests
|
||||
--echo #
|
||||
|
@ -1443,3 +1443,98 @@ drop table t1;
|
||||
#
|
||||
# End of 10.3 tests
|
||||
#
|
||||
#
|
||||
# MDEV-31276: Execution of PS from grouping query with join
|
||||
# and GROUP_CONCAT set function
|
||||
#
|
||||
create table t1 (a int, b varchar(20)) engine=myisam;
|
||||
create table t2 (a int, c varchar(20)) engine=myisam;
|
||||
insert into t1 values (1,"aaaaaaaaaa"),(2,"bbbbbbbbbb");
|
||||
insert into t2 values (1,"cccccccccc"),(2,"dddddddddd");
|
||||
insert into t2 values (1,"eeeeeee"),(2,"fffffff");
|
||||
set group_concat_max_len=5;
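# Annotation: with group_concat_max_len=5 every concatenated group value is
# cut down to 5 bytes, so both the direct query and the prepared statement
# are expected to return 'aaaaa'/'bbbbb' together with warning 1260.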
|
||||
select count(*), group_concat(t1.b,t2.c)
|
||||
from t1 join t2 on t1.a=t2.a group by t1.a;
|
||||
count(*) group_concat(t1.b,t2.c)
|
||||
2 aaaaa
|
||||
2 bbbbb
|
||||
Warnings:
|
||||
Warning 1260 Row 1 was cut by GROUP_CONCAT()
|
||||
Warning 1260 Row 2 was cut by GROUP_CONCAT()
|
||||
explain select count(*), group_concat(t1.b,t2.c)
|
||||
from t1 join t2 on t1.a=t2.a group by t1.a;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort
|
||||
1 SIMPLE t2 ALL NULL NULL NULL NULL 4 Using where; Using join buffer (flat, BNL join)
|
||||
prepare stmt from "select count(*), group_concat(t1.b,t2.c)
|
||||
from t1 join t2 on t1.a=t2.a group by t1.a";
|
||||
execute stmt;
|
||||
count(*) group_concat(t1.b,t2.c)
|
||||
2 aaaaa
|
||||
2 bbbbb
|
||||
Warnings:
|
||||
Warning 1260 Row 1 was cut by GROUP_CONCAT()
|
||||
Warning 1260 Row 2 was cut by GROUP_CONCAT()
|
||||
execute stmt;
|
||||
count(*) group_concat(t1.b,t2.c)
|
||||
2 aaaaa
|
||||
2 bbbbb
|
||||
Warnings:
|
||||
Warning 1260 Row 1 was cut by GROUP_CONCAT()
|
||||
Warning 1260 Row 2 was cut by GROUP_CONCAT()
|
||||
deallocate prepare stmt;
|
||||
set join_cache_level=0;
|
||||
select count(*), group_concat(t1.b,t2.c)
|
||||
from t1 join t2 on t1.a=t2.a group by t1.a;
|
||||
count(*) group_concat(t1.b,t2.c)
|
||||
2 aaaaa
|
||||
2 bbbbb
|
||||
Warnings:
|
||||
Warning 1260 Row 1 was cut by GROUP_CONCAT()
|
||||
Warning 1260 Row 2 was cut by GROUP_CONCAT()
|
||||
explain select count(*), group_concat(t1.b,t2.c)
|
||||
from t1 join t2 on t1.a=t2.a group by t1.a;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using filesort
|
||||
1 SIMPLE t2 ALL NULL NULL NULL NULL 4 Using where
|
||||
prepare stmt from "select count(*), group_concat(t1.b,t2.c)
|
||||
from t1 join t2 on t1.a=t2.a group by t1.a";
|
||||
execute stmt;
|
||||
count(*) group_concat(t1.b,t2.c)
|
||||
2 aaaaa
|
||||
2 bbbbb
|
||||
Warnings:
|
||||
Warning 1260 Row 1 was cut by GROUP_CONCAT()
|
||||
Warning 1260 Row 2 was cut by GROUP_CONCAT()
|
||||
execute stmt;
|
||||
count(*) group_concat(t1.b,t2.c)
|
||||
2 aaaaa
|
||||
2 bbbbb
|
||||
Warnings:
|
||||
Warning 1260 Row 1 was cut by GROUP_CONCAT()
|
||||
Warning 1260 Row 2 was cut by GROUP_CONCAT()
|
||||
deallocate prepare stmt;
|
||||
set join_cache_level=default;
|
||||
set group_concat_max_len=default;
|
||||
drop table t1,t2;
|
||||
#
|
||||
# MDEV-33772 Bad SEPARATOR value in GROUP_CONCAT on character set conversion
|
||||
#
|
||||
SET NAMES utf8, @@collation_connection=latin1_swedish_ci;
|
||||
CREATE TABLE t1 (c VARCHAR(10)) CHARACTER SET latin1;
|
||||
INSERT INTO t1 VALUES ('a'),('A');
|
||||
CREATE OR REPLACE VIEW v1 AS
|
||||
SELECT GROUP_CONCAT(c SEPARATOR 'ß') AS c1 FROM t1 GROUP BY c;
|
||||
SELECT * FROM v1;
|
||||
c1
|
||||
aßA
|
||||
SELECT HEX(c1) FROM v1;
|
||||
HEX(c1)
|
||||
61DF41
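# Annotation: 0x61='a', 0xDF='ß' in latin1, 0x41='A', i.e. the separator given
# in utf8 was converted to latin1 (0xDF) rather than kept as the two utf8
# bytes C3 9F.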
|
||||
SHOW CREATE VIEW v1;
|
||||
View Create View character_set_client collation_connection
|
||||
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select group_concat(`t1`.`c` separator 'ß') AS `c1` from `t1` group by `t1`.`c` utf8mb3 latin1_swedish_ci
|
||||
DROP VIEW v1;
|
||||
DROP TABLE t1;
|
||||
SET NAMES latin1;
|
||||
# End of 10.5 tests
|
||||
|
@ -1066,3 +1066,59 @@ drop table t1;
|
||||
--echo #
|
||||
--echo # End of 10.3 tests
|
||||
--echo #
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-31276: Execution of PS from grouping query with join
|
||||
--echo # and GROUP_CONCAT set function
|
||||
--echo #
|
||||
|
||||
create table t1 (a int, b varchar(20)) engine=myisam;
|
||||
create table t2 (a int, c varchar(20)) engine=myisam;
|
||||
insert into t1 values (1,"aaaaaaaaaa"),(2,"bbbbbbbbbb");
|
||||
insert into t2 values (1,"cccccccccc"),(2,"dddddddddd");
|
||||
insert into t2 values (1,"eeeeeee"),(2,"fffffff");
|
||||
|
||||
let $q=
|
||||
select count(*), group_concat(t1.b,t2.c)
|
||||
from t1 join t2 on t1.a=t2.a group by t1.a;
|
||||
|
||||
set group_concat_max_len=5;
|
||||
|
||||
eval $q;
|
||||
eval explain $q;
|
||||
eval prepare stmt from "$q";
|
||||
execute stmt;
|
||||
execute stmt;
|
||||
deallocate prepare stmt;
|
||||
|
||||
set join_cache_level=0;
|
||||
|
||||
eval $q;
|
||||
eval explain $q;
|
||||
eval prepare stmt from "$q";
|
||||
execute stmt;
|
||||
execute stmt;
|
||||
deallocate prepare stmt;
|
||||
|
||||
set join_cache_level=default;
|
||||
set group_concat_max_len=default;
|
||||
|
||||
drop table t1,t2;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-33772 Bad SEPARATOR value in GROUP_CONCAT on character set conversion
|
||||
--echo #
|
||||
|
||||
SET NAMES utf8, @@collation_connection=latin1_swedish_ci;
|
||||
CREATE TABLE t1 (c VARCHAR(10)) CHARACTER SET latin1;
|
||||
INSERT INTO t1 VALUES ('a'),('A');
|
||||
CREATE OR REPLACE VIEW v1 AS
|
||||
SELECT GROUP_CONCAT(c SEPARATOR 'ß') AS c1 FROM t1 GROUP BY c;
|
||||
SELECT * FROM v1;
|
||||
SELECT HEX(c1) FROM v1;
|
||||
SHOW CREATE VIEW v1;
|
||||
DROP VIEW v1;
|
||||
DROP TABLE t1;
|
||||
SET NAMES latin1;
|
||||
|
||||
--echo # End of 10.5 tests