mirror of
https://github.com/MariaDB/server.git
synced 2025-10-12 12:25:37 +03:00
MDEV-9519: Data corruption will happen on the Galera cluster size change
If we have a cluster of two or more nodes that is replicating from an async master, binlog_format is set to STATEMENT, and multi-row inserts are executed on a table with an auto_increment column such that the values are generated automatically by MySQL, then the server node generates wrong auto_increment values, which differ from those generated on the async master. The title of MDEV-9519 proposed banning START SLAVE on a Galera node if the master's binlog_format = STATEMENT and wsrep_auto_increment_control = 1, but the problem can be solved without such a restriction. The causes and fixes: 1. We need to improve the processing of auto-increment value changes after the cluster size changes. 2. If wsrep_auto_increment_control is switched on while the node is running, then we should immediately update the auto_increment_increment and auto_increment_offset global variables, without waiting for the next invocation of the wsrep_view_handler_cb() callback. In the current version these variables retain their initial values if wsrep_auto_increment_control is switched on while the node is running, which leads to inconsistent results on different nodes in some scenarios. 3. If wsrep_auto_increment_control is switched off while the node is running, then we must restore the original values of the auto_increment_increment and auto_increment_offset global variables, as set by the user. To make this possible, we need to add "shadow copies" of these variables (which store the latest values set by the user). https://jira.mariadb.org/browse/MDEV-9519
This commit is contained in:
committed by
Jan Lindström
parent
80c3fd184d
commit
2c734c980e
@@ -2902,11 +2902,17 @@ compute_next_insert_id(ulonglong nr,struct system_variables *variables)
|
||||
nr= nr + 1; // optimization of the formula below
|
||||
else
|
||||
{
|
||||
nr= (((nr+ variables->auto_increment_increment -
|
||||
variables->auto_increment_offset)) /
|
||||
(ulonglong) variables->auto_increment_increment);
|
||||
nr= (nr* (ulonglong) variables->auto_increment_increment +
|
||||
variables->auto_increment_offset);
|
||||
/*
|
||||
Calculating the number of complete auto_increment_increment extents:
|
||||
*/
|
||||
nr= (nr + variables->auto_increment_increment -
|
||||
variables->auto_increment_offset) /
|
||||
(ulonglong) variables->auto_increment_increment;
|
||||
/*
|
||||
Adding an offset to the auto_increment_increment extent boundary:
|
||||
*/
|
||||
nr= nr * (ulonglong) variables->auto_increment_increment +
|
||||
variables->auto_increment_offset;
|
||||
}
|
||||
|
||||
if (unlikely(nr <= save_nr))
|
||||
@@ -2960,8 +2966,14 @@ prev_insert_id(ulonglong nr, struct system_variables *variables)
|
||||
}
|
||||
if (variables->auto_increment_increment == 1)
|
||||
return nr; // optimization of the formula below
|
||||
nr= (((nr - variables->auto_increment_offset)) /
|
||||
(ulonglong) variables->auto_increment_increment);
|
||||
/*
|
||||
Calculating the number of complete auto_increment_increment extents:
|
||||
*/
|
||||
nr= (nr - variables->auto_increment_offset) /
|
||||
(ulonglong) variables->auto_increment_increment;
|
||||
/*
|
||||
Adding an offset to the auto_increment_increment extent boundary:
|
||||
*/
|
||||
return (nr * (ulonglong) variables->auto_increment_increment +
|
||||
variables->auto_increment_offset);
|
||||
}
|
||||
@@ -3052,7 +3064,7 @@ int handler::update_auto_increment()
|
||||
bool append= FALSE;
|
||||
THD *thd= table->in_use;
|
||||
struct system_variables *variables= &thd->variables;
|
||||
int result=0, tmp;
|
||||
int tmp;
|
||||
enum enum_check_fields save_count_cuted_fields;
|
||||
DBUG_ENTER("handler::update_auto_increment");
|
||||
|
||||
@@ -3184,10 +3196,23 @@ int handler::update_auto_increment()
|
||||
if (unlikely(tmp)) // Out of range value in store
|
||||
{
|
||||
/*
|
||||
It's better to return an error here than getting a confusing
|
||||
'duplicate key error' later.
|
||||
First, test if the query was aborted due to strict mode constraints
|
||||
or new field value greater than maximum integer value:
|
||||
*/
|
||||
result= HA_ERR_AUTOINC_ERANGE;
|
||||
if (thd->killed == KILL_BAD_DATA ||
|
||||
nr > table->next_number_field->get_max_int_value())
|
||||
DBUG_RETURN(HA_ERR_AUTOINC_ERANGE);
|
||||
/*
|
||||
Field refused this value (overflow) and truncated it, use the result
|
||||
of the truncation (which is going to be inserted); however we try to
|
||||
decrease it to honour auto_increment_* variables.
|
||||
That will shift the left bound of the reserved interval, we don't
|
||||
bother shifting the right bound (anyway any other value from this
|
||||
interval will cause a duplicate key).
|
||||
*/
|
||||
nr= prev_insert_id(table->next_number_field->val_int(), variables);
|
||||
if (unlikely(table->next_number_field->store((longlong)nr, TRUE)))
|
||||
nr= table->next_number_field->val_int();
|
||||
}
|
||||
if (append)
|
||||
{
|
||||
@@ -3212,9 +3237,6 @@ int handler::update_auto_increment()
|
||||
*/
|
||||
insert_id_for_cur_row= nr;
|
||||
|
||||
if (result) // overflow
|
||||
DBUG_RETURN(result);
|
||||
|
||||
/*
|
||||
Set next insert id to point to next auto-increment value to be able to
|
||||
handle multi-row statements.
|
||||
|
Reference in New Issue
Block a user