
MDEV-31833 replication breaks when using optimistic replication and replica is a galera node

The MariaDB async replication SQL thread was stopped on any failure in applying
replication events, and the error message logged for the failure was always:
"Node has dropped from cluster". The assumption was that an event-applying
failure can only be caused by the node dropping out of the cluster.
With optimistic parallel replication, event applying can fail for natural
reasons, for example when a speculatively applied transaction conflicts with an
earlier transaction and must be rolled back and retried. This retry logic was
never exercised, because the slave SQL thread was stopped at the first applying
failure.

To support the optimistic parallel replication retry logic, this commit now
skips the replication slave abort if the node remains in the cluster
(wsrep_ready==ON) and replication is configured for optimistic or aggressive
retry logic.
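
For context, a replica opts into this retry logic with settings along these
lines (a minimal sketch; the values are illustrative and not part of this
commit):

-- Parallel applier with optimistic scheduling; 'aggressive' also qualifies,
-- while 'conservative' and below still abort the slave on apply failure.
-- Note: changing slave_parallel_threads requires STOP SLAVE first.
SET GLOBAL slave_parallel_threads = 4;
SET GLOBAL slave_parallel_mode = 'optimistic';
-- Upper bound on how many times a failed apply is retried.
SET GLOBAL slave_transaction_retries = 10;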

During the development of this fix, the galera.galera_as_slave_nonprim test
showed some problems. The test was analyzed and appears to need further
attention. One excessive sleep command is removed in this commit (see the test
diff below), but more fixes are still needed before the test is fully
deterministic. After this commit, however, galera_as_slave_nonprim passes.
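
The removed sleep is redundant because mysqltest can poll for the expected
state instead of waiting a fixed time; the standard pattern, as already used in
the test, is:

--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
--source include/wait_condition.inc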

Signed-off-by: Julius Goryavsky <julius.goryavsky@mariadb.com>
Author: sjaakola
Date: 2023-09-12 02:37:30 +02:00
Committed by: Julius Goryavsky
Parent: ef569c324d
Commit: a3cbc44b24
4 changed files with 30 additions and 6 deletions


@@ -27,7 +27,6 @@ SET SESSION wsrep_sync_wait = 0;
 CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
 --connection node_2
---sleep 1
 --let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
 --source include/wait_condition.inc
@@ -47,7 +46,6 @@ INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
 --connection node_2
---sleep 5
 --let $value = query_get_value(SHOW SLAVE STATUS, Last_SQL_Error, 1)
 --connection node_1
 --disable_query_log


@@ -5728,7 +5728,8 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
                          ignored_error_code(actual_error) : 0);
 #ifdef WITH_WSREP
-    if (WSREP(thd) && wsrep_ignored_error_code(this, actual_error))
+    if (WSREP(thd) && thd->wsrep_applier &&
+        wsrep_ignored_error_code(this, actual_error))
     {
       idempotent_error= true;
       thd->wsrep_has_ignored_error= true;


@@ -820,6 +820,10 @@ do_retry:
   event_count= 0;
   err= 0;
   errmsg= NULL;
+#ifdef WITH_WSREP
+  thd->wsrep_cs().reset_error();
+  WSREP_DEBUG("retrying async replication event");
+#endif /* WITH_WSREP */
   /*
     If we already started committing before getting the deadlock (or other
@@ -919,6 +923,7 @@ do_retry:
       err= rgi->worker_error= 1;
       my_error(ER_PRIOR_COMMIT_FAILED, MYF(0));
       mysql_mutex_unlock(&entry->LOCK_parallel_entry);
+      goto err;
     }
     mysql_mutex_unlock(&entry->LOCK_parallel_entry);
@@ -960,7 +965,17 @@ do_retry:
     possibility of an old deadlock kill lingering on beyond this point.
   */
   thd->reset_killed();
+#ifdef WITH_WSREP
+  if (wsrep_before_command(thd))
+  {
+    WSREP_WARN("Parallel slave worker failed at wsrep_before_command() hook");
+    err= 1;
+    goto err;
+  }
+  wsrep_start_trx_if_not_started(thd);
+  WSREP_DEBUG("parallel slave retry, after trx start");
+#endif /* WITH_WSREP */
   strmake_buf(log_name, ir->name);
   if ((fd= open_binlog(&rlog, log_name, &errmsg)) <0)
   {


@@ -3878,9 +3878,19 @@ apply_event_and_update_pos_apply(Log_event* ev, THD* thd, rpl_group_info *rgi,
     default:
       WSREP_DEBUG("SQL apply failed, res %d conflict state: %s",
                   exec_res, wsrep_thd_transaction_state_str(thd));
-      rli->abort_slave= 1;
-      rli->report(ERROR_LEVEL, ER_UNKNOWN_COM_ERROR, rgi->gtid_info(),
-                  "Node has dropped from cluster");
+      /*
+        async replication thread should be stopped, if failure was
+        not due to optimistic parallel applying or if node
+        has dropped from cluster
+      */
+      if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL &&
+          ((rli->mi->using_parallel() &&
+            rli->mi->parallel_mode <= SLAVE_PARALLEL_CONSERVATIVE) ||
+           wsrep_ready == 0)) {
+        rli->abort_slave= 1;
+        rli->report(ERROR_LEVEL, ER_UNKNOWN_COM_ERROR, rgi->gtid_info(),
+                    "Node has dropped from cluster");
+      }
       break;
   }
   mysql_mutex_unlock(&thd->LOCK_thd_data);
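
A usage note on the new condition above: both of its inputs are observable on a
running replica through standard status and system variables (nothing
introduced by this commit), e.g.:

SHOW GLOBAL STATUS LIKE 'wsrep_ready';             -- must be ON for the retry path
SHOW GLOBAL VARIABLES LIKE 'slave_parallel_mode';  -- 'optimistic' or 'aggressive' enables retries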