1
0
mirror of https://github.com/MariaDB/server.git synced 2025-08-05 13:16:09 +03:00

Merge branch '10.6' into 10.11

This commit is contained in:
Sergei Golubchik
2024-04-22 11:00:03 +02:00
418 changed files with 7074 additions and 2930 deletions

View File

@@ -344,6 +344,27 @@ struct rpl_parallel_thread_pool {
struct rpl_parallel_entry {
/*
A small struct to put worker threads references into a FIFO (using an
I_List) for round-robin scheduling.
*/
struct sched_bucket : public ilink {
sched_bucket() : thr(nullptr) { }
rpl_parallel_thread *thr;
};
/*
A struct to keep track of into which "generation" an XA XID was last
scheduled. A "generation" means that we know that every worker thread
slot in the rpl_parallel_entry was scheduled at least once. When more
than two generations have passed, we can safely reuse the XID in a
different worker.
*/
struct xid_active_generation {
uint64 generation;
sched_bucket *thr;
xid_t xid;
};
mysql_mutex_t LOCK_parallel_entry;
mysql_cond_t COND_parallel_entry;
uint32 domain_id;
@@ -374,17 +395,36 @@ struct rpl_parallel_entry {
uint64 stop_sub_id;
/*
Cyclic array recording the last rpl_thread_max worker threads that we
Array recording the last rpl_thread_max worker threads that we
queued event for. This is used to limit how many workers a single domain
can occupy (--slave-domain-parallel-threads).
The array is structured as a FIFO using an I_List thread_sched_fifo.
Note that workers are never explicitly deleted from the array. Instead,
we need to check (under LOCK_rpl_thread) that the thread still belongs
to us before re-using (rpl_thread::current_owner).
*/
rpl_parallel_thread **rpl_threads;
sched_bucket *rpl_threads;
I_List<sched_bucket> *thread_sched_fifo;
uint32 rpl_thread_max;
uint32 rpl_thread_idx;
/*
Keep track of all XA XIDs that may still be active in a worker thread.
The elements are of type xid_active_generation.
*/
DYNAMIC_ARRAY maybe_active_xid;
/*
Keeping track of the current scheduling generation.
A new generation means that every worker thread in the rpl_threads array
has been scheduled at least one event group.
When we have scheduled to slot current_generation_idx= 0, 1, ..., N-1 in this
order, we know that (at least) one generation has passed.
*/
uint64 current_generation;
uint32 current_generation_idx;
/*
The sub_id of the last transaction to commit within this domain_id.
Must be accessed under LOCK_parallel_entry protection.
@@ -440,14 +480,22 @@ struct rpl_parallel_entry {
/* Relay log info of replication source for this entry. */
Relay_log_info *rli;
void check_scheduling_generation(sched_bucket *cur);
sched_bucket *check_xa_xid_dependency(xid_t *xid);
rpl_parallel_thread * choose_thread(rpl_group_info *rgi, bool *did_enter_cond,
PSI_stage_info *old_stage,
Gtid_log_event *gtid_ev);
rpl_parallel_thread *
choose_thread_internal(uint idx, bool *did_enter_cond, rpl_group_info *rgi,
PSI_stage_info *old_stage);
choose_thread_internal(sched_bucket *cur_thr, bool *did_enter_cond,
rpl_group_info *rgi, PSI_stage_info *old_stage);
int queue_master_restart(rpl_group_info *rgi,
Format_description_log_event *fdev);
/*
The initial size of the maybe_active_xid array corresponds to the
(perhaps unlikely) case where each worker receives both an XA-PREPARE
and an XA-COMMIT within the same generation.
*/
inline uint active_xid_init_alloc() { return 3 * 2 * rpl_thread_max; }
};
struct rpl_parallel {
HASH domain_hash;