
Just move, no code changes otherwise.

Part of MDEV-7974 - backport fix for mysql bug#12161 (XA and binlog)
Sergey Vojtovich
2019-04-18 14:43:40 +04:00
parent ca7fbcea6c
commit 07140f171d
11 changed files with 872 additions and 831 deletions


@@ -5641,263 +5641,6 @@ void THD::mark_transaction_to_rollback(bool all)
  is_fatal_sub_stmt_error= true;
  transaction_rollback_request= all;
}
/***************************************************************************
  Handling of XA id caching
***************************************************************************/
class XID_cache_element
{
  /*
    m_state is used to prevent elements from being deleted while XA RECOVER
    iterates xid cache and to prevent recovered elements from being acquired
    by multiple threads.

    bits 1..29 are reference counter
    bit 30 is RECOVERED flag
    bit 31 is ACQUIRED flag (thread owns this xid)
    bit 32 is unused

    Newly allocated and deleted elements have m_state set to 0.

    On lock() m_state is atomically incremented. It also creates a load-ACQUIRE
    memory barrier to make sure m_state is actually updated before further
    memory accesses. Attempting to lock an element that has neither the
    ACQUIRED nor the RECOVERED flag set returns failure, and further accesses
    to element memory are forbidden.

    On unlock() m_state is decremented. It also creates a store-RELEASE memory
    barrier to make sure m_state is actually updated after preceding memory
    accesses.

    The ACQUIRED flag is set when a thread registers its xid or when a thread
    acquires a recovered xid.

    The RECOVERED flag is set for elements found during crash recovery.

    The ACQUIRED and RECOVERED flags are cleared before an element is deleted
    from the hash in a spin loop, after the last reference is released.
  */
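  /*
    Worked example (values are illustrative, derived from the constants
    below): RECOVERED == 0x20000000 and ACQUIRED == 0x40000000, so
    m_state == 0x20000002 is a recovered element currently pinned by two
    XA RECOVER readers. acquire_recovered() spins until it reads exactly
    0x20000000, then CASes m_state to 0x60000000 (ACQUIRED | RECOVERED),
    so at most one thread can take ownership.
  */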
  std::atomic<int32_t> m_state;
public:
  static const int32 ACQUIRED= 1 << 30;
  static const int32 RECOVERED= 1 << 29;
  XID_STATE *m_xid_state;
  bool is_set(int32_t flag)
  { return m_state.load(std::memory_order_relaxed) & flag; }
  void set(int32_t flag)
  {
    DBUG_ASSERT(!is_set(ACQUIRED | RECOVERED));
    m_state.fetch_add(flag, std::memory_order_relaxed);
  }
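  /*
    Take a read reference for the duration of an iteration step. Fails for
    elements that have neither ACQUIRED nor RECOVERED set, i.e. elements
    that are being deleted or are not yet initialized.
  */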
  bool lock()
  {
    int32_t old= m_state.fetch_add(1, std::memory_order_acquire);
    if (old & (ACQUIRED | RECOVERED))
      return true;
    unlock();
    return false;
  }
  void unlock()
  { m_state.fetch_sub(1, std::memory_order_release); }
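  /*
    Called before the element is deleted from the hash: spin until all
    concurrent readers have released their references, then atomically
    clear the ACQUIRED (and possibly RECOVERED) flags so that subsequent
    lock() attempts fail.
  */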
  void mark_uninitialized()
  {
    int32_t old= ACQUIRED;
    while (!m_state.compare_exchange_weak(old, 0,
                                          std::memory_order_relaxed,
                                          std::memory_order_relaxed))
    {
      old&= ACQUIRED | RECOVERED;
      (void) LF_BACKOFF();
    }
  }
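  /*
    Attempt to take ownership of a recovered element. Succeeds for at most
    one thread: the transition RECOVERED -> ACQUIRED | RECOVERED is done
    with a CAS, and the loop gives up as soon as the element is seen
    without the RECOVERED flag or with the ACQUIRED flag already set.
  */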
  bool acquire_recovered()
  {
    int32_t old= RECOVERED;
    while (!m_state.compare_exchange_weak(old, ACQUIRED | RECOVERED,
                                          std::memory_order_relaxed,
                                          std::memory_order_relaxed))
    {
      if (!(old & RECOVERED) || (old & ACQUIRED))
        return false;
      old= RECOVERED;
      (void) LF_BACKOFF();
    }
    return true;
  }
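  /*
    LF_HASH callbacks: lf_hash_initializer binds a hash element to the
    inserted XID_STATE, lf_alloc_constructor/lf_alloc_destructor manage
    element lifetime, and key() exposes the XID as the hash key.
  */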
  static void lf_hash_initializer(LF_HASH *hash __attribute__((unused)),
                                  XID_cache_element *element,
                                  XID_STATE *xid_state)
  {
    DBUG_ASSERT(!element->is_set(ACQUIRED | RECOVERED));
    element->m_xid_state= xid_state;
    xid_state->xid_cache_element= element;
  }
  static void lf_alloc_constructor(uchar *ptr)
  {
    XID_cache_element *element= (XID_cache_element*) (ptr + LF_HASH_OVERHEAD);
    element->m_state= 0;
  }
  static void lf_alloc_destructor(uchar *ptr)
  {
    XID_cache_element *element= (XID_cache_element*) (ptr + LF_HASH_OVERHEAD);
    DBUG_ASSERT(!element->is_set(ACQUIRED));
    if (element->is_set(RECOVERED))
      my_free(element->m_xid_state);
  }
  static uchar *key(const XID_cache_element *element, size_t *length,
                    my_bool not_used __attribute__((unused)))
  {
    *length= element->m_xid_state->xid.key_length();
    return element->m_xid_state->xid.key();
  }
};
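/*
  Process-wide lock-free hash of XID_cache_element, keyed by XID. All
  threads access it through LF_PINS kept in THD::xid_hash_pins.
*/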
static LF_HASH xid_cache;
static bool xid_cache_inited;


bool THD::fix_xid_hash_pins()
{
  if (!xid_hash_pins)
    xid_hash_pins= lf_hash_get_pins(&xid_cache);
  return !xid_hash_pins;
}


void xid_cache_init()
{
  xid_cache_inited= true;
  lf_hash_init(&xid_cache, sizeof(XID_cache_element), LF_HASH_UNIQUE, 0, 0,
               (my_hash_get_key) XID_cache_element::key, &my_charset_bin);
  xid_cache.alloc.constructor= XID_cache_element::lf_alloc_constructor;
  xid_cache.alloc.destructor= XID_cache_element::lf_alloc_destructor;
  xid_cache.initializer=
    (lf_hash_initializer) XID_cache_element::lf_hash_initializer;
}


void xid_cache_free()
{
  if (xid_cache_inited)
  {
    lf_hash_destroy(&xid_cache);
    xid_cache_inited= false;
  }
}
/**
  Find recovered XA transaction by XID.
*/

XID_STATE *xid_cache_search(THD *thd, XID *xid)
{
  XID_STATE *xs= 0;
  DBUG_ASSERT(thd->xid_hash_pins);
  XID_cache_element *element=
    (XID_cache_element*) lf_hash_search(&xid_cache, thd->xid_hash_pins,
                                        xid->key(), xid->key_length());
  if (element)
  {
    if (element->acquire_recovered())
      xs= element->m_xid_state;
    lf_hash_search_unpin(thd->xid_hash_pins);
    DEBUG_SYNC(thd, "xa_after_search");
  }
  return xs;
}
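/*
  Insert an XID found during crash recovery. The element is marked
  RECOVERED and its XID_STATE is owned by the cache (freed by
  lf_alloc_destructor or xid_cache_delete). A duplicate XID
  (lf_hash_insert() returning 1) is not treated as an error here.
*/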
bool xid_cache_insert(XID *xid, enum xa_states xa_state)
{
  XID_STATE *xs;
  LF_PINS *pins;
  int res= 1;

  if (!(pins= lf_hash_get_pins(&xid_cache)))
    return true;

  if ((xs= (XID_STATE*) my_malloc(sizeof(*xs), MYF(MY_WME))))
  {
    xs->xa_state= xa_state;
    xs->xid.set(xid);
    xs->rm_error= 0;

    if ((res= lf_hash_insert(&xid_cache, pins, xs)))
      my_free(xs);
    else
      xs->xid_cache_element->set(XID_cache_element::RECOVERED);
    if (res == 1)
      res= 0;
  }
  lf_hash_put_pins(pins);
  return res;
}
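/*
  Insert the XID_STATE of an active transaction (e.g. on XA START). The
  element is marked ACQUIRED by the inserting thread; a duplicate XID
  raises ER_XAER_DUPID.
*/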
bool xid_cache_insert(THD *thd, XID_STATE *xid_state)
{
  if (thd->fix_xid_hash_pins())
    return true;

  int res= lf_hash_insert(&xid_cache, thd->xid_hash_pins, xid_state);
  switch (res)
  {
  case 0:
    xid_state->xid_cache_element->set(XID_cache_element::ACQUIRED);
    break;
  case 1:
    my_error(ER_XAER_DUPID, MYF(0));
    /* fall through */
  default:
    xid_state->xid_cache_element= 0;
  }
  return res;
}
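/*
  Remove an XID from the cache. mark_uninitialized() waits for concurrent
  readers and clears the flags before the element is deleted from the
  hash; the XID_STATE of a recovered transaction is freed here, since it
  was allocated by the recovery variant of xid_cache_insert() above.
*/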
void xid_cache_delete(THD *thd, XID_STATE *xid_state)
{
  if (xid_state->xid_cache_element)
  {
    bool recovered= xid_state->xid_cache_element->is_set(XID_cache_element::RECOVERED);
    DBUG_ASSERT(thd->xid_hash_pins);
    xid_state->xid_cache_element->mark_uninitialized();
    lf_hash_delete(&xid_cache, thd->xid_hash_pins,
                   xid_state->xid.key(), xid_state->xid.key_length());
    xid_state->xid_cache_element= 0;
    if (recovered)
      my_free(xid_state);
  }
}
struct xid_cache_iterate_arg
{
  my_hash_walk_action action;
  void *argument;
};

static my_bool xid_cache_iterate_callback(XID_cache_element *element,
                                          xid_cache_iterate_arg *arg)
{
  my_bool res= FALSE;
  if (element->lock())
  {
    res= arg->action(element->m_xid_state, arg->argument);
    element->unlock();
  }
  return res;
}
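/*
  Walk all cached XIDs under lock()/unlock() protection, so that elements
  cannot be freed while the action inspects them. This is what XA RECOVER
  uses to list prepared transactions.
*/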
int xid_cache_iterate(THD *thd, my_hash_walk_action action, void *arg)
{
  xid_cache_iterate_arg argument= { action, arg };
  return thd->fix_xid_hash_pins() ? -1 :
         lf_hash_iterate(&xid_cache, thd->xid_hash_pins,
                         (my_hash_walk_action) xid_cache_iterate_callback,
                         &argument);
}
/**