
- Added a wait_until parameter to begin_nbo_phase_two(); see the
  caller-side sketch below.

- Retry enter_toi() in poll_enter_toi() also for error_connection_failed,
  which means that connectivity to the cluster has been lost
  (a.k.a. non-prim).
Author: Teemu Ollakka
Date:   2019-09-09 17:38:39 +03:00
commit  086c466637
parent  750052b640
3 changed files with 25 additions and 9 deletions
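
As a caller-side illustration of the two changes above, here is a minimal,
hypothetical sketch (not part of this commit). It assumes the header
declarations shown in the first hunk below and a wsrep::client_state that has
already completed NBO phase one and entered NBO mode; the helper name and the
timeout parameter are illustrative only. With a non-epoch deadline,
poll_enter_toi() retries TOI entry every 100 ms on certification failure or
lost cluster connectivity until the deadline expires or the client is
interrupted.

    #include "wsrep/client_state.hpp"

    #include <chrono>

    // Hypothetical helper, not part of wsrep-lib: run NBO phase two with a
    // retry deadline computed from a relative timeout.
    int run_nbo_phase_two(wsrep::client_state& cs,
                          const wsrep::key_array& keys,
                          std::chrono::seconds timeout)
    {
        // Deadline handed down to poll_enter_toi(). Passing the default
        // std::chrono::time_point<wsrep::clock>() instead keeps the old
        // behaviour of giving up after the first failed enter_toi() call,
        // since the retry loop checks wait_until.time_since_epoch().count().
        auto const wait_until(wsrep::clock::now() + timeout);
        return cs.begin_nbo_phase_two(keys, wait_until);
    }

The other poll_enter_toi() callers (enter_toi_local(), begin_nbo_phase_one())
now pass toi_meta_ explicitly, while begin_nbo_phase_two() passes nbo_meta_,
as shown in the hunks below.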

@@ -790,8 +790,13 @@ namespace wsrep
          * passed to begin_nbo_phase_one().
          *
          * @param keys Key array.
+         * @param wait_until Time point to wait until for entering TOI for
+         *        phase two.
          */
-        int begin_nbo_phase_two(const wsrep::key_array& keys);
+        int begin_nbo_phase_two(const wsrep::key_array& keys,
+                                std::chrono::time_point<wsrep::clock>
+                                wait_until =
+                                std::chrono::time_point<wsrep::clock>());
 
         /**
          * End non-blocking operation phase two. This call will
@@ -1018,6 +1023,7 @@ namespace wsrep
         poll_enter_toi(wsrep::unique_lock<wsrep::mutex>& lock,
                        const wsrep::key_array& keys,
                        const wsrep::const_buffer& buffer,
+                       wsrep::ws_meta& meta,
                        int flags,
                        std::chrono::time_point<wsrep::clock> wait_until);
         void enter_toi_common(wsrep::unique_lock<wsrep::mutex>&);

@@ -237,7 +237,8 @@ namespace wsrep
            error_size_exceeded,
            /** Connectivity to cluster lost */
            error_connection_failed,
-           /** Internal provider failure, provider must be reinitialized */
+           /** Internal provider failure or provider was closed,
+               provider must be reinitialized */
            error_provider_failed,
            /** Fatal error, server must abort */
            error_fatal,

@@ -336,6 +336,7 @@ wsrep::client_state::poll_enter_toi(
     wsrep::unique_lock<wsrep::mutex>& lock,
     const wsrep::key_array& keys,
     const wsrep::const_buffer& buffer,
+    wsrep::ws_meta& meta,
     int flags,
     std::chrono::time_point<wsrep::clock> wait_until)
 {
@@ -349,7 +350,7 @@ wsrep::client_state::poll_enter_toi(
     do
     {
         lock.unlock();
-        status = provider().enter_toi(id_, keys, buffer, toi_meta_, flags);
+        status = provider().enter_toi(id_, keys, buffer, meta, flags);
         if (status != wsrep::provider::success &&
             not toi_meta_.gtid().is_undefined())
         {
@@ -364,13 +365,15 @@ wsrep::client_state::poll_enter_toi(
             }
             toi_meta_ = wsrep::ws_meta();
         }
-        if (status == wsrep::provider::error_certification_failed)
+        if (status == wsrep::provider::error_certification_failed ||
+            status == wsrep::provider::error_connection_failed)
         {
             ::usleep(100000);
         }
         lock.lock();
     }
-    while (status == wsrep::provider::error_certification_failed &&
+    while ((status == wsrep::provider::error_certification_failed ||
+            status == wsrep::provider::error_connection_failed) &&
           wait_until.time_since_epoch().count() &&
           wsrep::clock::now() < wait_until &&
           not client_service_.interrupted(lock));
@@ -399,6 +402,7 @@ int wsrep::client_state::enter_toi_local(const wsrep::key_array& keys,
 
     auto const status(poll_enter_toi(
                           lock, keys, buffer,
+                          toi_meta_,
                           wsrep::provider::flag::start_transaction |
                           wsrep::provider::flag::commit,
                           wait_until));
@@ -528,6 +532,7 @@ int wsrep::client_state::begin_nbo_phase_one(
     int ret;
     auto const status(poll_enter_toi(
                           lock, keys, buffer,
+                          toi_meta_,
                           wsrep::provider::flag::start_transaction,
                           wait_until));
     switch (status)
@@ -591,7 +596,9 @@ int wsrep::client_state::enter_nbo_mode(const wsrep::ws_meta& ws_meta)
     return 0;
 }
 
-int wsrep::client_state::begin_nbo_phase_two(const wsrep::key_array& keys)
+int wsrep::client_state::begin_nbo_phase_two(
+    const wsrep::key_array& keys,
+    std::chrono::time_point<wsrep::clock> wait_until)
 {
     debug_log_state("begin_nbo_phase_two: enter");
     debug_log_keys(keys);
@@ -606,9 +613,11 @@ int wsrep::client_state::begin_nbo_phase_two(const wsrep::key_array& keys)
     // Output stored in nbo_meta_ is copied to toi_meta_ for
     // phase two end.
     enum wsrep::provider::status status(
-        provider().enter_toi(id_, keys,
-                             wsrep::const_buffer(), nbo_meta_,
-                             wsrep::provider::flag::commit));
+        poll_enter_toi(lock, keys,
+                       wsrep::const_buffer(),
+                       nbo_meta_,
+                       wsrep::provider::flag::commit,
+                       wait_until));
     int ret;
     switch (status)
     {