1
0
mirror of https://github.com/MariaDB/server.git synced 2025-08-08 11:22:35 +03:00

Merge branch '10.11' into 11.0

This commit is contained in:
Sergei Golubchik
2024-05-11 13:23:27 +02:00
820 changed files with 18746 additions and 6295 deletions

View File

@@ -684,13 +684,6 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
hton->slot= HA_SLOT_UNDEF;
/* Historical Requirement */
plugin->data= hton; // shortcut for the future
/* [remove after merge] notes on merge conflict (MDEV-31400):
10.6-10.11: 13ba00ff4933cfc1712676f323587504e453d1b5
11.0-11.2: 42f8be10f18163c4025710cf6a212e82bddb2f62
The 10.11->11.0 conflict is trivial, but the reference commit also
contains different non-conflict changes that need to be applied to 11.0
(and beyond).
*/
if (plugin->plugin->init && (ret= plugin->plugin->init(hton)))
goto err;
@@ -1660,6 +1653,29 @@ ha_check_and_coalesce_trx_read_only(THD *thd, Ha_trx_info *ha_list,
return rw_ha_count;
}
#ifdef WITH_WSREP
/**
Check if the transaction is read-write and contains a storage engine
that does not support two-phase commit.
@retval
true Transaction contains storage engine not supporting
two phase commit and transaction is read-write
@retval
false otherwise
*/
static bool wsrep_have_no2pc_rw_ha(Ha_trx_info* ha_list)
{
  /*
    Walk the engines registered for this transaction and report
    whether any read-write participant lacks a prepare() hook,
    i.e. cannot take part in two-phase commit.
  */
  Ha_trx_info *info= ha_list;
  while (info)
  {
    handlerton *hton= info->ht();
    if (info->is_trx_read_write() && !hton->prepare)
      return true;                     // read-write engine without 2PC
    info= info->next();
  }
  return false;
}
#endif /* WITH_WSREP */
/**
@retval
@@ -1872,14 +1888,18 @@ int ha_commit_trans(THD *thd, bool all)
*/
if (run_wsrep_hooks)
{
// This commit involves more than one storage engine and requires
// two phases, but some engines don't support it.
// Issue a message to the client and roll back the transaction.
if (trans->no_2pc && rw_ha_count > 1)
// This commit involves storage engines that do not support two phases.
// We allow read only transactions to such storage engines but not
// read write transactions.
if (trans->no_2pc && rw_ha_count > 1 && wsrep_have_no2pc_rw_ha(trans->ha_list))
{
// REPLACE|INSERT INTO ... SELECT uses TOI for MyISAM|Aria
if (WSREP(thd) && thd->wsrep_cs().mode() != wsrep::client_state::m_toi)
{
// This commit involves more than one storage engine and requires
// two phases, but some engines don't support it.
// Issue a message to the client and roll back the transaction.
// REPLACE|INSERT INTO ... SELECT uses TOI for MyISAM|Aria
if (WSREP(thd) && thd->wsrep_cs().mode() != wsrep::client_state::m_toi)
{
my_message(ER_ERROR_DURING_COMMIT, "Transactional commit not supported "
"by involved engine(s)", MYF(0));
error= 1;
@@ -5061,6 +5081,12 @@ uint handler::get_dup_key(int error)
DBUG_RETURN(errkey);
}
bool handler::has_dup_ref() const
{
  /*
    True when the engine records duplicate-row positions
    (HA_DUPLICATE_POS) or a long-unique lookup key has been flagged.
    At least one of the key indicators must be set by this point.
  */
  DBUG_ASSERT(lookup_errkey != (uint)-1 || errkey != (uint)-1);
  if (ha_table_flags() & HA_DUPLICATE_POS)
    return true;
  return lookup_errkey != (uint)-1;
}
/**
Delete all files with extension from bas_ext().
@@ -5394,34 +5420,48 @@ handler::ha_check_and_repair(THD *thd)
/**
Disable indexes: public interface.
@param map has 0 for all indexes that should be disabled
@param persist indexes should stay disabled after server restart
Currently engines don't support disabling an arbitrary subset of indexes.
In particular, if the change is persistent:
* auto-increment index should not be disabled
* unique indexes should not be disabled
If unique or auto-increment indexes are disabled (non-persistently),
the caller should only insert data that does not require
auto-inc generation and does not violate uniqueness.
@sa handler::disable_indexes()
*/
int
handler::ha_disable_indexes(uint mode)
handler::ha_disable_indexes(key_map map, bool persist)
{
DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
m_lock_type != F_UNLCK);
DBUG_ASSERT(table->s->tmp_table != NO_TMP_TABLE || m_lock_type != F_UNLCK);
mark_trx_read_write();
return disable_indexes(mode);
return disable_indexes(map, persist);
}
/**
Enable indexes: public interface.
@param map has 1 for all indexes that should be enabled
@param persist indexes should stay enabled after server restart
@sa handler::enable_indexes()
*/
int
handler::ha_enable_indexes(uint mode)
handler::ha_enable_indexes(key_map map, bool persist)
{
DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
m_lock_type != F_UNLCK);
DBUG_ASSERT(table->s->tmp_table != NO_TMP_TABLE || m_lock_type != F_UNLCK);
mark_trx_read_write();
return enable_indexes(mode);
return enable_indexes(map, persist);
}
@@ -5727,8 +5767,6 @@ handler::ha_create_partitioning_metadata(const char *name,
DBUG_ASSERT(m_lock_type == F_UNLCK ||
(!old_name && strcmp(name, table_share->path.str)));
mark_trx_read_write();
return create_partitioning_metadata(name, old_name, action_flag);
}
@@ -7531,11 +7569,8 @@ exit:
if (error == HA_ERR_FOUND_DUPP_KEY)
{
table->file->lookup_errkey= key_no;
if (ha_table_flags() & HA_DUPLICATE_POS)
{
lookup_handler->position(table->record[0]);
memcpy(table->file->dup_ref, lookup_handler->ref, ref_length);
}
lookup_handler->position(table->record[0]);
memcpy(table->file->dup_ref, lookup_handler->ref, ref_length);
}
restore_record(table, file->lookup_buffer);
table->restore_blob_values(blob_storage);
@@ -7614,7 +7649,7 @@ int handler::check_duplicate_long_entries_update(const uchar *new_rec)
So also check for that too
*/
if((field->is_null(0) != field->is_null(reclength)) ||
field->cmp_binary_offset(reclength))
field->cmp_offset(reclength))
{
if((error= check_duplicate_long_entry_key(new_rec, i)))
return error;
@@ -7823,7 +7858,16 @@ int handler::ha_write_row(const uchar *buf)
m_lock_type == F_WRLCK);
DBUG_ENTER("handler::ha_write_row");
DEBUG_SYNC_C("ha_write_row_start");
#ifdef WITH_WSREP
DBUG_EXECUTE_IF("wsrep_ha_write_row",
{
const char act[]=
"now "
"SIGNAL wsrep_ha_write_row_reached "
"WAIT_FOR wsrep_ha_write_row_continue";
DBUG_ASSERT(!debug_sync_set_action(ha_thd(), STRING_WITH_LEN(act)));
});
#endif /* WITH_WSREP */
if ((error= ha_check_overlaps(NULL, buf)))
DBUG_RETURN(error);