Merge 10.5 into 10.6
@@ -42,5 +42,14 @@ SELECT * FROM v WHERE f = '10.5.20';
f
drop view v;
#
# MDEV-34785: Assertion failure in Item_func_or_sum::do_build_clone
# (Item_func_not_all)
#
CREATE VIEW t AS SELECT 0 AS a;
SELECT * FROM t WHERE a=ALL (SELECT 0);
a
0
DROP VIEW t;
#
# End of 10.5 tests
#
@@ -46,6 +46,15 @@ CREATE VIEW v AS SELECT version() AS f;
SELECT * FROM v WHERE f = '10.5.20';
drop view v;

--echo #
--echo # MDEV-34785: Assertion failure in Item_func_or_sum::do_build_clone
--echo # (Item_func_not_all)
--echo #

CREATE VIEW t AS SELECT 0 AS a;
SELECT * FROM t WHERE a=ALL (SELECT 0);
DROP VIEW t;

--echo #
--echo # End of 10.5 tests
--echo #
@@ -1,5 +1,7 @@
--source include/have_innodb.inc
--source include/have_log_bin.inc
# Test does a lot of queries that take a lot of CPU under Valgrind.
--source include/not_valgrind.inc

call mtr.add_suppression("Can't init tc log");
call mtr.add_suppression("Aborting");
@@ -9,10 +9,7 @@ connection slave;
SET @old_parallel= @@GLOBAL.slave_parallel_threads;
SET GLOBAL slave_parallel_threads=10;
CHANGE MASTER TO master_host='127.0.0.1', master_port=SERVER_MYPORT_1, master_user='root', master_log_file='master-bin.000001', master_log_pos=4;
FLUSH TABLES WITH READ LOCK;
include/start_slave.inc
include/wait_for_slave_param.inc [Seconds_Behind_Master]
UNLOCK TABLES;
connection master;
CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t2 VALUES (1);
@@ -14,6 +14,12 @@ SET GLOBAL event_scheduler=on;
let $wait_condition= SELECT count(*)>0 FROM t1;
--source include/wait_condition.inc
SET GLOBAL event_scheduler=off;
# If the time rolls to the next whole second just at this point, a new event
# run may be scheduled. Wait for this to disappear, otherwise we see occasional
# test failures if the table gets dropped before the extra event run completes.
# Expect 5 connections: default, master, master1, server_1, binlog dump thread
--let $wait_condition= SELECT COUNT(*) = 5 FROM INFORMATION_SCHEMA.PROCESSLIST;
--source include/wait_condition.inc
SELECT DISTINCT a FROM t1;
DELETE FROM t1;
@@ -1,3 +1,5 @@
# Test applies a large binlog, takes long under Valgrind with little benefit.
--source include/not_valgrind.inc
--source include/have_innodb.inc
--source include/have_partition.inc
--source include/have_binlog_format_mixed_or_row.inc
@@ -28,14 +28,7 @@ SET GLOBAL slave_parallel_threads=10;
--replace_result $SERVER_MYPORT_1 SERVER_MYPORT_1
eval CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_1, master_user='root', master_log_file='master-bin.000001', master_log_pos=4;

# Block execution yet when the blocked query timestamp has been already accounted
FLUSH TABLES WITH READ LOCK;
--source include/start_slave.inc
--let $slave_param = Seconds_Behind_Master
--let $slave_param_value = 1
--let $slave_param_comparison= >=
--source include/wait_for_slave_param.inc
UNLOCK TABLES;

--connection master
CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=InnoDB;
@@ -19,7 +19,17 @@
--source include/master-slave.inc

connection slave;
--let $connection_id=`SELECT id FROM information_schema.processlist where state LIKE 'Waiting for master to send event'`
--let $i= 100
while ($i > 0) {
  dec $i;
  --let $connection_id=`SELECT id FROM information_schema.processlist where state LIKE 'Waiting for master to send event'`
  if ($connection_id) {
    let $i= 0;
  }
  if ($i > 0) {
    --sleep 0.1
  }
}

if(!$connection_id)
{
@@ -39,7 +39,7 @@ extern "C" unsigned crc32c_sse42(unsigned crc, const void* buf, size_t size);

constexpr uint32_t cpuid_ecx_SSE42= 1U << 20;
constexpr uint32_t cpuid_ecx_SSE42_AND_PCLMUL= cpuid_ecx_SSE42 | 1U << 1;
constexpr uint32_t cpuid_ecx_XSAVE= 1U << 26;
constexpr uint32_t cpuid_ecx_AVX_AND_XSAVE= 1U << 28 | 1U << 27;

static uint32_t cpuid_ecx()
{
@@ -395,7 +395,7 @@ static bool os_have_avx512()

static ATTRIBUTE_NOINLINE bool have_vpclmulqdq(uint32_t cpuid_ecx)
{
if (!(cpuid_ecx & cpuid_ecx_XSAVE) || !os_have_avx512())
if ((~cpuid_ecx & cpuid_ecx_AVX_AND_XSAVE) || !os_have_avx512())
return false;
# ifdef _MSC_VER
int regs[4];
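Note on the have_vpclmulqdq() change above: the old condition tested only the single cpuid_ecx_XSAVE bit, while (~cpuid_ecx & cpuid_ecx_AVX_AND_XSAVE) is non-zero whenever at least one of the two bits grouped in cpuid_ecx_AVX_AND_XSAVE is missing, so the function now bails out unless both are reported by CPUID. A minimal standalone C++ sketch of that bit test follows; it is illustrative only and not part of the patch, with the constant value copied from the hunk above.

// Illustrative only: when does (~ecx & mask) reject a CPU?
#include <cassert>
#include <cstdint>

int main()
{
  constexpr uint32_t cpuid_ecx_AVX_AND_XSAVE= 1U << 28 | 1U << 27; // value as in crc32c.cc

  uint32_t ecx_both= cpuid_ecx_AVX_AND_XSAVE; // both required bits present
  uint32_t ecx_one= 1U << 27;                 // only one of the required bits present

  assert((~ecx_both & cpuid_ecx_AVX_AND_XSAVE) == 0); // accepted: no required bit missing
  assert((~ecx_one & cpuid_ecx_AVX_AND_XSAVE) != 0);  // rejected: a required bit is missing
  return 0;
}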
@@ -4883,7 +4883,6 @@ public:
}
Item *do_get_copy(THD *thd) const override
{ return get_item_copy<Item_string_with_introducer>(thd, this); }
Item *do_build_clone(THD *thd) const override { return get_copy(thd); }
};


@@ -4898,7 +4897,6 @@ public:
{ }
Item *do_get_copy(THD *thd) const override
{ return get_item_copy<Item_string_sys>(thd, this); }
Item *do_build_clone(THD *thd) const override { return get_copy(thd); }
};


@@ -4915,7 +4913,6 @@ public:
{ }
Item *do_get_copy(THD *thd) const override
{ return get_item_copy<Item_string_ascii>(thd, this); }
Item *do_build_clone(THD *thd) const override { return get_copy(thd); }
};


@@ -4954,7 +4951,6 @@ public:
}
Item *do_get_copy(THD *thd) const override
{ return get_item_copy<Item_static_string_func>(thd, this); }
Item *do_build_clone(THD *thd) const override { return get_copy(thd); }
};


@@ -4974,7 +4970,6 @@ public:
}
Item *do_get_copy(THD *thd) const override
{ return get_item_copy<Item_partition_func_safe_string>(thd, this); }
Item *do_build_clone(THD *thd) const override { return get_copy(thd); }
};


@@ -5138,7 +5133,6 @@ public:
void print(String *str, enum_query_type query_type) override;
Item *do_get_copy(THD *thd) const override
{ return get_item_copy<Item_bin_string>(thd, this); }
Item *do_build_clone(THD *thd) const override { return get_copy(thd); }
};
@@ -756,6 +756,8 @@ public:
void set_sub_test(Item_maxmin_subselect *item) { test_sub_item= item; test_sum_item= 0;};
bool empty_underlying_subquery();
Item *neg_transformer(THD *thd) override;
Item *do_get_copy(THD *thd) const override
{ return get_item_copy<Item_func_not_all>(thd, this); }
};
@@ -1466,11 +1466,23 @@ handle_rpl_parallel_thread(void *arg)
after mark_start_commit(), we have to unmark, which has at least a
theoretical possibility of leaving a window where it looks like all
transactions in a GCO have started committing, while in fact one
will need to rollback and retry. This is not supposed to be possible
(since there is a deadlock, at least one transaction should be
blocked from reaching commit), but this seems a fragile ensurance,
and there were historically a number of subtle bugs in this area.
will need to rollback and retry.

Normally this will not happen, since the kill is there to resolve a
deadlock that is preventing at least one transaction from proceeding.
One case it can happen is with InnoDB dict stats update, which can
temporarily cause transactions to block each other, but locks are
released immediately, they don't linger until commit. There could be
other similar cases, there were historically a number of subtle bugs
in this area.

But once we start the commit, we can expect that no new lock
conflicts will be introduced. So by handling any lingering deadlock
kill at this point just before mark_start_commit(), we should be
robust even towards spurious deadlock kills.
*/
if (rgi->killed_for_retry != rpl_group_info::RETRY_KILL_NONE)
wait_for_pending_deadlock_kill(thd, rgi);
if (!thd->killed)
{
DEBUG_SYNC(thd, "rpl_parallel_before_mark_start_commit");
@@ -2520,6 +2520,23 @@ rpl_group_info::unmark_start_commit()

e= this->parallel_entry;
mysql_mutex_lock(&e->LOCK_parallel_entry);
/*
Assert that we have not already wrongly completed this GCO and signalled
the next one to start, only to now unmark and make the signal invalid.
This is to catch problems like MDEV-34696.

The error inject rpl_parallel_simulate_temp_err_xid is used to test this
precise situation, that we handle it gracefully if it somehow occurs in a
release build. So disable the assert in this case.
*/
#ifndef DBUG_OFF
bool allow_unmark_after_complete= false;
DBUG_EXECUTE_IF("rpl_parallel_simulate_temp_err_xid",
allow_unmark_after_complete= true;);
DBUG_ASSERT(!gco->next_gco ||
gco->next_gco->wait_count > e->count_committing_event_groups ||
allow_unmark_after_complete);
#endif
--e->count_committing_event_groups;
mysql_mutex_unlock(&e->LOCK_parallel_entry);
}
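The DBUG_ASSERT added above encodes the invariant described in the new comment: the next GCO is only signalled to start once count_committing_event_groups reaches its wait_count, so un-counting a group is only legitimate while that threshold has not yet been reached (or in the explicit error-injection case). A small standalone sketch of the check follows; the struct and helper names are hypothetical stand-ins, and only the asserted condition mirrors the patch.

// Illustrative stand-in types; only the condition mirrors the new DBUG_ASSERT.
#include <cassert>
#include <cstdint>

struct group_commit_orderer { uint64_t wait_count; };

static bool unmark_is_safe(const group_commit_orderer *next_gco,
                           uint64_t count_committing_event_groups,
                           bool allow_unmark_after_complete)
{
  return !next_gco ||
         next_gco->wait_count > count_committing_event_groups ||
         allow_unmark_after_complete;
}

int main()
{
  group_commit_orderer next{ 3 };
  assert(unmark_is_safe(&next, 2, false));   // below the wake-up threshold: safe
  assert(!unmark_is_safe(&next, 3, false));  // threshold reached: unmark would invalidate the signal
  assert(unmark_is_safe(nullptr, 7, false)); // no following GCO: always safe
  return 0;
}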
@@ -3074,21 +3074,25 @@ my_bool _ma_check_if_right_bitmap_type(MARIA_HA *info,
int _ma_bitmap_create_first(MARIA_SHARE *share)
{
uint block_size= share->bitmap.block_size;
size_t error;
File file= share->bitmap.file.file;
uchar marker[CRC_SIZE];
uchar *temp_buff;

if (!(temp_buff= (uchar*) my_alloca(block_size)))
return 1;
bzero(temp_buff, block_size);

/*
Next write operation of the page will write correct CRC
if it is needed
*/
int4store(marker, MARIA_NO_CRC_BITMAP_PAGE);
int4store(temp_buff + block_size - CRC_SIZE, MARIA_NO_CRC_BITMAP_PAGE);

if (mysql_file_chsize(file, block_size - sizeof(marker),
0, MYF(MY_WME)) ||
my_pwrite(file, marker, sizeof(marker),
block_size - sizeof(marker),
MYF(MY_NABP | MY_WME)))
error= my_pwrite(file, temp_buff, block_size, 0, MYF(MY_NABP | MY_WME));
my_afree(temp_buff);
if (error)
return 1;

share->state.state.data_file_length= block_size;
_ma_bitmap_delete_all(share);
return 0;
@@ -420,6 +420,8 @@ int maria_chk_size(HA_CHECK *param, register MARIA_HA *info)
/* We cannot check file sizes for S3 */
DBUG_RETURN(0);
}
/* We should never come here with internal temporary tables */
DBUG_ASSERT(!share->internal_table);

if (!(param->testflag & T_SILENT))
puts("- check file-size");
@@ -715,6 +717,8 @@ static int chk_index_down(HA_CHECK *param, MARIA_HA *info,
MARIA_PAGE ma_page;
DBUG_ENTER("chk_index_down");

DBUG_ASSERT(!share->internal_table);

/* Key blocks must lay within the key file length entirely. */
if (page + keyinfo->block_length > share->state.state.key_file_length)
{
@@ -2467,7 +2471,16 @@ static int initialize_variables_for_repair(HA_CHECK *param,
return 1;

/* calculate max_records */
sort_info->filelength= my_seek(info->dfile.file, 0L, MY_SEEK_END, MYF(0));
if (!share->internal_table)
{
/* Get real file size */
sort_info->filelength= my_seek(info->dfile.file, 0L, MY_SEEK_END, MYF(0));
}
else
{
/* For internal temporary files we are using the logical file length */
sort_info->filelength= share->state.state.data_file_length;
}

param->max_progress= sort_info->filelength;
if ((param->testflag & T_CREATE_MISSING_KEYS) ||
@@ -2865,7 +2878,8 @@ int maria_repair(HA_CHECK *param, register MARIA_HA *info,
{
fputs(" \r",stdout); fflush(stdout);
}
if (mysql_file_chsize(share->kfile.file,
if (!share->internal_table &&
mysql_file_chsize(share->kfile.file,
share->state.state.key_file_length, 0, MYF(0)))
{
_ma_check_print_warning(param,
@@ -4176,7 +4190,8 @@ int maria_repair_by_sort(HA_CHECK *param, register MARIA_HA *info,
if (param->testflag & T_CALC_CHECKSUM)
share->state.state.checksum=param->glob_crc;

if (mysql_file_chsize(share->kfile.file,
if (!share->internal_table &&
mysql_file_chsize(share->kfile.file,
share->state.state.key_file_length, 0, MYF(0)))
_ma_check_print_warning(param,
"Can't change size of indexfile, error: %d",
@@ -4724,7 +4739,8 @@ int maria_repair_parallel(HA_CHECK *param, register MARIA_HA *info,
if (param->testflag & T_CALC_CHECKSUM)
share->state.state.checksum=param->glob_crc;

if (mysql_file_chsize(share->kfile.file,
if (!share->internal_table &&
mysql_file_chsize(share->kfile.file,
share->state.state.key_file_length, 0, MYF(0)))
_ma_check_print_warning(param,
"Can't change size of indexfile, error: %d",
@@ -6135,6 +6151,8 @@ int maria_test_if_almost_full(MARIA_HA *info)
{
MARIA_SHARE *share= info->s;

DBUG_ASSERT(!share->internal_table);

if (share->options & HA_OPTION_COMPRESS_RECORD)
return 0;
return mysql_file_seek(share->kfile.file, 0L, MY_SEEK_END,
@@ -130,9 +130,17 @@ int maria_delete_all_rows(MARIA_HA *info)
#endif

if (_ma_flush_table_files(info, MARIA_FLUSH_DATA|MARIA_FLUSH_INDEX,
FLUSH_IGNORE_CHANGED, FLUSH_IGNORE_CHANGED) ||
mysql_file_chsize(info->dfile.file, 0, 0, MYF(MY_WME)) ||
mysql_file_chsize(share->kfile.file, share->base.keystart, 0, MYF(MY_WME)))
FLUSH_IGNORE_CHANGED, FLUSH_IGNORE_CHANGED))
goto err;
/*
Avoid truncate of internal temporary tables as this can have a big
performance overhead when called by mysql_handle_single_derived()
tables in MariaDB as part of split materialization.
*/
if (!share->internal_table &&
(mysql_file_chsize(info->dfile.file, 0, 0, MYF(MY_WME)) ||
mysql_file_chsize(share->kfile.file, share->base.keystart, 0,
MYF(MY_WME))))
goto err;

if (_ma_initialize_data_file(share, info->dfile.file))