Merge 10.2 into 10.3
@@ -473,7 +473,7 @@ sub mtr_report_stats ($$$$) {
$comment =~ s/[\"]//g;

# if a test case has to be retried it should have the result MTR_RES_FAILED in jUnit XML
if ($test->{'result'} eq "MTR_RES_FAILED" || $test->{'retries'}) {
if ($test->{'result'} eq "MTR_RES_FAILED" || $test->{'retries'} > 0) {
my $logcontents = $test->{'logfile-failed'} || $test->{'logfile'};

$xml_report .= qq(>\n\t\t\t<failure message="" type="MTR_RES_FAILED">\n<![CDATA[$logcontents]]>\n\t\t\t</failure>\n\t\t</testcase>\n);

@@ -639,6 +639,8 @@ sub mtr_error (@) {
}
else
{
use Carp qw(cluck);
cluck "Error happened" if $verbose > 0;
exit(1);
}
}

@@ -947,8 +947,14 @@ sub run_test_server ($$$)
if ( $result->is_failed() ) {
my $worker_logdir= $result->{savedir};
my $log_file_name=dirname($worker_logdir)."/".$result->{shortname}.".log";

if (-e $log_file_name) {
$result->{'logfile-failed'} = mtr_lastlinesfromfile($log_file_name, 20);
rename $log_file_name,$log_file_name.".failed";
} else {
$result->{'logfile-failed'} = "";
}

rename $log_file_name, $log_file_name.".failed";
}
delete($result->{result});
$result->{retries}= $retries+1;

@@ -1,24 +1,22 @@
connection node_1;
CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
CREATE TABLE ten (f1 INTEGER NOT NULL PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
connection node_2;
SET SESSION wsrep_retry_autocommit = 0;
INSERT INTO t1(f1) SELECT 1 FROM ten as a1, ten AS a2;
set debug_sync='ha_commit_trans_after_prepare WAIT_FOR go';
INSERT INTO t1 (f1) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5, ten AS a6;;
INSERT INTO t1 (f1) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5, ten AS a6, ten AS a7, ten AS a8;
connection node_1;
TRUNCATE TABLE t1;;
connection node_1;
connection node_2;
ERROR 40001: Deadlock: wsrep aborted transaction
connection node_2;
SELECT COUNT(*) = 0 FROM t1;
COUNT(*) = 0
1
connection node_1;
SELECT COUNT(*) = 0 FROM t1;
COUNT(*) = 0
1
connection node_2;
SELECT COUNT(*) AS EXPECT_0 FROM t1;
EXPECT_0
0
connection node_1;
SELECT COUNT(*) AS EXPECT_0 FROM t1;
EXPECT_0
0
DROP TABLE t1;
DROP TABLE ten;

@@ -4,50 +4,42 @@
#

--source include/galera_cluster.inc
--source include/have_innodb.inc
--source include/have_debug_sync.inc
--source include/not_embedded.inc
--source include/have_debug.inc

#
# INSERT and TRUNCATE on different nodes
#

--connection node_1
CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
CREATE TABLE ten (f1 INTEGER NOT NULL PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);

CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;

# Insert 1m rows
--connection node_2
--let $wait_condition = SELECT COUNT(*) = 10 FROM ten;
--source include/wait_condition.inc

# Prevent autocommit retrying from masking the deadlock error we expect to get
SET SESSION wsrep_retry_autocommit = 0;
INSERT INTO t1(f1) SELECT 1 FROM ten as a1, ten AS a2;

set debug_sync='ha_commit_trans_after_prepare WAIT_FOR go';
--send INSERT INTO t1 (f1) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5, ten AS a6;
--send INSERT INTO t1 (f1) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5, ten AS a6, ten AS a7, ten AS a8

--connection node_1
# Wait for the above insert to start
--let $wait_condition = SELECT COUNT(*) >= 100 from t1;
--source include/wait_condition.inc

--send TRUNCATE TABLE t1;

--connection node_1
--reap

--connection node_2
--error ER_LOCK_DEADLOCK
--reap

--connection node_1
--reap

--connection node_2
SELECT COUNT(*) = 0 FROM t1;
SELECT COUNT(*) AS EXPECT_0 FROM t1;

--connection node_1
SELECT COUNT(*) = 0 FROM t1;
SELECT COUNT(*) AS EXPECT_0 FROM t1;

DROP TABLE t1;
DROP TABLE ten;

@@ -17,13 +17,12 @@ set global debug_dbug=@old_dbug;

connection master;

### Dump thread is hanging even though the slave has gracefully exited.
let $id=`SELECT id from information_schema.processlist where command='Binlog Dump'`;

if ($id) {
replace_result $id DUMP_THREAD;
eval kill $id;
let $wait_condition= SELECT count(*)=0 from information_schema.processlist where command='Binlog Dump';
let $wait_condition= SELECT count(*)=0 from information_schema.processlist where command='Killed';
source include/wait_condition.inc;
}

@@ -736,7 +736,7 @@ VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT Number of threads performing background key rotation and scrubbing
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
NUMERIC_MAX_VALUE 255
NUMERIC_BLOCK_SIZE 0
ENUM_VALUE_LIST NULL
READ_ONLY NO

@@ -2991,10 +2991,7 @@ fseg_free_extent(
ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N)
== FSEG_MAGIC_N_VALUE);
ut_d(space->modify_check(*mtr));

#if defined BTR_CUR_HASH_ADAPT || defined UNIV_DEBUG
const ulint first_page_in_extent = page - (page % FSP_EXTENT_SIZE);
#endif /* BTR_CUR_HASH_ADAPT || UNIV_DEBUG */
ut_d(ulint first_page_in_extent = page - (page % FSP_EXTENT_SIZE));

if (xdes_is_full(descr, mtr)) {
flst_remove(seg_inode + FSEG_FULL,

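Note on ut_d(): the hunk above replaces the conditionally compiled local with ut_d(), InnoDB's macro that keeps its argument only in UNIV_DEBUG builds and compiles to nothing otherwise. A minimal stand-alone sketch of the pattern (the macro definition and names below are illustrative, not the exact InnoDB source):

    #include <cassert>

    // Debug-only statement macro in the spirit of InnoDB's ut_d(): the
    // argument exists in debug builds and disappears in release builds.
    #ifdef UNIV_DEBUG
    # define ut_d(STMT) STMT
    #else
    # define ut_d(STMT)
    #endif

    int main()
    {
        unsigned page = 37, extent_size = 64;
        // Debug-only local, as in the patched fseg_free_extent():
        ut_d(unsigned first_page_in_extent = page - (page % extent_size));
        ut_d(assert(first_page_in_extent == 0));
        return 0;
    }
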
@@ -583,7 +583,7 @@ fts_cache_init(

mutex_enter((ib_mutex_t*) &cache->deleted_lock);
cache->deleted_doc_ids = ib_vector_create(
cache->sync_heap, sizeof(fts_update_t), 4);
cache->sync_heap, sizeof(doc_id_t), 4);
mutex_exit((ib_mutex_t*) &cache->deleted_lock);

/* Reset the cache data for all the FTS indexes. */

@@ -2610,11 +2610,11 @@ dberr_t
fts_cmp_set_sync_doc_id(
/*====================*/
const dict_table_t* table, /*!< in: table */
doc_id_t doc_id_cmp, /*!< in: Doc ID to compare */
doc_id_t cmp_doc_id, /*!< in: Doc ID to compare */
ibool read_only, /*!< in: TRUE if read the
synced_doc_id only */
doc_id_t* doc_id) /*!< out: larger document id
after comparing "doc_id_cmp"
after comparing "cmp_doc_id"
to the one stored in CONFIG
table */
{

@@ -2685,10 +2685,10 @@ retry:
goto func_exit;
}

if (doc_id_cmp == 0 && *doc_id) {
if (cmp_doc_id == 0 && *doc_id) {
cache->synced_doc_id = *doc_id - 1;
} else {
cache->synced_doc_id = ut_max(doc_id_cmp, *doc_id);
cache->synced_doc_id = ut_max(cmp_doc_id, *doc_id);
}

mutex_enter(&cache->doc_id_lock);

@@ -2699,7 +2699,7 @@ retry:
}
mutex_exit(&cache->doc_id_lock);

if (doc_id_cmp > *doc_id) {
if (cmp_doc_id > *doc_id) {
error = fts_update_sync_doc_id(
table, cache->synced_doc_id, trx);
}

@@ -2821,7 +2821,7 @@ fts_doc_ids_create(void)
fts_doc_ids->self_heap = ib_heap_allocator_create(heap);

fts_doc_ids->doc_ids = static_cast<ib_vector_t*>(ib_vector_create(
fts_doc_ids->self_heap, sizeof(fts_update_t), 32));
fts_doc_ids->self_heap, sizeof(doc_id_t), 32));

return(fts_doc_ids);
}

@@ -3921,7 +3921,7 @@ fts_sync_add_deleted_cache(

ut_a(ib_vector_size(doc_ids) > 0);

ib_vector_sort(doc_ids, fts_update_doc_id_cmp);
ib_vector_sort(doc_ids, fts_doc_id_cmp);

info = pars_info_create();

@@ -3939,13 +3939,13 @@ fts_sync_add_deleted_cache(
"BEGIN INSERT INTO $table_name VALUES (:doc_id);");

for (i = 0; i < n_elems && error == DB_SUCCESS; ++i) {
fts_update_t* update;
doc_id_t* update;
doc_id_t write_doc_id;

update = static_cast<fts_update_t*>(ib_vector_get(doc_ids, i));
update = static_cast<doc_id_t*>(ib_vector_get(doc_ids, i));

/* Convert to "storage" byte order. */
fts_write_doc_id((byte*) &write_doc_id, update->doc_id);
fts_write_doc_id((byte*) &write_doc_id, *update);
fts_bind_doc_id(info, "doc_id", &write_doc_id);

error = fts_eval_sql(sync->trx, graph);

@@ -5270,12 +5270,12 @@ fts_cache_append_deleted_doc_ids(

for (ulint i = 0; i < ib_vector_size(cache->deleted_doc_ids); ++i) {
fts_update_t* update;
doc_id_t* update;

update = static_cast<fts_update_t*>(
update = static_cast<doc_id_t*>(
ib_vector_get(cache->deleted_doc_ids, i));

ib_vector_push(vector, &update->doc_id);
ib_vector_push(vector, &update);
}

mutex_exit((ib_mutex_t*) &cache->deleted_lock);

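Note on the "storage" byte order conversions repeated above: fts_write_doc_id() stores the 64-bit doc id most-significant byte first (effectively InnoDB's mach_write_to_8 convention) before it is bound to the :doc_id parameter. A minimal stand-alone sketch of that big-endian write, with illustrative names and no InnoDB dependencies:

    #include <cstdint>
    #include <cstdio>

    using doc_id_t = std::uint64_t;   // FTS doc ids are 64-bit integers

    // Write a doc id most-significant byte first, independent of host
    // endianness, mirroring the "storage" byte order used above.
    static void write_doc_id_be(unsigned char* buf, doc_id_t doc_id)
    {
        for (int i = 7; i >= 0; i--) {
            buf[i] = static_cast<unsigned char>(doc_id & 0xFF);
            doc_id >>= 8;
        }
    }

    int main()
    {
        unsigned char storage[8];
        write_doc_id_be(storage, 0x0102030405060708ULL);
        for (unsigned char b : storage)
            std::printf("%02x ", b);   // prints: 01 02 03 04 05 06 07 08
        std::printf("\n");
        return 0;
    }
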
@@ -1,7 +1,7 @@
/*****************************************************************************

Copyright (c) 2007, 2018, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2016, 2019, MariaDB Corporation.
Copyright (c) 2016, 2020, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@@ -920,7 +920,7 @@ fts_fetch_doc_ids(
int i = 0;
sel_node_t* sel_node = static_cast<sel_node_t*>(row);
fts_doc_ids_t* fts_doc_ids = static_cast<fts_doc_ids_t*>(user_arg);
fts_update_t* update = static_cast<fts_update_t*>(
doc_id_t* update = static_cast<doc_id_t*>(
ib_vector_push(fts_doc_ids->doc_ids, NULL));

for (exp = sel_node->select_list;

@@ -936,8 +936,7 @@ fts_fetch_doc_ids(
/* Note: The column numbers below must match the SELECT. */
switch (i) {
case 0: /* DOC_ID */
update->fts_indexes = NULL;
update->doc_id = fts_read_doc_id(
*update = fts_read_doc_id(
static_cast<byte*>(data));
break;

@@ -1005,7 +1004,7 @@ fts_table_fetch_doc_ids(
mutex_exit(&dict_sys->mutex);

if (error == DB_SUCCESS) {
ib_vector_sort(doc_ids->doc_ids, fts_update_doc_id_cmp);
ib_vector_sort(doc_ids->doc_ids, fts_doc_id_cmp);
}

if (alloc_bk_trx) {

@@ -1022,7 +1021,7 @@ Do a binary search for a doc id in the array
int
fts_bsearch(
/*========*/
fts_update_t* array, /*!< in: array to sort */
doc_id_t* array, /*!< in: array to sort */
int lower, /*!< in: the array lower bound */
int upper, /*!< in: the array upper bound */
doc_id_t doc_id) /*!< in: the doc id to search for */

@@ -1036,9 +1035,9 @@ fts_bsearch(
while (lower < upper) {
int i = (lower + upper) >> 1;

if (doc_id > array[i].doc_id) {
if (doc_id > array[i]) {
lower = i + 1;
} else if (doc_id < array[i].doc_id) {
} else if (doc_id < array[i]) {
upper = i - 1;
} else {
return(i); /* Found. */

@@ -1047,7 +1046,7 @@ fts_bsearch(
}

if (lower == upper && lower < orig_size) {
if (doc_id == array[lower].doc_id) {
if (doc_id == array[lower]) {
return(lower);
} else if (lower == 0) {
return(-1);

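fts_bsearch() now runs directly over a sorted array of doc_id_t values instead of fts_update_t elements. A self-contained sketch of the same lookup using the standard library (illustrative names and return convention; not the InnoDB ib_vector code):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    using doc_id_t = std::uint64_t;

    // Binary search over a sorted run of doc ids, as the patched fts_bsearch()
    // does on a raw doc_id_t array: index of the match, or -1 when absent.
    static int find_doc_id(const std::vector<doc_id_t>& ids, doc_id_t doc_id)
    {
        auto it = std::lower_bound(ids.begin(), ids.end(), doc_id);
        if (it != ids.end() && *it == doc_id)
            return static_cast<int>(it - ids.begin());
        return -1;
    }

    int main()
    {
        std::vector<doc_id_t> deleted = {3, 8, 21, 55};   // must already be sorted
        return (find_doc_id(deleted, 21) == 2 && find_doc_id(deleted, 4) == -1) ? 0 : 1;
    }
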
@@ -1074,7 +1073,7 @@ fts_optimize_lookup(
{
int pos;
int upper = static_cast<int>(ib_vector_size(doc_ids));
fts_update_t* array = (fts_update_t*) doc_ids->data;
doc_id_t* array = (doc_id_t*) doc_ids->data;

pos = fts_bsearch(array, static_cast<int>(lower), upper, first_doc_id);

@@ -1087,10 +1086,10 @@ fts_optimize_lookup(
/* If i is 1, it could be first_doc_id is less than
either the first or second array item, do a
double check */
if (i == 1 && array[0].doc_id <= last_doc_id
&& first_doc_id < array[0].doc_id) {
if (i == 1 && array[0] <= last_doc_id
&& first_doc_id < array[0]) {
pos = 0;
} else if (i < upper && array[i].doc_id <= last_doc_id) {
} else if (i < upper && array[i] <= last_doc_id) {

/* Check if the "next" doc id is within the
first & last doc id of the node. */

@@ -1229,12 +1228,12 @@ test_again:
delta for decoding the entries following this document's
entries. */
if (*del_pos >= 0 && *del_pos < (int) ib_vector_size(del_vec)) {
fts_update_t* update;
doc_id_t* update;

update = (fts_update_t*) ib_vector_get(
update = (doc_id_t*) ib_vector_get(
del_vec, ulint(*del_pos));

del_doc_id = update->doc_id;
del_doc_id = *update;
}

if (enc->src_ilist_ptr == src_node->ilist && doc_id == 0) {

@@ -2020,7 +2019,7 @@ fts_optimize_purge_deleted_doc_ids(
ulint i;
pars_info_t* info;
que_t* graph;
fts_update_t* update;
doc_id_t* update;
doc_id_t write_doc_id;
dberr_t error = DB_SUCCESS;
char deleted[MAX_FULL_NAME_LEN];

@@ -2030,11 +2029,11 @@ fts_optimize_purge_deleted_doc_ids(

ut_a(ib_vector_size(optim->to_delete->doc_ids) > 0);

update = static_cast<fts_update_t*>(
update = static_cast<doc_id_t*>(
ib_vector_get(optim->to_delete->doc_ids, 0));

/* Convert to "storage" byte order. */
fts_write_doc_id((byte*) &write_doc_id, update->doc_id);
fts_write_doc_id((byte*) &write_doc_id, *update);

/* This is required for the SQL parser to work. It must be able
to find the following variables. So we do it twice. */

@@ -2056,11 +2055,11 @@ fts_optimize_purge_deleted_doc_ids(
/* Delete the doc ids that were copied at the start. */
for (i = 0; i < ib_vector_size(optim->to_delete->doc_ids); ++i) {

update = static_cast<fts_update_t*>(ib_vector_get(
update = static_cast<doc_id_t*>(ib_vector_get(
optim->to_delete->doc_ids, i));

/* Convert to "storage" byte order. */
fts_write_doc_id((byte*) &write_doc_id, update->doc_id);
fts_write_doc_id((byte*) &write_doc_id, *update);

fts_bind_doc_id(info, "doc_id1", &write_doc_id);

@@ -730,10 +730,10 @@ fts_query_union_doc_id(
{
ib_rbt_bound_t parent;
ulint size = ib_vector_size(query->deleted->doc_ids);
fts_update_t* array = (fts_update_t*) query->deleted->doc_ids->data;
doc_id_t* updates = (doc_id_t*) query->deleted->doc_ids->data;

/* Check if the doc id is deleted and it's not already in our set. */
if (fts_bsearch(array, 0, static_cast<int>(size), doc_id) < 0
if (fts_bsearch(updates, 0, static_cast<int>(size), doc_id) < 0
&& rbt_search(query->doc_ids, &parent, &doc_id) != 0) {

fts_ranking_t ranking;

@@ -761,10 +761,10 @@ fts_query_remove_doc_id(
{
ib_rbt_bound_t parent;
ulint size = ib_vector_size(query->deleted->doc_ids);
fts_update_t* array = (fts_update_t*) query->deleted->doc_ids->data;
doc_id_t* updates = (doc_id_t*) query->deleted->doc_ids->data;

/* Check if the doc id is deleted and it's in our set. */
if (fts_bsearch(array, 0, static_cast<int>(size), doc_id) < 0
if (fts_bsearch(updates, 0, static_cast<int>(size), doc_id) < 0
&& rbt_search(query->doc_ids, &parent, &doc_id) == 0) {
ut_free(rbt_remove_node(query->doc_ids, parent.last));

@@ -791,10 +791,10 @@ fts_query_change_ranking(
{
ib_rbt_bound_t parent;
ulint size = ib_vector_size(query->deleted->doc_ids);
fts_update_t* array = (fts_update_t*) query->deleted->doc_ids->data;
doc_id_t* updates = (doc_id_t*) query->deleted->doc_ids->data;

/* Check if the doc id is deleted and it's in our set. */
if (fts_bsearch(array, 0, static_cast<int>(size), doc_id) < 0
if (fts_bsearch(updates, 0, static_cast<int>(size), doc_id) < 0
&& rbt_search(query->doc_ids, &parent, &doc_id) == 0) {

fts_ranking_t* ranking;

@@ -828,7 +828,7 @@ fts_query_intersect_doc_id(
{
ib_rbt_bound_t parent;
ulint size = ib_vector_size(query->deleted->doc_ids);
fts_update_t* array = (fts_update_t*) query->deleted->doc_ids->data;
doc_id_t* updates = (doc_id_t*) query->deleted->doc_ids->data;
fts_ranking_t* ranking= NULL;

/* There are three types of intersect:

@@ -840,7 +840,7 @@ fts_query_intersect_doc_id(
if it matches 'b' and it's in doc_ids.(multi_exist = true). */

/* Check if the doc id is deleted and it's in our set */
if (fts_bsearch(array, 0, static_cast<int>(size), doc_id) < 0) {
if (fts_bsearch(updates, 0, static_cast<int>(size), doc_id) < 0) {
fts_ranking_t new_ranking;

if (rbt_search(query->doc_ids, &parent, &doc_id) != 0) {

@@ -3649,8 +3649,8 @@ fts_query_prepare_result(
if (query->flags == FTS_OPT_RANKING) {
fts_word_freq_t* word_freq;
ulint size = ib_vector_size(query->deleted->doc_ids);
fts_update_t* array =
(fts_update_t*) query->deleted->doc_ids->data;
doc_id_t* updates =
(doc_id_t*) query->deleted->doc_ids->data;

node = rbt_first(query->word_freqs);
ut_ad(node);

@@ -3665,7 +3665,7 @@ fts_query_prepare_result(
doc_freq = rbt_value(fts_doc_freq_t, node);

/* Don't put deleted docs into result */
if (fts_bsearch(array, 0, static_cast<int>(size),
if (fts_bsearch(updates, 0, static_cast<int>(size),
doc_freq->doc_id) >= 0) {
/* one less matching doc count */
--word_freq->doc_count;

@@ -4016,7 +4016,7 @@ fts_query(
DEBUG_SYNC_C("fts_deleted_doc_ids_append");

/* Sort the vector so that we can do a binary search over the ids. */
ib_vector_sort(query.deleted->doc_ids, fts_update_doc_id_cmp);
ib_vector_sort(query.deleted->doc_ids, fts_doc_id_cmp);

/* Convert the query string to lower case before parsing. We own
the ut_malloc'ed result and so remember to free it before return. */

@@ -20193,7 +20193,7 @@ static MYSQL_SYSVAR_UINT(encryption_threads, srv_n_fil_crypt_threads,
"scrubbing",
NULL,
innodb_encryption_threads_update,
srv_n_fil_crypt_threads, 0, UINT_MAX32, 0);
0, 0, 255, 0);

static MYSQL_SYSVAR_UINT(encryption_rotate_key_age,
srv_fil_crypt_rotate_key_age,

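The trailing arguments of MYSQL_SYSVAR_UINT are the default, minimum, maximum and block size, so this hunk caps innodb_encryption_threads at 255 (matching the NUMERIC_MAX_VALUE change in the sysvars result above) and makes the compiled-in default 0. A rough, illustrative sketch of how a bounded setting like this behaves when an out-of-range value is requested (the names and the clamping helper are hypothetical, not the server's sys_var code):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical model of a bounded unsigned system variable: requests
    // outside [min, max] are adjusted to the nearest bound.
    struct uint_sysvar {
        const char*   name;
        std::uint32_t def, min, max;

        std::uint32_t clamp(std::uint64_t requested) const
        {
            if (requested < min) return min;
            if (requested > max) return max;
            return static_cast<std::uint32_t>(requested);
        }
    };

    int main()
    {
        // innodb_encryption_threads after this merge: default 0, range 0..255.
        uint_sysvar encryption_threads{"innodb_encryption_threads", 0, 0, 255};
        std::printf("%u\n", encryption_threads.clamp(1000000));  // prints 255
        return 0;
    }
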
@@ -5031,6 +5031,7 @@ prepare_inplace_alter_table_dict(
create_table_info_t info(ctx->prebuilt->trx->mysql_thd, altered_table,
ha_alter_info->create_info, NULL, NULL,
srv_file_per_table);
ut_d(bool stats_wait = false);

/* The primary index would be rebuilt if a FTS Doc ID
column is to be added, and the primary index definition

@@ -5082,6 +5083,7 @@ prepare_inplace_alter_table_dict(
XXX what may happen if bg stats opens the table after we
have unlocked data dictionary below? */
dict_stats_wait_bg_to_stop_using_table(user_table, ctx->trx);
ut_d(stats_wait = true);

online_retry_drop_indexes_low(ctx->new_table, ctx->trx);

@@ -5986,7 +5988,8 @@ error_handled:
/* n_ref_count must be 1, because purge cannot
be executing on this very table as we are
holding dict_operation_lock X-latch. */
DBUG_ASSERT(user_table->get_ref_count() == 1 || ctx->online);
ut_ad(!stats_wait || ctx->online
|| user_table->get_ref_count() == 1);

online_retry_drop_indexes_with_trx(user_table, ctx->trx);
} else {

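The stats_wait additions above follow a debug-only bookkeeping pattern: a flag that exists only under UNIV_DEBUG is set once dict_stats_wait_bg_to_stop_using_table() has run, and the later debug assertion about the reference count is made conditional on it. A compact, illustrative sketch of the same pattern (the macro stand-ins and helper below are hypothetical, not the real InnoDB definitions):

    #include <cassert>

    #ifdef UNIV_DEBUG
    # define ut_d(STMT)  STMT
    # define ut_ad(EXPR) assert(EXPR)
    #else
    # define ut_d(STMT)
    # define ut_ad(EXPR)
    #endif

    static int ref_count = 1;

    static void wait_for_background_stats() { /* placeholder for the real wait */ }

    int main()
    {
        ut_d(bool stats_wait = false);   // debug-only bookkeeping flag

        wait_for_background_stats();
        ut_d(stats_wait = true);         // record that the wait happened

        // The invariant is only meaningful once the wait has been performed.
        ut_ad(!stats_wait || ref_count == 1);
        return 0;
    }
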
@@ -239,7 +239,7 @@ Do a binary search for a doc id in the array
int
fts_bsearch(
/*========*/
fts_update_t* array, /*!< in: array to sort */
doc_id_t* array, /*!< in: array to sort */
int lower, /*!< in: lower bound of array*/
int upper, /*!< in: upper bound of array*/
doc_id_t doc_id) /*!< in: doc id to lookup */

@@ -1,7 +1,7 @@
/*****************************************************************************

Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2019, MariaDB Corporation.
Copyright (c) 2017, 2020, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@@ -80,20 +80,6 @@ struct fts_index_cache_t {
CHARSET_INFO* charset; /*!< charset */
};

/** For supporting the tracking of updates on multiple FTS indexes we need
to track which FTS indexes need to be updated. For INSERT and DELETE we
update all fts indexes. */
struct fts_update_t {
doc_id_t doc_id; /*!< The doc id affected */

ib_vector_t* fts_indexes; /*!< The FTS indexes that need to be
updated. A NULL value means all
indexes need to be updated. This
vector is not allocated on the heap
and so must be freed explicitly,
when we are done with it */
};

/** Stop word control information. */
struct fts_stopword_t {
ulint status; /*!< Status of the stopword tree */

@@ -319,10 +305,9 @@ fts_ranking_doc_id_cmp(
const void* p2); /*!< in: id2 */

/******************************************************************//**
Compare two fts_update_t instances doc_ids. */
Compare two doc_ids. */
UNIV_INLINE
int
fts_update_doc_id_cmp(
int fts_doc_id_cmp(
/*==================*/
/*!< out:
< 0 if n1 < n2,

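With fts_update_t removed, the deleted-doc-id vectors store bare 64-bit ids instead of a struct that paired the id with an fts_indexes pointer which, on this path, was only ever set to NULL (as the removed assignment in fts_fetch_doc_ids shows). A minimal before/after sketch of the element layout, using std::vector in place of ib_vector_t and illustrative names:

    #include <cstdint>
    #include <vector>

    using doc_id_t = std::uint64_t;

    // Stand-in for the removed fts_update_t: the id plus a pointer that the
    // deleted-doc-id tracking never used for anything but NULL.
    struct update_entry {
        doc_id_t doc_id;
        void*    fts_indexes;   // always NULL on this path
    };

    int main()
    {
        std::vector<update_entry> before;            // old element type
        before.push_back(update_entry{42, nullptr});
        doc_id_t old_style = before.back().doc_id;   // indirect access

        std::vector<doc_id_t> after;                 // new element type
        after.push_back(42);
        doc_id_t new_style = after.back();           // direct access

        return old_style == new_style ? 0 : 1;
    }
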
@@ -79,19 +79,18 @@ fts_ranking_doc_id_cmp(
}

/******************************************************************//**
Compare two fts_update_t doc_ids.
Compare two doc_ids.
@return < 0 if n1 < n2, 0 if n1 == n2, > 0 if n1 > n2 */
UNIV_INLINE
int
fts_update_doc_id_cmp(
int fts_doc_id_cmp(
/*==================*/
const void* p1, /*!< in: id1 */
const void* p2) /*!< in: id2 */
{
const fts_update_t* up1 = (const fts_update_t*) p1;
const fts_update_t* up2 = (const fts_update_t*) p2;
const doc_id_t* up1 = static_cast<const doc_id_t*>(p1);
const doc_id_t* up2 = static_cast<const doc_id_t*>(p2);

return((int)(up1->doc_id - up2->doc_id));
return static_cast<int>(*up1 - *up2);
}

/******************************************************************//**

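fts_doc_id_cmp() keeps the qsort-style (const void*, const void*) signature that ib_vector_sort expects, but now dereferences the pointers as raw doc ids. A stand-alone sketch of an equivalent comparator driving the C library qsort (illustrative only; the three-way (a > b) - (a < b) form is one way to express the result without relying on the 64-bit difference fitting in an int):

    #include <cstdint>
    #include <cstdlib>

    using doc_id_t = std::uint64_t;

    // qsort-style comparator over raw doc ids, in the spirit of the reworked
    // fts_doc_id_cmp(): treat the void pointers as doc_id_t and compare.
    extern "C" int doc_id_cmp(const void* p1, const void* p2)
    {
        const doc_id_t a = *static_cast<const doc_id_t*>(p1);
        const doc_id_t b = *static_cast<const doc_id_t*>(p2);
        return (a > b) - (a < b);
    }

    int main()
    {
        doc_id_t ids[] = {55, 3, 21, 8};
        std::qsort(ids, 4, sizeof(doc_id_t), doc_id_cmp);
        return (ids[0] == 3 && ids[3] == 55) ? 0 : 1;
    }
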