
MDEV-24730 Insert log operation fails after purge resets n_core_fields

The online log insert operation for a ROW_FORMAT=REDUNDANT table fails
with an index->is_instant() assertion. Purge can reset n_core_fields
while ALTER TABLE is waiting to upgrade its MDL for the commit phase of
the DDL. In the meantime, any insert DML that tries to log its
operation fails because the index no longer appears to be instant.

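The failure mode can be pictured with a minimal, self-contained C++ sketch.
Everything here is a simplified stand-in: index_sim is not InnoDB's
dict_index_t, and the relation is_instant() == (n_core_fields != n_fields)
is assumed only for illustration.

#include <cassert>

// Simplified stand-in for InnoDB's dict_index_t (illustration only).
struct index_sim {
  unsigned n_core_fields;  // field count before any instant ADD COLUMN
  unsigned n_fields;       // total field count
  // Assumption: the index counts as "instant" while the two counts differ.
  bool is_instant() const { return n_core_fields != n_fields; }
};

int main() {
  index_sim index{3, 133};      // instant ALTER has added many columns
  assert(index.is_instant());   // holds while the online ALTER is running

  // Purge resets the metadata while the ALTER waits to upgrade its MDL
  // for the commit phase:
  index.n_core_fields = index.n_fields;

  // A concurrent INSERT that logs its operation now trips the
  // ut_ad(index->is_instant()) debug check in the old code:
  assert(index.is_instant());   // fires, mirroring the reported failure
}
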
row_log_get_n_core_fields(): Get the n_core_fields of the online log
for the given index.

rec_get_converted_size_comp_prefix_low(): Use the n_core_fields of the
online log when InnoDB calculates the size of the data tuple during a
ROW_FORMAT=REDUNDANT table rebuild (see the combined sketch below).

rec_convert_dtuple_to_rec_comp(): Use the n_core_fields of the online
log when InnoDB converts the data tuple to a record during a
ROW_FORMAT=REDUNDANT table rebuild.

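Taken together, the changes above amount to letting the redundant-format
online-log path read the field-count boundary from the log rather than from
the index that purge may already have reset. Below is a minimal,
self-contained sketch of that selection; row_log_t and dict_index_t are
simplified stand-ins, pick_n_core_fields is a hypothetical helper, and only
row_log_get_n_core_fields mirrors code actually added by this patch (see
row0log.cc in the diff).

#include <cassert>

// Simplified stand-ins for the InnoDB structures involved (illustration only).
struct row_log_t { unsigned n_core_fields; };  // value captured by the online log
struct dict_index_t {
  unsigned n_core_fields;                      // may be reset by purge
  row_log_t* online_log;
};

// Mirrors the accessor added in row0log.cc by this patch.
unsigned row_log_get_n_core_fields(const dict_index_t* index) {
  assert(index->online_log);
  return index->online_log->n_core_fields;
}

// Hypothetical helper mirroring how rec_get_converted_size_comp_prefix_low()
// and rec_convert_dtuple_to_rec_comp() now choose the value: only the
// redundant-format online-log path (redundant_temp == true) consults the log.
template<bool redundant_temp>
unsigned pick_n_core_fields(const dict_index_t* index) {
  return redundant_temp ? row_log_get_n_core_fields(index)
                        : index->n_core_fields;
}

int main() {
  row_log_t log{3};                 // snapshot taken when logging started
  dict_index_t index{3, &log};
  index.n_core_fields = 133;        // purge has since reset the index-side value

  assert(pick_n_core_fields<true>(&index) == 3);    // online-log path stays stable
  assert(pick_n_core_fields<false>(&index) == 133); // all other callers unchanged
}
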
- Add a test case that instantly adds more than 129 columns.
Author: Thirunarayanan Balathandayuthapani
Date:   2021-03-06 05:55:14 +05:30
parent 08e8ad7c71
commit eb7c5530ec
8 changed files with 94 additions and 36 deletions

View File

@@ -190,6 +190,12 @@ SET DEBUG_SYNC='RESET';
 CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
 INSERT INTO t1 SET a=0;
 ALTER TABLE t1 ADD COLUMN b INT NOT NULL DEFAULT 2, ADD COLUMN c INT;
+BEGIN NOT ATOMIC
+DECLARE c TEXT DEFAULT(SELECT CONCAT('ALTER TABLE t1 ADD (c',
+GROUP_CONCAT(seq SEPARATOR ' INT, c'), ' INT), ALGORITHM=INSTANT;') FROM seq_1_to_130);
+EXECUTE IMMEDIATE c;
+END;
+$$
 connection stop_purge;
 START TRANSACTION WITH CONSISTENT SNAPSHOT;
 connection default;
@@ -207,7 +213,7 @@ SET DEBUG_SYNC = 'now SIGNAL logged';
 connection ddl;
 connection default;
 SET DEBUG_SYNC = RESET;
-SELECT * FROM t1;
+SELECT a, b, c FROM t1;
 a b c
 1 2 NULL
 2 3 4
@@ -231,7 +237,7 @@ ERROR 22004: Invalid use of NULL value
 disconnect ddl;
 connection default;
 SET DEBUG_SYNC = RESET;
-SELECT * FROM t1;
+SELECT a, b, c, d FROM t1;
 a b c d
 1 2 NULL 1
 2 3 4 1

View File

@@ -0,0 +1,4 @@
+[redundant]
+innodb_default_row_format=redundant
+[dynamic]
+innodb_default_row_format=dynamic

View File

@@ -1,6 +1,7 @@
 --source include/have_innodb.inc
 --source include/have_debug.inc
 --source include/have_debug_sync.inc
+--source include/have_sequence.inc
 SET @save_frequency= @@GLOBAL.innodb_purge_rseg_truncate_frequency;
 SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
@@ -212,6 +213,15 @@ CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
 INSERT INTO t1 SET a=0;
 ALTER TABLE t1 ADD COLUMN b INT NOT NULL DEFAULT 2, ADD COLUMN c INT;
+DELIMITER $$;
+BEGIN NOT ATOMIC
+DECLARE c TEXT DEFAULT(SELECT CONCAT('ALTER TABLE t1 ADD (c',
+GROUP_CONCAT(seq SEPARATOR ' INT, c'), ' INT), ALGORITHM=INSTANT;') FROM seq_1_to_130);
+EXECUTE IMMEDIATE c;
+END;
+$$
+DELIMITER ;$$
 connection stop_purge;
 START TRANSACTION WITH CONSISTENT SNAPSHOT;
@@ -238,7 +248,7 @@ reap;
 connection default;
 SET DEBUG_SYNC = RESET;
-SELECT * FROM t1;
+SELECT a, b, c FROM t1;
 --echo #
 --echo # MDEV-15872 Crash in online ALTER TABLE...ADD PRIMARY KEY
@@ -265,7 +275,7 @@ disconnect ddl;
 connection default;
 SET DEBUG_SYNC = RESET;
-SELECT * FROM t1;
+SELECT a, b, c, d FROM t1;
 DROP TABLE t1;
 --echo #

View File

@@ -982,6 +982,7 @@ rec_copy(
 @param[out] extra record header size
 @param[in] status REC_STATUS_ORDINARY or REC_STATUS_COLUMNS_ADDED
 @return total size, in bytes */
+template<bool redundant_temp>
 ulint
 rec_get_converted_size_temp(
 const dict_index_t* index,
@@ -1026,6 +1027,7 @@ rec_init_offsets_temp(
 @param[in] n_fields number of data fields
 @param[in] status REC_STATUS_ORDINARY or REC_STATUS_COLUMNS_ADDED
 */
+template<bool redundant_temp>
 void
 rec_convert_dtuple_to_temp(
 rec_t* rec,

View File

@@ -247,6 +247,11 @@ row_log_apply(
 ut_stage_alter_t* stage)
 MY_ATTRIBUTE((warn_unused_result));
+/** Get the n_core_fields of online log for the index
+@param index index whose n_core_fields of log to be accessed
+@return number of n_core_fields */
+unsigned row_log_get_n_core_fields(const dict_index_t *index);
 #ifdef HAVE_PSI_STAGE_INTERFACE
 /** Estimate how much work is to be done by the log apply phase
 of an ALTER TABLE for this index.

View File

@@ -29,6 +29,7 @@ Created 5/30/1994 Heikki Tuuri
 #include "mtr0log.h"
 #include "fts0fts.h"
 #include "trx0sys.h"
+#include "row0log.h"
 /* PHYSICAL RECORD (OLD STYLE)
 ===========================
@@ -1044,6 +1045,7 @@ rec_get_nth_field_offs_old(
 /**********************************************************//**
 Determines the size of a data tuple prefix in ROW_FORMAT=COMPACT.
 @return total size */
+template<bool redundant_temp>
 MY_ATTRIBUTE((warn_unused_result, nonnull(1,2)))
 static inline
 ulint
@@ -1068,16 +1070,19 @@ rec_get_converted_size_comp_prefix_low(
 ut_d(ulint n_null = index->n_nullable);
 ut_ad(status == REC_STATUS_ORDINARY || status == REC_STATUS_NODE_PTR
 || status == REC_STATUS_COLUMNS_ADDED);
+unsigned n_core_fields = redundant_temp
+? row_log_get_n_core_fields(index)
+: index->n_core_fields;
 if (status == REC_STATUS_COLUMNS_ADDED
-&& (!temp || n_fields > index->n_core_fields)) {
-ut_ad(index->is_instant());
+&& (!temp || n_fields > n_core_fields)) {
+if (!redundant_temp) { ut_ad(index->is_instant()); }
 ut_ad(UT_BITS_IN_BYTES(n_null) >= index->n_core_null_bytes);
 extra_size += UT_BITS_IN_BYTES(index->get_n_nullable(n_fields))
 + rec_get_n_add_field_len(n_fields - 1
-- index->n_core_fields);
+- n_core_fields);
 } else {
-ut_ad(n_fields <= index->n_core_fields);
+ut_ad(n_fields <= n_core_fields);
 extra_size += index->n_core_null_bytes;
 }
@@ -1192,7 +1197,7 @@ rec_get_converted_size_comp_prefix(
 ulint* extra) /*!< out: extra size */
 {
 ut_ad(dict_table_is_comp(index->table));
-return(rec_get_converted_size_comp_prefix_low(
+return(rec_get_converted_size_comp_prefix_low<false>(
 index, fields, n_fields, extra,
 REC_STATUS_ORDINARY, false));
 }
@@ -1224,7 +1229,7 @@ rec_get_converted_size_comp(
 case REC_STATUS_COLUMNS_ADDED:
 ut_ad(n_fields >= index->n_core_fields);
 ut_ad(n_fields <= index->n_fields);
-return rec_get_converted_size_comp_prefix_low(
+return rec_get_converted_size_comp_prefix_low<false>(
 index, fields, n_fields, extra, status, false);
 case REC_STATUS_NODE_PTR:
 n_fields--;
@@ -1232,7 +1237,7 @@ rec_get_converted_size_comp(
 index));
 ut_ad(dfield_get_len(&fields[n_fields]) == REC_NODE_PTR_SIZE);
 return REC_NODE_PTR_SIZE /* child page number */
-+ rec_get_converted_size_comp_prefix_low(
++ rec_get_converted_size_comp_prefix_low<false>(
 index, fields, n_fields, extra, status, false);
 case REC_STATUS_INFIMUM:
 case REC_STATUS_SUPREMUM:
@@ -1418,6 +1423,7 @@ rec_convert_dtuple_to_rec_old(
 @param[in] status rec_get_status(rec)
 @param[in] temp whether to use the format for temporary files
 in index creation */
+template<bool redundant_temp>
 static inline
 void
 rec_convert_dtuple_to_rec_comp(
@@ -1439,7 +1445,9 @@ rec_convert_dtuple_to_rec_comp(
 ulint UNINIT_VAR(n_node_ptr_field);
 ulint fixed_len;
 ulint null_mask = 1;
+const ulint n_core_fields = redundant_temp
+? row_log_get_n_core_fields(index)
+: index->n_core_fields;
 ut_ad(n_fields > 0);
 ut_ad(temp || dict_table_is_comp(index->table));
 ut_ad(index->n_core_null_bytes <= UT_BITS_IN_BYTES(index->n_nullable));
@@ -1448,16 +1456,15 @@ rec_convert_dtuple_to_rec_comp(
 switch (status) {
 case REC_STATUS_COLUMNS_ADDED:
-ut_ad(index->is_instant());
-ut_ad(n_fields > index->n_core_fields);
-rec_set_n_add_field(nulls, n_fields - 1
-- index->n_core_fields);
+if (!redundant_temp) { ut_ad(index->is_instant()); }
+ut_ad(n_fields > n_core_fields);
+rec_set_n_add_field(nulls, n_fields - 1 - n_core_fields);
 /* fall through */
 case REC_STATUS_ORDINARY:
 ut_ad(n_fields <= dict_index_get_n_fields(index));
 if (!temp) {
 rec_set_heap_no_new(rec, PAGE_HEAP_NO_USER_LOW);
-rec_set_status(rec, n_fields == index->n_core_fields
+rec_set_status(rec, n_fields == n_core_fields
 ? REC_STATUS_ORDINARY
 : REC_STATUS_COLUMNS_ADDED);
 } if (dict_table_is_comp(index->table)) {
@@ -1479,8 +1486,8 @@ rec_convert_dtuple_to_rec_comp(
 rec_set_status(rec, status);
 ut_ad(n_fields
 == dict_index_get_n_unique_in_tree_nonleaf(index) + 1);
-ut_d(n_null = std::min(index->n_core_null_bytes * 8U,
+ut_d(n_null = std::min<unsigned>(index->n_core_null_bytes * 8U,
 index->n_nullable));
 n_node_ptr_field = n_fields - 1;
 lens = nulls - index->n_core_null_bytes;
 break;
@@ -1615,7 +1622,7 @@ rec_convert_dtuple_to_rec_new(
 index, status, dtuple->fields, dtuple->n_fields, &extra_size);
 rec_t* rec = buf + extra_size;
-rec_convert_dtuple_to_rec_comp(
+rec_convert_dtuple_to_rec_comp<false>(
 rec, index, dtuple->fields, dtuple->n_fields, status, false);
 rec_set_info_bits_new(rec, dtuple->info_bits & ~REC_NEW_STATUS_MASK);
 return(rec);
@@ -1659,6 +1666,7 @@ rec_convert_dtuple_to_rec(
 @param[out] extra record header size
 @param[in] status REC_STATUS_ORDINARY or REC_STATUS_COLUMNS_ADDED
 @return total size, in bytes */
+template<bool redundant_temp>
 ulint
 rec_get_converted_size_temp(
 const dict_index_t* index,
@@ -1667,10 +1675,18 @@ rec_get_converted_size_temp(
 ulint* extra,
 rec_comp_status_t status)
 {
-return rec_get_converted_size_comp_prefix_low(
+return rec_get_converted_size_comp_prefix_low<redundant_temp>(
 index, fields, n_fields, extra, status, true);
 }
+template ulint rec_get_converted_size_temp<false>(
+const dict_index_t*, const dfield_t*, ulint, ulint*,
+rec_comp_status_t);
+template ulint rec_get_converted_size_temp<true>(
+const dict_index_t*, const dfield_t*, ulint, ulint*,
+rec_comp_status_t);
 /** Determine the offset to each field in temporary file.
 @param[in] rec temporary file record
 @param[in] index index of that the record belongs to
@@ -1723,6 +1739,7 @@ rec_init_offsets_temp(
 @param[in] n_fields number of data fields
 @param[in] status REC_STATUS_ORDINARY or REC_STATUS_COLUMNS_ADDED
 */
+template<bool redundant_temp>
 void
 rec_convert_dtuple_to_temp(
 rec_t* rec,
@@ -1731,10 +1748,18 @@ rec_convert_dtuple_to_temp(
 ulint n_fields,
 rec_comp_status_t status)
 {
-rec_convert_dtuple_to_rec_comp(rec, index, fields, n_fields,
-status, true);
+rec_convert_dtuple_to_rec_comp<redundant_temp>(
+rec, index, fields, n_fields, status, true);
 }
+template void rec_convert_dtuple_to_temp<false>(
+rec_t*, const dict_index_t*, const dfield_t*,
+ulint, rec_comp_status_t);
+template void rec_convert_dtuple_to_temp<true>(
+rec_t*, const dict_index_t*, const dfield_t*,
+ulint, rec_comp_status_t);
 /** Copy the first n fields of a (copy of a) physical record to a data tuple.
 The fields are copied into the memory heap.
 @param[out] tuple data tuple

View File

@ -351,7 +351,7 @@ row_log_online_op(
row_merge_buf_encode(), because here we do not encode row_merge_buf_encode(), because here we do not encode
extra_size+1 (and reserve 0 as the end-of-chunk marker). */ extra_size+1 (and reserve 0 as the end-of-chunk marker). */
size = rec_get_converted_size_temp( size = rec_get_converted_size_temp<false>(
index, tuple->fields, tuple->n_fields, &extra_size); index, tuple->fields, tuple->n_fields, &extra_size);
ut_ad(size >= extra_size); ut_ad(size >= extra_size);
ut_ad(size <= sizeof log->tail.buf); ut_ad(size <= sizeof log->tail.buf);
@ -399,7 +399,7 @@ row_log_online_op(
*b++ = (byte) extra_size; *b++ = (byte) extra_size;
} }
rec_convert_dtuple_to_temp( rec_convert_dtuple_to_temp<false>(
b + extra_size, index, tuple->fields, tuple->n_fields); b + extra_size, index, tuple->fields, tuple->n_fields);
b += size; b += size;
@ -741,7 +741,7 @@ row_log_table_delete(
old_pk, old_pk->n_fields - 2)->len); old_pk, old_pk->n_fields - 2)->len);
ut_ad(DATA_ROLL_PTR_LEN == dtuple_get_nth_field( ut_ad(DATA_ROLL_PTR_LEN == dtuple_get_nth_field(
old_pk, old_pk->n_fields - 1)->len); old_pk, old_pk->n_fields - 1)->len);
old_pk_size = rec_get_converted_size_temp( old_pk_size = rec_get_converted_size_temp<false>(
new_index, old_pk->fields, old_pk->n_fields, new_index, old_pk->fields, old_pk->n_fields,
&old_pk_extra_size); &old_pk_extra_size);
ut_ad(old_pk_extra_size < 0x100); ut_ad(old_pk_extra_size < 0x100);
@ -754,7 +754,7 @@ row_log_table_delete(
*b++ = ROW_T_DELETE; *b++ = ROW_T_DELETE;
*b++ = static_cast<byte>(old_pk_extra_size); *b++ = static_cast<byte>(old_pk_extra_size);
rec_convert_dtuple_to_temp( rec_convert_dtuple_to_temp<false>(
b + old_pk_extra_size, new_index, b + old_pk_extra_size, new_index,
old_pk->fields, old_pk->n_fields); old_pk->fields, old_pk->n_fields);
@ -854,7 +854,7 @@ row_log_table_low_redundant(
rec_comp_status_t status = is_instant rec_comp_status_t status = is_instant
? REC_STATUS_COLUMNS_ADDED : REC_STATUS_ORDINARY; ? REC_STATUS_COLUMNS_ADDED : REC_STATUS_ORDINARY;
size = rec_get_converted_size_temp( size = rec_get_converted_size_temp<true>(
index, tuple->fields, tuple->n_fields, &extra_size, status); index, tuple->fields, tuple->n_fields, &extra_size, status);
if (is_instant) { if (is_instant) {
size++; size++;
@ -874,7 +874,7 @@ row_log_table_low_redundant(
ut_ad(DATA_ROLL_PTR_LEN == dtuple_get_nth_field( ut_ad(DATA_ROLL_PTR_LEN == dtuple_get_nth_field(
old_pk, old_pk->n_fields - 1)->len); old_pk, old_pk->n_fields - 1)->len);
old_pk_size = rec_get_converted_size_temp( old_pk_size = rec_get_converted_size_temp<false>(
new_index, old_pk->fields, old_pk->n_fields, new_index, old_pk->fields, old_pk->n_fields,
&old_pk_extra_size); &old_pk_extra_size);
ut_ad(old_pk_extra_size < 0x100); ut_ad(old_pk_extra_size < 0x100);
@ -891,7 +891,7 @@ row_log_table_low_redundant(
if (old_pk_size) { if (old_pk_size) {
*b++ = static_cast<byte>(old_pk_extra_size); *b++ = static_cast<byte>(old_pk_extra_size);
rec_convert_dtuple_to_temp( rec_convert_dtuple_to_temp<false>(
b + old_pk_extra_size, new_index, b + old_pk_extra_size, new_index,
old_pk->fields, old_pk->n_fields); old_pk->fields, old_pk->n_fields);
b += old_pk_size; b += old_pk_size;
@ -914,7 +914,7 @@ row_log_table_low_redundant(
*b = status; *b = status;
} }
rec_convert_dtuple_to_temp( rec_convert_dtuple_to_temp<true>(
b + extra_size, index, tuple->fields, tuple->n_fields, b + extra_size, index, tuple->fields, tuple->n_fields,
status); status);
b += size; b += size;
@ -1036,7 +1036,7 @@ row_log_table_low(
ut_ad(DATA_ROLL_PTR_LEN == dtuple_get_nth_field( ut_ad(DATA_ROLL_PTR_LEN == dtuple_get_nth_field(
old_pk, old_pk->n_fields - 1)->len); old_pk, old_pk->n_fields - 1)->len);
old_pk_size = rec_get_converted_size_temp( old_pk_size = rec_get_converted_size_temp<false>(
new_index, old_pk->fields, old_pk->n_fields, new_index, old_pk->fields, old_pk->n_fields,
&old_pk_extra_size); &old_pk_extra_size);
ut_ad(old_pk_extra_size < 0x100); ut_ad(old_pk_extra_size < 0x100);
@ -1052,7 +1052,7 @@ row_log_table_low(
if (old_pk_size) { if (old_pk_size) {
*b++ = static_cast<byte>(old_pk_extra_size); *b++ = static_cast<byte>(old_pk_extra_size);
rec_convert_dtuple_to_temp( rec_convert_dtuple_to_temp<false>(
b + old_pk_extra_size, new_index, b + old_pk_extra_size, new_index,
old_pk->fields, old_pk->n_fields); old_pk->fields, old_pk->n_fields);
b += old_pk_size; b += old_pk_size;
@ -4040,3 +4040,9 @@ row_log_apply(
DBUG_RETURN(error); DBUG_RETURN(error);
} }
unsigned row_log_get_n_core_fields(const dict_index_t *index)
{
ut_ad(index->online_log);
return index->online_log->n_core_fields;
}

View File

@@ -308,7 +308,7 @@ row_merge_buf_encode(
 ulint size;
 ulint extra_size;
-size = rec_get_converted_size_temp(
+size = rec_get_converted_size_temp<false>(
 index, entry->fields, n_fields, &extra_size);
 ut_ad(size >= extra_size);
@@ -321,7 +321,7 @@ row_merge_buf_encode(
 *(*b)++ = (byte) (extra_size + 1);
 }
-rec_convert_dtuple_to_temp(*b + extra_size, index,
+rec_convert_dtuple_to_temp<false>(*b + extra_size, index,
 entry->fields, n_fields);
 *b += size;
@@ -796,7 +796,7 @@ row_merge_buf_add(
 ulint size;
 ulint extra;
-size = rec_get_converted_size_temp(
+size = rec_get_converted_size_temp<false>(
 index, entry->fields, n_fields, &extra);
 ut_ad(data_size + extra_size == size);