MDEV-20949 Stop issuing 'row size' error on DML
Move the row size check to the early CREATE/ALTER TABLE phase. Stop checking on table open.

dict_index_add_to_cache(): remove the 'strict' parameter and stop checking row size.

dict_index_t::record_size_info_t: the result of a row size check operation.

create_table_info_t::row_size_is_acceptable(): performs the row size check. Issues an error or a warning. Writes the first overflowing field to the InnoDB log.

create_table_info_t::create_table(): add the row size check.

dict_index_t::record_size_info(): a refactored version of dict_index_t::rec_potentially_too_big(). The new version does not change the global state of the program but returns all the relevant information, and it is the callers who decide how to handle a row size overflow.

dict_index_t::rec_potentially_too_big(): removed.
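For illustration only (this sketch is not part of the commit and is not InnoDB code; every name and number in it is invented): the intended decision flow is to compute the worst-case row size of an index once, at CREATE/ALTER TABLE time, fail the DDL when the limit is exceeded and innodb_strict_mode is ON, downgrade to a warning otherwise, and never re-check when the table is merely opened later.

    // Standalone sketch of the strict-vs-warning decision described above.
    // Not InnoDB code: check_index() stands in for dict_index_t::record_size_info()
    // and row_size_is_acceptable() for create_table_info_t::row_size_is_acceptable().
    #include <cstddef>
    #include <iostream>

    struct size_check_result {
      bool too_big = false;     // worst-case row exceeds the leaf-page limit
      std::size_t overrun = 0;  // worst-case row size that overflowed
      std::size_t limit = 0;    // maximum allowed record size on a leaf page
    };

    size_check_result check_index(std::size_t worst_case_row, std::size_t leaf_limit) {
      size_check_result r;
      r.limit = leaf_limit;
      if (worst_case_row >= leaf_limit) {
        r.too_big = true;
        r.overrun = worst_case_row;
      }
      return r;
    }

    bool row_size_is_acceptable(const size_check_result &r, bool strict_mode) {
      if (!r.too_big)
        return true;
      std::cerr << (strict_mode ? "ERROR" : "Warning") << ": row size " << r.overrun
                << " > " << r.limit << " bytes\n";
      return !strict_mode;  // strict: refuse the DDL; non-strict: warn and continue
    }

    int main() {
      const size_check_result r = check_index(20000, 8126);   // invented numbers
      std::cout << row_size_is_acceptable(r, true) << '\n';   // 0: CREATE/ALTER fails
      std::cout << row_size_is_acceptable(r, false) << '\n';  // 1: accepted with a warning
    }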
@@ -136,8 +136,6 @@ v=@h,w=@h,x=@b,y=@h,z=@h,
 aa=@h,ba=@h,ca=@h,da=@h,ea=@h,fa=@h,ga=@h,ha=@h,ia=@h,ja=@h,
 ka=@h,la=@h,ma=@h,na=@h,oa=@h,pa=@h,qa=@h,ra=@h,sa=@h,ta=@b,ua=@h,
 va=@h,wa=@h,xa=@h,ya=@h,za=@h;
-Warnings:
-Warning 139 Row size too large (> 16318). Changing some columns to TEXT or BLOB or using ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED may help. In current row format, BLOB prefix of 768 bytes is stored inline.
 BEGIN;
 UPDATE t1 SET a=@g,b=@g,c=@g,d=@g,e=@g;
 UPDATE t1 SET f=@g,g=@g,h=@g,i=@g,j=@g;
@@ -1,3 +1,4 @@
+call mtr.add_suppression('InnoDB: Cannot add field.*because after adding it, the row size is');
 SET GLOBAL innodb_file_per_table=on;
 SET GLOBAL innodb_strict_mode=on;
 set old_alter_table=0;
@@ -1,3 +1,4 @@
+call mtr.add_suppression('InnoDB: Cannot add field.*because after adding it, the row size is');
 SET @large_prefix_orig = @@GLOBAL.innodb_large_prefix;
 CREATE TABLE worklog5743 (
 col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) ,
@@ -1,5 +1,7 @@
 -- source include/innodb_page_size_small.inc

+call mtr.add_suppression('InnoDB: Cannot add field.*because after adding it, the row size is');
+
 let $file_per_table=`select @@innodb_file_per_table`;

 SET GLOBAL innodb_file_per_table=on;
@@ -15,6 +15,8 @@
 --source include/have_innodb.inc
 --source include/have_innodb_16k.inc

+call mtr.add_suppression('InnoDB: Cannot add field.*because after adding it, the row size is');
+
 SET @large_prefix_orig = @@GLOBAL.innodb_large_prefix;

 # Prefix index with VARCHAR data type , primary/secondary index and DML ops
@@ -361,8 +361,6 @@ dict_boot(void)

         error = dict_index_add_to_cache(table, index,
                                         mach_read_from_4(dict_hdr
-                                                         + DICT_HDR_TABLES),
-                                        FALSE);
+                                                         + DICT_HDR_TABLES));
         ut_a(error == DB_SUCCESS);

         /*-------------------------*/
@@ -371,10 +370,8 @@ dict_boot(void)
         dict_mem_index_add_field(index, "ID", 0);

         index->id = DICT_TABLE_IDS_ID;
-        error = dict_index_add_to_cache(table, index,
-                                        mach_read_from_4(dict_hdr
-                                                         + DICT_HDR_TABLE_IDS),
-                                        FALSE);
+        error = dict_index_add_to_cache(
+                table, index, mach_read_from_4(dict_hdr + DICT_HDR_TABLE_IDS));
         ut_a(error == DB_SUCCESS);

         /*-------------------------*/
@@ -405,8 +402,7 @@ dict_boot(void)
         index->id = DICT_COLUMNS_ID;
         error = dict_index_add_to_cache(table, index,
                                         mach_read_from_4(dict_hdr
-                                                         + DICT_HDR_COLUMNS),
-                                        FALSE);
+                                                         + DICT_HDR_COLUMNS));
         ut_a(error == DB_SUCCESS);

         /*-------------------------*/
@@ -438,8 +434,7 @@ dict_boot(void)
         index->id = DICT_INDEXES_ID;
         error = dict_index_add_to_cache(table, index,
                                         mach_read_from_4(dict_hdr
-                                                         + DICT_HDR_INDEXES),
-                                        FALSE);
+                                                         + DICT_HDR_INDEXES));
         ut_a(error == DB_SUCCESS);

         /*-------------------------*/
@@ -465,8 +460,7 @@ dict_boot(void)
         index->id = DICT_FIELDS_ID;
         error = dict_index_add_to_cache(table, index,
                                         mach_read_from_4(dict_hdr
-                                                         + DICT_HDR_FIELDS),
-                                        FALSE);
+                                                         + DICT_HDR_FIELDS));
         ut_a(error == DB_SUCCESS);

         mtr_commit(&mtr);
@@ -1477,8 +1477,7 @@ dict_create_index_step(

         if (node->state == INDEX_ADD_TO_CACHE) {
                 err = dict_index_add_to_cache(
-                        node->table, node->index, FIL_NULL,
-                        trx_is_strict(trx), node->add_v);
+                        node->table, node->index, FIL_NULL, node->add_v);

                 ut_ad((node->index == NULL) == (err != DB_SUCCESS));

@@ -45,11 +45,6 @@ dict_index_t* dict_ind_redundant;
 extern uint ibuf_debug;
 #endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */

-/**********************************************************************
-Issue a warning that the row is too big. */
-void
-ib_warn_row_too_big(const dict_table_t* table);
-
 #include "btr0btr.h"
 #include "btr0cur.h"
 #include "btr0sea.h"
@@ -2126,184 +2121,6 @@ dict_col_name_is_reserved(
         return(FALSE);
 }
-
-bool
-dict_index_t::rec_potentially_too_big(const dict_table_t* candidate_table,
-                                      bool strict) const
-{
-        ut_ad(!table);
-
-        ulint comp;
-        ulint i;
-        /* maximum possible storage size of a record */
-        ulint rec_max_size;
-        /* maximum allowed size of a record on a leaf page */
-        ulint page_rec_max;
-        /* maximum allowed size of a node pointer record */
-        ulint page_ptr_max;
-
-        /* FTS index consists of auxiliary tables, they shall be excluded from
-        index row size check */
-        if (type & DICT_FTS) {
-                return false;
-        }
-
-        DBUG_EXECUTE_IF(
-                "ib_force_create_table",
-                return(FALSE););
-
-        comp = dict_table_is_comp(candidate_table);
-
-        const page_size_t page_size(dict_table_page_size(candidate_table));
-
-        if (page_size.is_compressed()
-            && page_size.physical() < univ_page_size.physical()) {
-                /* On a compressed page, two records must fit in the
-                uncompressed page modification log. On compressed pages
-                with size.physical() == univ_page_size.physical(),
-                this limit will never be reached. */
-                ut_ad(comp);
-                /* The maximum allowed record size is the size of
-                an empty page, minus a byte for recoding the heap
-                number in the page modification log. The maximum
-                allowed node pointer size is half that. */
-                page_rec_max = page_zip_empty_size(n_fields,
-                                                   page_size.physical());
-                if (page_rec_max) {
-                        page_rec_max--;
-                }
-                page_ptr_max = page_rec_max / 2;
-                /* On a compressed page, there is a two-byte entry in
-                the dense page directory for every record. But there
-                is no record header. */
-                rec_max_size = 2;
-        } else {
-                /* The maximum allowed record size is half a B-tree
-                page(16k for 64k page size). No additional sparse
-                page directory entry will be generated for the first
-                few user records. */
-                page_rec_max = (comp || srv_page_size < UNIV_PAGE_SIZE_MAX)
-                        ? page_get_free_space_of_empty(comp) / 2
-                        : REDUNDANT_REC_MAX_DATA_SIZE;
-
-                page_ptr_max = page_rec_max;
-                /* Each record has a header. */
-                rec_max_size = comp
-                        ? REC_N_NEW_EXTRA_BYTES
-                        : REC_N_OLD_EXTRA_BYTES;
-        }
-
-        if (comp) {
-                /* Include the "null" flags in the
-                maximum possible record size. */
-                rec_max_size += UT_BITS_IN_BYTES(n_nullable);
-        } else {
-                /* For each column, include a 2-byte offset and a
-                "null" flag. The 1-byte format is only used in short
-                records that do not contain externally stored columns.
-                Such records could never exceed the page limit, even
-                when using the 2-byte format. */
-                rec_max_size += 2 * n_fields;
-        }
-
-        const ulint max_local_len
-                = candidate_table->get_overflow_field_local_len();
-
-        /* Compute the maximum possible record size. */
-        for (i = 0; i < n_fields; i++) {
-                const dict_field_t*     field
-                        = dict_index_get_nth_field(this, i);
-                const dict_col_t*       col
-                        = dict_field_get_col(field);
-
-                /* In dtuple_convert_big_rec(), variable-length columns
-                that are longer than BTR_EXTERN_LOCAL_STORED_MAX_SIZE
-                may be chosen for external storage.
-
-                Fixed-length columns, and all columns of secondary
-                index records are always stored inline. */
-
-                /* Determine the maximum length of the index field.
-                The field_ext_max_size should be computed as the worst
-                case in rec_get_converted_size_comp() for
-                REC_STATUS_ORDINARY records. */
-
-                size_t field_max_size = dict_col_get_fixed_size(col, comp);
-                if (field_max_size && field->fixed_len != 0) {
-                        /* dict_index_add_col() should guarantee this */
-                        ut_ad(!field->prefix_len
-                              || field->fixed_len == field->prefix_len);
-                        /* Fixed lengths are not encoded
-                        in ROW_FORMAT=COMPACT. */
-                        goto add_field_size;
-                }
-
-                field_max_size = dict_col_get_max_size(col);
-
-                if (field->prefix_len) {
-                        if (field->prefix_len < field_max_size) {
-                                field_max_size = field->prefix_len;
-                        }
-
-                        // those conditions were copied from dtuple_convert_big_rec()
-                } else if (field_max_size > max_local_len
-                           && field_max_size > BTR_EXTERN_LOCAL_STORED_MAX_SIZE
-                           && DATA_BIG_COL(col)
-                           && dict_index_is_clust(this)) {
-
-                        /* In the worst case, we have a locally stored
-                        column of BTR_EXTERN_LOCAL_STORED_MAX_SIZE bytes.
-                        The length can be stored in one byte. If the
-                        column were stored externally, the lengths in
-                        the clustered index page would be
-                        BTR_EXTERN_FIELD_REF_SIZE and 2. */
-                        field_max_size = max_local_len;
-                }
-
-                if (comp) {
-                        /* Add the extra size for ROW_FORMAT=COMPACT.
-                        For ROW_FORMAT=REDUNDANT, these bytes were
-                        added to rec_max_size before this loop. */
-                        rec_max_size += field_max_size < 256 ? 1 : 2;
-                }
-add_field_size:
-                rec_max_size += field_max_size;
-
-                /* Check the size limit on leaf pages. */
-                if (rec_max_size >= page_rec_max) {
-                        // with 4k page size innodb_index_stats becomes too big
-                        // this crutch allows server bootstrapping to continue
-                        if (candidate_table->is_system_db) {
-                                return false;
-                        }
-
-                        ib::error_or_warn(strict)
-                                << "Cannot add field " << field->name
-                                << " in table " << candidate_table->name
-                                << " because after adding it, the row size is "
-                                << rec_max_size
-                                << " which is greater than maximum allowed"
-                                   " size (" << page_rec_max
-                                << ") for a record on index leaf page.";
-
-                        return true;
-                }
-
-                /* Check the size limit on non-leaf pages. Records
-                stored in non-leaf B-tree pages consist of the unique
-                columns of the record (the key columns of the B-tree)
-                and a node pointer field. When we have processed the
-                unique columns, rec_max_size equals the size of the
-                node pointer record minus the node pointer column. */
-                if (i + 1 == dict_index_get_n_unique_in_tree(this)
-                    && rec_max_size + REC_NODE_PTR_SIZE >= page_ptr_max) {
-
-                        return true;
-                }
-        }
-
-        return false;
-}

 /** Clears the virtual column's index list before index is
 being freed.
 @param[in]  index  Index being freed */
@@ -2348,17 +2165,13 @@ added column.
 @param[in,out]  index    index; NOTE! The index memory
                          object is freed in this function!
 @param[in]      page_no  root page number of the index
-@param[in]      strict   true=refuse to create the index
-                         if records could be too big to fit in
-                         an B-tree page
 @param[in]      add_v    virtual columns being added along with ADD INDEX
-@return DB_SUCCESS, DB_TOO_BIG_RECORD, or DB_CORRUPTION */
+@return DB_SUCCESS, or DB_CORRUPTION */
 dberr_t
 dict_index_add_to_cache(
         dict_table_t*           table,
         dict_index_t*&          index,
         ulint                   page_no,
-        bool                    strict,
         const dict_add_v_col_t* add_v)
 {
         dict_index_t*   new_index;
@@ -2404,20 +2217,6 @@ dict_index_add_to_cache(
         new_index->disable_ahi = index->disable_ahi;
 #endif

-        if (new_index->rec_potentially_too_big(table, strict)) {
-
-                if (strict) {
-                        dict_mem_index_free(new_index);
-                        dict_mem_index_free(index);
-                        index = NULL;
-                        return DB_TOO_BIG_RECORD;
-                } else if (current_thd != NULL) {
-                        /* Avoid the warning to be printed
-                        during recovery. */
-                        ib_warn_row_too_big((const dict_table_t*)table);
-                }
-        }
-
         n_ord = new_index->n_uniq;
         /* Flag the ordering columns and also set column max_prefix */

@@ -5578,7 +5578,7 @@ normalize_table_name_c_low(

 create_table_info_t::create_table_info_t(
         THD*            thd,
-        TABLE*          form,
+        const TABLE*    form,
         HA_CREATE_INFO* create_info,
         char*           table_name,
         char*           remote_path,
@@ -12738,16 +12738,250 @@ int create_table_info_t::create_table(bool create_fk)
                 }
         }

-        innobase_table = dict_table_open_on_name(
-                m_table_name, TRUE, FALSE, DICT_ERR_IGNORE_NONE);
+        innobase_table = dict_table_open_on_name(m_table_name, true, false,
+                                                 DICT_ERR_IGNORE_NONE);
+        ut_ad(innobase_table);

-        if (innobase_table != NULL) {
-                dict_table_close(innobase_table, TRUE, FALSE);
+        const bool is_acceptable = row_size_is_acceptable(*innobase_table);
+
+        dict_table_close(innobase_table, true, false);
+
+        if (!is_acceptable) {
+                DBUG_RETURN(convert_error_code_to_mysql(
+                        DB_TOO_BIG_RECORD, m_flags, NULL));
         }

         DBUG_RETURN(0);
 }
+
+bool create_table_info_t::row_size_is_acceptable(
+    const dict_table_t &table) const
+{
+  for (dict_index_t *index= dict_table_get_first_index(&table); index;
+       index= dict_table_get_next_index(index))
+  {
+
+    if (!row_size_is_acceptable(*index))
+    {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+/* FIXME: row size check has some flaws and should be improved */
+dict_index_t::record_size_info_t dict_index_t::record_size_info() const
+{
+  ut_ad(!(type & DICT_FTS));
+
+  /* maximum allowed size of a node pointer record */
+  ulint page_ptr_max;
+  const bool comp= dict_table_is_comp(table);
+  const page_size_t page_size(dict_table_page_size(table));
+  record_size_info_t result;
+
+  if (page_size.is_compressed() &&
+      page_size.physical() < univ_page_size.physical())
+  {
+    /* On a ROW_FORMAT=COMPRESSED page, two records must fit in the
+    uncompressed page modification log. On compressed pages
+    with size.physical() == univ_page_size.physical(),
+    this limit will never be reached. */
+    ut_ad(comp);
+    /* The maximum allowed record size is the size of
+    an empty page, minus a byte for recoding the heap
+    number in the page modification log. The maximum
+    allowed node pointer size is half that. */
+    result.max_leaf_size= page_zip_empty_size(n_fields, page_size.physical());
+    if (result.max_leaf_size)
+    {
+      result.max_leaf_size--;
+    }
+    page_ptr_max= result.max_leaf_size / 2;
+    /* On a compressed page, there is a two-byte entry in
+    the dense page directory for every record. But there
+    is no record header. */
+    result.shortest_size= 2;
+  }
+  else
+  {
+    /* The maximum allowed record size is half a B-tree
+    page(16k for 64k page size). No additional sparse
+    page directory entry will be generated for the first
+    few user records. */
+    result.max_leaf_size= (comp || srv_page_size < UNIV_PAGE_SIZE_MAX)
+                              ? page_get_free_space_of_empty(comp) / 2
+                              : REDUNDANT_REC_MAX_DATA_SIZE;
+
+    page_ptr_max= result.max_leaf_size;
+    /* Each record has a header. */
+    result.shortest_size= comp ? REC_N_NEW_EXTRA_BYTES : REC_N_OLD_EXTRA_BYTES;
+  }
+
+  if (comp)
+  {
+    /* Include the "null" flags in the
+    maximum possible record size. */
+    result.shortest_size+= UT_BITS_IN_BYTES(n_nullable);
+  }
+  else
+  {
+    /* For each column, include a 2-byte offset and a
+    "null" flag. The 1-byte format is only used in short
+    records that do not contain externally stored columns.
+    Such records could never exceed the page limit, even
+    when using the 2-byte format. */
+    result.shortest_size+= 2 * n_fields;
+  }
+
+  const ulint max_local_len= table->get_overflow_field_local_len();
+
+  /* Compute the maximum possible record size. */
+  for (unsigned i= 0; i < n_fields; i++)
+  {
+    const dict_field_t &f= fields[i];
+    const dict_col_t &col= *f.col;
+
+    /* In dtuple_convert_big_rec(), variable-length columns
+    that are longer than BTR_EXTERN_LOCAL_STORED_MAX_SIZE
+    may be chosen for external storage.
+
+    Fixed-length columns, and all columns of secondary
+    index records are always stored inline. */
+
+    /* Determine the maximum length of the index field.
+    The field_ext_max_size should be computed as the worst
+    case in rec_get_converted_size_comp() for
+    REC_STATUS_ORDINARY records. */
+
+    size_t field_max_size= dict_col_get_fixed_size(&col, comp);
+    if (field_max_size && f.fixed_len != 0)
+    {
+      /* dict_index_add_col() should guarantee this */
+      ut_ad(!f.prefix_len || f.fixed_len == f.prefix_len);
+      /* Fixed lengths are not encoded
+      in ROW_FORMAT=COMPACT. */
+      goto add_field_size;
+    }
+
+    field_max_size= dict_col_get_max_size(&col);
+
+    if (f.prefix_len)
+    {
+      if (f.prefix_len < field_max_size)
+      {
+        field_max_size= f.prefix_len;
+      }
+
+      /* those conditions were copied from dtuple_convert_big_rec()*/
+    }
+    else if (field_max_size > max_local_len &&
+             field_max_size > BTR_EXTERN_LOCAL_STORED_MAX_SIZE &&
+             DATA_BIG_COL(&col) && dict_index_is_clust(this))
+    {
+
+      /* In the worst case, we have a locally stored
+      column of BTR_EXTERN_LOCAL_STORED_MAX_SIZE bytes.
+      The length can be stored in one byte. If the
+      column were stored externally, the lengths in
+      the clustered index page would be
+      BTR_EXTERN_FIELD_REF_SIZE and 2. */
+      field_max_size= max_local_len;
+    }
+
+    if (comp)
+    {
+      /* Add the extra size for ROW_FORMAT=COMPACT.
+      For ROW_FORMAT=REDUNDANT, these bytes were
+      added to result.shortest_size before this loop. */
+      result.shortest_size+= field_max_size < 256 ? 1 : 2;
+    }
+  add_field_size:
+    result.shortest_size+= field_max_size;
+
+    /* Check the size limit on leaf pages. */
+    if (result.shortest_size >= result.max_leaf_size)
+    {
+      result.set_too_big(i);
+    }
+
+    /* Check the size limit on non-leaf pages. Records
+    stored in non-leaf B-tree pages consist of the unique
+    columns of the record (the key columns of the B-tree)
+    and a node pointer field. When we have processed the
+    unique columns, result.shortest_size equals the size of the
+    node pointer record minus the node pointer column. */
+    if (i + 1 == dict_index_get_n_unique_in_tree(this) &&
+        result.shortest_size + REC_NODE_PTR_SIZE >= page_ptr_max)
+    {
+      result.set_too_big(i);
+    }
+  }
+
+  return result;
+}
+
+/** Issue a warning that the row is too big. */
+static void ib_warn_row_too_big(THD *thd, const dict_table_t *table)
+{
+  /* FIXME: this row size check should be improved */
+  /* If prefix is true then a 768-byte prefix is stored
+  locally for BLOB fields. Refer to dict_table_get_format() */
+  const bool prefix= (dict_tf_get_format(table->flags) == UNIV_FORMAT_A);
+
+  const ulint free_space=
+      page_get_free_space_of_empty(table->flags & DICT_TF_COMPACT) / 2;
+
+  push_warning_printf(
+      thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_TO_BIG_ROW,
+      "Row size too large (> " ULINTPF "). Changing some columns to TEXT"
+      " or BLOB %smay help. In current row format, BLOB prefix of"
+      " %d bytes is stored inline.",
+      free_space,
+      prefix ? "or using ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED " : "",
+      prefix ? DICT_MAX_FIXED_COL_LEN : 0);
+}
+
+bool create_table_info_t::row_size_is_acceptable(
+    const dict_index_t &index) const
+{
+  if ((index.type & DICT_FTS) || index.table->is_system_db)
+  {
+    /* Ignore system tables check because innodb_table_stats
+    maximum row size can not fit on 4k page. */
+    return true;
+  }
+
+  const bool strict= THDVAR(m_thd, strict_mode);
+  dict_index_t::record_size_info_t info= index.record_size_info();
+
+  if (info.row_is_too_big())
+  {
+    ut_ad(info.get_overrun_size() != 0);
+    ut_ad(info.max_leaf_size != 0);
+
+    const size_t idx= info.get_first_overrun_field_index();
+    const dict_field_t *field= dict_index_get_nth_field(&index, idx);
+
+    ib::error_or_warn(strict)
+        << "Cannot add field " << field->name << " in table "
+        << index.table->name << " because after adding it, the row size is "
+        << info.get_overrun_size()
+        << " which is greater than maximum allowed size ("
+        << info.max_leaf_size << " bytes) for a record on index leaf page.";
+
+    if (strict)
+    {
+      return false;
+    }
+
+    ib_warn_row_too_big(m_thd, index.table);
+  }
+
+  return true;
+}

 /** Update a new table in an InnoDB database.
 @return error number */
 int
@@ -22165,32 +22399,6 @@ innobase_convert_to_system_charset(
                 cs2, to, static_cast<uint>(len), errors)));
 }

-/**********************************************************************
-Issue a warning that the row is too big. */
-void
-ib_warn_row_too_big(const dict_table_t* table)
-{
-        /* If prefix is true then a 768-byte prefix is stored
-        locally for BLOB fields. Refer to dict_table_get_format() */
-        const bool prefix = (dict_tf_get_format(table->flags)
-                             == UNIV_FORMAT_A);
-
-        const ulint free_space = page_get_free_space_of_empty(
-                table->flags & DICT_TF_COMPACT) / 2;
-
-        THD* thd = current_thd;
-
-        push_warning_printf(
-                thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_TO_BIG_ROW,
-                "Row size too large (> " ULINTPF ")."
-                " Changing some columns to TEXT"
-                " or BLOB %smay help. In current row format, BLOB prefix of"
-                " %d bytes is stored inline.", free_space
-                , prefix ? "or using ROW_FORMAT=DYNAMIC or"
-                " ROW_FORMAT=COMPRESSED ": ""
-                , prefix ? DICT_MAX_FIXED_COL_LEN : 0);
-}
-
 /** Validate the requested buffer pool size. Also, reserve the necessary
 memory needed for buffer pool resize.
 @param[in]      thd     thread handle
@@ -631,7 +631,7 @@ public:
         - all but name/path is used, when validating options and using flags. */
         create_table_info_t(
                 THD*            thd,
-                TABLE*          form,
+                const TABLE*    form,
                 HA_CREATE_INFO* create_info,
                 char*           table_name,
                 char*           remote_path,
@@ -679,6 +679,11 @@ public:

         void allocate_trx();

+        /** Checks that every index have sane size. Depends on strict mode */
+        bool row_size_is_acceptable(const dict_table_t& table) const;
+        /** Checks that given index have sane size. Depends on strict mode */
+        bool row_size_is_acceptable(const dict_index_t& index) const;
+
         /** Determines InnoDB table flags.
         If strict_mode=OFF, this will adjust the flags to what should be assumed.
         @retval true if successful, false if error */
@@ -4400,6 +4400,10 @@ prepare_inplace_alter_table_dict(

         new_clustered = DICT_CLUSTERED & index_defs[0].ind_type;

+        create_table_info_t info(ctx->prebuilt->trx->mysql_thd, altered_table,
+                                 ha_alter_info->create_info, NULL, NULL,
+                                 srv_file_per_table);
+
         if (num_fts_index > 1) {
                 my_error(ER_INNODB_FT_LIMIT, MYF(0));
                 goto error_handled;
@@ -4840,6 +4844,11 @@ index_created:
                         goto error_handling;
                 }

+                if (!info.row_size_is_acceptable(*ctx->add_index[a])) {
+                        error = DB_TOO_BIG_RECORD;
+                        goto error_handling;
+                }
+
                 DBUG_ASSERT(ctx->add_index[a]->is_committed()
                             == !!new_clustered);

@@ -1099,17 +1099,13 @@ added column.
 @param[in,out]  index    index; NOTE! The index memory
                          object is freed in this function!
 @param[in]      page_no  root page number of the index
-@param[in]      strict   true=refuse to create the index
-                         if records could be too big to fit in
-                         an B-tree page
 @param[in]      add_v    virtual columns being added along with ADD INDEX
-@return DB_SUCCESS, DB_TOO_BIG_RECORD, or DB_CORRUPTION */
+@return DB_SUCCESS, or DB_CORRUPTION */
 dberr_t
 dict_index_add_to_cache(
         dict_table_t*           table,
         dict_index_t*&          index,
         ulint                   page_no,
-        bool                    strict = false,
         const dict_add_v_col_t* add_v = NULL)
         MY_ATTRIBUTE((warn_unused_result));
 /********************************************************************//**
@@ -1029,13 +1029,65 @@ struct dict_index_t{
                 }
         }

-        /** If a record of this index might not fit on a single B-tree page,
-        return true.
-        @param[in]      candidate_table where we're goint to attach this index
-        @param[in]      strict          issue error or warning
-        @return true if the index record could become too big */
-        bool rec_potentially_too_big(const dict_table_t* candidate_table,
-                                     bool strict) const;
+        /** This ad-hoc class is used by record_size_info only. */
+        class record_size_info_t {
+        public:
+                record_size_info_t()
+                    : max_leaf_size(0), shortest_size(0), too_big(false),
+                      first_overrun_field_index(SIZE_T_MAX), overrun_size(0)
+                {
+                }
+
+                /** Mark row potentially too big for page and set up first
+                overflow field index. */
+                void set_too_big(size_t field_index)
+                {
+                        ut_ad(field_index != SIZE_T_MAX);
+
+                        too_big = true;
+                        if (first_overrun_field_index > field_index) {
+                                first_overrun_field_index = field_index;
+                                overrun_size = shortest_size;
+                        }
+                }
+
+                /** @return overrun field index or SIZE_T_MAX if nothing
+                overflowed*/
+                size_t get_first_overrun_field_index() const
+                {
+                        ut_ad(row_is_too_big());
+                        ut_ad(first_overrun_field_index != SIZE_T_MAX);
+                        return first_overrun_field_index;
+                }
+
+                size_t get_overrun_size() const
+                {
+                        ut_ad(row_is_too_big());
+                        return overrun_size;
+                }
+
+                bool row_is_too_big() const { return too_big; }
+
+                size_t max_leaf_size;  /** Bigger row size this index can
+                                       produce */
+                size_t shortest_size;  /** shortest because it counts everything
+                                       as in overflow pages */
+
+        private:
+                bool too_big;  /** This one is true when maximum row size this
+                               index can produce is bigger than maximum row
+                               size given page can hold. */
+                size_t first_overrun_field_index;  /** After adding this field
+                                                   index row overflowed maximum
+                                                   allowed size. Useful for
+                                                   reporting back to user. */
+                size_t overrun_size;  /** Just overrun row size */
+        };
+
+        /** Returns max possibly record size for that index, size of a shortest
+        everything in overflow) size of the longest possible row and index
+        of a field which made index records too big to fit on a page.*/
+        inline record_size_info_t record_size_info() const;
 };

 /** Detach a column from an index.
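As a side note that is not part of the diff: the bookkeeping done by record_size_info_t above can be illustrated with a small self-contained C++ model (all field sizes and the leaf-page limit below are invented; the real computation in dict_index_t::record_size_info() also depends on row format, compression and column types):

    // Standalone model of record_size_info_t's bookkeeping, for illustration only
    // (not the InnoDB class): accumulate worst-case field sizes and remember the
    // first field at which the running total crosses the leaf-page limit.
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct record_size_info {
      std::size_t max_leaf_size;                   // page limit for one record
      std::size_t shortest_size = 0;               // running worst-case row size
      std::size_t first_overrun_field = SIZE_MAX;  // index of the offending field
      std::size_t overrun_size = 0;                // row size at the moment of overflow
      bool too_big = false;

      explicit record_size_info(std::size_t limit) : max_leaf_size(limit) {}

      void add_field(std::size_t index, std::size_t worst_case_len) {
        shortest_size += worst_case_len;
        if (shortest_size >= max_leaf_size && first_overrun_field > index) {
          too_big = true;
          first_overrun_field = index;  // reported back to the user, as in the patch
          overrun_size = shortest_size;
        }
      }
    };

    int main() {
      // Invented field sizes checked against an invented 8126-byte leaf limit.
      const std::vector<std::size_t> field_sizes = {768, 768, 4000, 4000, 768};
      record_size_info info(8126);
      for (std::size_t i = 0; i < field_sizes.size(); ++i)
        info.add_field(i, field_sizes[i]);
      if (info.too_big)
        std::cout << "field #" << info.first_overrun_field << " pushes the row to "
                  << info.overrun_size << " bytes (limit " << info.max_leaf_size << ")\n";
    }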
@@ -164,6 +164,9 @@ page_zip_rec_needs_ext(
         ulint                   n_fields,
         const page_size_t&      page_size)
 {
+        /* FIXME: row size check is this function seems to be the most correct.
+        Put it in a separate function and use in more places of InnoDB */
+
         ut_ad(rec_size
               > ulint(comp ? REC_N_NEW_EXTRA_BYTES : REC_N_OLD_EXTRA_BYTES));
         ut_ad(comp || !page_size.is_compressed());
@@ -2450,8 +2450,7 @@ row_create_index_for_mysql(
         } else {
                 dict_build_index_def(table, index, trx);

-                err = dict_index_add_to_cache(
-                        table, index, FIL_NULL, trx_is_strict(trx));
+                err = dict_index_add_to_cache(table, index, FIL_NULL);
                 ut_ad((index == NULL) == (err != DB_SUCCESS));

                 if (err != DB_SUCCESS) {