Merge mysql.com:/home/hf/work/32247/my51-32247
into mysql.com:/home/hf/work/mrg/my51-mrg
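
Summary, inferred from the hunks below: Bug #32247 is that SHOW CREATE TABLE reported a wrong AUTO_INCREMENT value on a partitioned InnoDB table. ha_partition::update_create_info() used to ask only its first underlying handler for create info, so the counter it reported was bounded by the first partition; it now refreshes the table-wide statistics instead. The ARCHIVE engine is adjusted in the same patch so that stats.auto_increment_value consistently holds the next value to hand out, while the counter persisted in the az file header stays in last-value-used terms.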
@@ -12665,7 +12665,7 @@ t6 CREATE TABLE `t6` (
   `b` tinyblob,
   `c` int(11) DEFAULT NULL,
   KEY `a` (`a`)
-) ENGINE=ARCHIVE DEFAULT CHARSET=latin1
+) ENGINE=ARCHIVE AUTO_INCREMENT=36 DEFAULT CHARSET=latin1
 DROP TABLE t1, t2, t4, t5, t6;
 create table t1 (i int) engine=archive;
 insert into t1 values (1);
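
Note on this expected-result change: with the fixed ha_archive::update_create_info() (last ha_archive hunk below), SHOW CREATE TABLE on an ARCHIVE table now receives an AUTO_INCREMENT value whenever the user did not set one explicitly, and under the new convention that value is one past the largest id stored, hence the added AUTO_INCREMENT=36 clause.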
@@ -1338,4 +1338,19 @@ int_column char_column
 545592 zZzSD
 39868 zZZRW
 DROP TABLE t1;
+CREATE TABLE t1(id MEDIUMINT NOT NULL AUTO_INCREMENT,
+user CHAR(25), PRIMARY KEY(id))
+PARTITION BY RANGE(id)
+SUBPARTITION BY hash(id) subpartitions 2
+(PARTITION pa1 values less than (10),
+PARTITION pa2 values less than (20),
+PARTITION pa11 values less than MAXVALUE);
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+  `id` mediumint(9) NOT NULL AUTO_INCREMENT,
+  `user` char(25) DEFAULT NULL,
+  PRIMARY KEY (`id`)
+) ENGINE=MyISAM AUTO_INCREMENT=16 DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (id) SUBPARTITION BY HASH (id) SUBPARTITIONS 2 (PARTITION pa1 VALUES LESS THAN (10) ENGINE = MyISAM, PARTITION pa2 VALUES LESS THAN (20) ENGINE = MyISAM, PARTITION pa11 VALUES LESS THAN MAXVALUE ENGINE = MyISAM) */
+drop table t1;
 End of 5.1 tests
@@ -113,7 +113,7 @@ Create Table CREATE TABLE `byrange_tbl` (
   `fkid` mediumint(9) DEFAULT NULL,
   `filler` varchar(255) DEFAULT NULL,
   PRIMARY KEY (`id`)
-) ENGINE=InnoDB AUTO_INCREMENT=9 DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (id) SUBPARTITION BY HASH (id) SUBPARTITIONS 2 (PARTITION pa1 VALUES LESS THAN (10) ENGINE = InnoDB, PARTITION pa2 VALUES LESS THAN (20) ENGINE = InnoDB, PARTITION pa3 VALUES LESS THAN (30) ENGINE = InnoDB, PARTITION pa4 VALUES LESS THAN (40) ENGINE = InnoDB, PARTITION pa5 VALUES LESS THAN (50) ENGINE = InnoDB, PARTITION pa6 VALUES LESS THAN (60) ENGINE = InnoDB, PARTITION pa7 VALUES LESS THAN (70) ENGINE = InnoDB, PARTITION pa8 VALUES LESS THAN (80) ENGINE = InnoDB, PARTITION pa9 VALUES LESS THAN (90) ENGINE = InnoDB, PARTITION pa10 VALUES LESS THAN (100) ENGINE = InnoDB, PARTITION pa11 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+) ENGINE=InnoDB AUTO_INCREMENT=1001 DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (id) SUBPARTITION BY HASH (id) SUBPARTITIONS 2 (PARTITION pa1 VALUES LESS THAN (10) ENGINE = InnoDB, PARTITION pa2 VALUES LESS THAN (20) ENGINE = InnoDB, PARTITION pa3 VALUES LESS THAN (30) ENGINE = InnoDB, PARTITION pa4 VALUES LESS THAN (40) ENGINE = InnoDB, PARTITION pa5 VALUES LESS THAN (50) ENGINE = InnoDB, PARTITION pa6 VALUES LESS THAN (60) ENGINE = InnoDB, PARTITION pa7 VALUES LESS THAN (70) ENGINE = InnoDB, PARTITION pa8 VALUES LESS THAN (80) ENGINE = InnoDB, PARTITION pa9 VALUES LESS THAN (90) ENGINE = InnoDB, PARTITION pa10 VALUES LESS THAN (100) ENGINE = InnoDB, PARTITION pa11 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
 show slave status;
 Slave_IO_State Waiting for master to send event
 Master_Host 127.0.0.1
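
The jump from 9 to 1001 is the bug in miniature: the old ha_partition::update_create_info() delegated to m_file[0], the handler backing the first (sub)partition, whose view of the counter is bounded by the rows that land in pa1 (VALUES LESS THAN (10)), so the reported value could never exceed 10 regardless of table size. With ids up to 1000 in the table, the correct next value is 1001.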
@@ -1577,4 +1577,25 @@ INSERT INTO t1 (int_column, char_column) VALUES
 SELECT * FROM t1 ORDER BY char_column DESC;
 DROP TABLE t1;
 
+#
+# Bug #32247 Test reports wrong value of "AUTO_INCREMENT" (on a partitioned InnoDB table)
+#
+
+CREATE TABLE t1(id MEDIUMINT NOT NULL AUTO_INCREMENT,
+user CHAR(25), PRIMARY KEY(id))
+PARTITION BY RANGE(id)
+SUBPARTITION BY hash(id) subpartitions 2
+(PARTITION pa1 values less than (10),
+PARTITION pa2 values less than (20),
+PARTITION pa11 values less than MAXVALUE);
+--disable_query_log
+let $n= 15;
+while ($n)
+{
+  insert into t1 (user) values ('mysql');
+  dec $n;
+}
+--enable_query_log
+show create table t1;
+drop table t1;
 --echo End of 5.1 tests
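
Why 16: the query-log-disabled loop inserts 15 rows into an empty table, so the last generated id is 15 and the fixed server prints the next value, AUTO_INCREMENT=16, matching the expected output added to the result file earlier in this patch.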
@@ -1598,7 +1598,11 @@ error:
 
 void ha_partition::update_create_info(HA_CREATE_INFO *create_info)
 {
-  m_file[0]->update_create_info(create_info);
+  info(HA_STATUS_AUTO);
+
+  if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
+    create_info->auto_increment_value= stats.auto_increment_value;
+
   create_info->data_file_name= create_info->index_file_name = NULL;
   return;
 }
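
For context, a minimal sketch of the contract this hunk adopts. HA_CREATE_INFO, HA_CREATE_USED_AUTO, info(HA_STATUS_AUTO) and stats.auto_increment_value are the real names used in the hunk; CreateInfo, Handler, kUsedAuto and last_used_id below are invented stand-ins, not server code:

    // Sketch: update_create_info() refreshes the table-wide statistic and
    // fills in the AUTO_INCREMENT clause only when the user left it out.
    #include <cstdint>

    struct CreateInfo                    // stand-in for HA_CREATE_INFO
    {
      uint64_t used_fields;              // bitmask of user-specified options
      uint64_t auto_increment_value;     // what SHOW CREATE TABLE prints
    };

    constexpr uint64_t kUsedAuto= 1;     // stand-in for HA_CREATE_USED_AUTO

    struct Handler                       // stand-in for the handler class
    {
      uint64_t last_used_id= 0;          // engine-side counter
      struct { uint64_t auto_increment_value= 0; } stats;

      void refresh_stats()               // models info(HA_STATUS_AUTO)
      { stats.auto_increment_value= last_used_id + 1; }

      void update_create_info(CreateInfo *ci)
      {
        refresh_stats();                 // ask the whole table, not one partition
        if (!(ci->used_fields & kUsedAuto))
          ci->auto_increment_value= stats.auto_increment_value;
      }
    };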
@@ -357,7 +357,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, int *rc)
     {
       DBUG_RETURN(NULL);
     }
-    stats.auto_increment_value= archive_tmp.auto_increment;
+    stats.auto_increment_value= archive_tmp.auto_increment + 1;
     share->rows_recorded= (ha_rows)archive_tmp.rows;
     share->crashed= archive_tmp.dirty;
     azclose(&archive_tmp);
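
The az file header stores the last auto_increment value actually used, while stats.auto_increment_value now uniformly means the next value to hand out; hence the +1 when loading. A hedged sketch of both directions of the conversion (the helper names are invented; the real code inlines the arithmetic):

    #include <cstdint>

    // Loading (get_share()/info()): header holds "last used", stats wants "next".
    inline uint64_t next_from_last_used(uint64_t last_used)
    { return last_used + 1; }

    // Storing (create()): stats holds "next", the header wants "last used";
    // 0 means "no counter yet" and stays 0.
    inline uint64_t last_used_from_next(uint64_t next)
    { return next ? next - 1 : 0; }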
@@ -586,9 +586,7 @@ int ha_archive::create(const char *name, TABLE *table_arg,
 
   DBUG_ENTER("ha_archive::create");
 
-  stats.auto_increment_value= (create_info->auto_increment_value ?
-                               create_info->auto_increment_value -1 :
-                               (ulonglong) 0);
+  stats.auto_increment_value= create_info->auto_increment_value;
 
   for (uint key= 0; key < table_arg->s->keys; key++)
   {
@@ -673,7 +671,8 @@ int ha_archive::create(const char *name, TABLE *table_arg,
       Yes you need to do this, because the starting value
       for the autoincrement may not be zero.
     */
-    create_stream.auto_increment= stats.auto_increment_value;
+    create_stream.auto_increment= stats.auto_increment_value ?
+                                    stats.auto_increment_value - 1 : 0;
     if (azclose(&create_stream))
     {
       error= errno;
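
Storing goes the other way: create() now copies create_info->auto_increment_value into stats unchanged (it is already a next value) and converts back to last-used terms only when writing the az header, the last_used_from_next() shape sketched above.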
@@ -871,8 +870,8 @@ int ha_archive::write_row(uchar *buf)
     else
     {
       if (temp_auto > share->archive_write.auto_increment)
-        stats.auto_increment_value= share->archive_write.auto_increment=
-          temp_auto;
+        stats.auto_increment_value=
+          (share->archive_write.auto_increment= temp_auto) + 1;
     }
   }
 
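
The rewritten assignment keeps the two counters paired: the shared writer records the largest id seen so far, and the statistic stays exactly one ahead. A hedged sketch of that invariant (free function and names invented for illustration):

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    // Same shape as the patched line in write_row().
    void on_write_row(uint64_t temp_auto, uint64_t &last_used, uint64_t &next)
    {
      if (temp_auto > last_used)
        next= (last_used= temp_auto) + 1;
    }

    int main()
    {
      uint64_t last_used= 0, next= 1;
      for (uint64_t id : {1, 2, 5, 3})   // a smaller id leaves both untouched
      {
        on_write_row(id, last_used, next);
        assert(next == last_used + 1);   // the invariant the patch maintains
      }
      return 0;
    }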
@@ -896,7 +895,7 @@ void ha_archive::get_auto_increment(ulonglong offset, ulonglong increment,
                                     ulonglong *first_value,
                                     ulonglong *nb_reserved_values)
 {
-  *nb_reserved_values= 1;
+  *nb_reserved_values= ULONGLONG_MAX;
   *first_value= share->archive_write.auto_increment + 1;
 }
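
The out-parameter nb_reserved_values tells the upper layer how many consecutive values, starting at *first_value, it may consume without asking the handler again; ULONGLONG_MAX effectively reserves the whole remaining range, which suits an engine whose writer is serialized the way Archive's is. A hedged sketch of the consumer's side (simplified, not the server's actual allocator):

    #include <cstdint>

    struct Reservation
    {
      uint64_t first_value;   // first id the statement may use
      uint64_t nb_reserved;   // how many consecutive ids are guaranteed
      uint64_t used= 0;       // ids consumed so far
    };

    // Returns the next id, or 0 when the reservation is exhausted and the
    // caller must call get_auto_increment() again.
    inline uint64_t take_next(Reservation &r)
    { return (r.used < r.nb_reserved) ? r.first_value + r.used++ : 0; }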
@@ -1315,7 +1314,8 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
   if (!rc)
   {
     share->rows_recorded= 0;
-    stats.auto_increment_value= share->archive_write.auto_increment= 0;
+    stats.auto_increment_value= 1;
+    share->archive_write.auto_increment= 0;
     my_bitmap_map *org_bitmap= dbug_tmp_use_all_columns(table, table->read_set);
 
     while (!(rc= get_row(&archive, table->record[0])))
@@ -1332,8 +1332,8 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
           (ulonglong) field->val_int(table->record[0] +
                                      field->offset(table->record[0]));
         if (share->archive_write.auto_increment < auto_value)
-          stats.auto_increment_value= share->archive_write.auto_increment=
-            auto_value;
+          stats.auto_increment_value=
+            (share->archive_write.auto_increment= auto_value) + 1;
       }
     }
 
@@ -1418,18 +1418,9 @@ void ha_archive::update_create_info(HA_CREATE_INFO *create_info)
   DBUG_ENTER("ha_archive::update_create_info");
 
   ha_archive::info(HA_STATUS_AUTO);
-  if (create_info->used_fields & HA_CREATE_USED_AUTO)
+  if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
   {
-    /*
-      Internally Archive keeps track of last used, not next used.
-      To make the output look like MyISAM we add 1 here.
-
-      This is not completely compatible with MYISAM though, since
-      MyISAM will record on "SHOW CREATE TABLE" the last position,
-      where we will report the original position the table was
-      created with.
-    */
-    create_info->auto_increment_value= stats.auto_increment_value + 1;
+    create_info->auto_increment_value= stats.auto_increment_value;
   }
 
   if (!(my_readlink(share->real_path, share->data_file_name, MYF(0))))
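
The flipped predicate was the visible half of the ARCHIVE bug: under the old test, SHOW CREATE TABLE was given an AUTO_INCREMENT value only when the user had already specified one, and nothing otherwise. The removed comment's +1 (Archive tracks last used, not next used) did not disappear; it moved into get_share() and info() above, so stats.auto_increment_value is already a next value by the time it is copied here.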
@@ -1494,7 +1485,7 @@ int ha_archive::info(uint flag)
     pthread_mutex_lock(&share->mutex);
     azflush(&archive, Z_SYNC_FLUSH);
     pthread_mutex_unlock(&share->mutex);
-    stats.auto_increment_value= archive.auto_increment;
+    stats.auto_increment_value= archive.auto_increment + 1;
   }
 
   DBUG_RETURN(0);