
Merge bk-internal.mysql.com:/home/bk/mysql-4.1
into mysql.com:/home/my/mysql-4.1
monty@mysql.com
2004-10-20 18:58:01 +03:00
21 changed files with 284 additions and 47 deletions

View File

@ -44,12 +44,12 @@ do
case $host_os in
netware* | modesto*)
echo "$i/errmsg.sys: $i/errmsg.txt
\$(top_builddir)/extra/comp_err.linux -C\$(srcdir)/charsets/ \$^ $i/errmsg.sys" \
\$(top_builddir)/extra/comp_err.linux -C\$(srcdir)/charsets/ $i/errmsg.txt $i/errmsg.sys" \
>> $AVAILABLE_LANGUAGES_ERRORS_RULES
;;
*)
echo "$i/errmsg.sys: $i/errmsg.txt
\$(top_builddir)/extra/comp_err -C\$(srcdir)/charsets/ \$^ $i/errmsg.sys" \
\$(top_builddir)/extra/comp_err -C\$(srcdir)/charsets/ $i/errmsg.txt $i/errmsg.sys" \
>> $AVAILABLE_LANGUAGES_ERRORS_RULES
;;
esac

View File

@ -97,9 +97,11 @@ sync_arr_wake_threads_if_sema_free(void);
/**************************************************************************
Prints warnings of long semaphore waits to stderr. */
void
ibool
sync_array_print_long_waits(void);
/*=============================*/
/* out: TRUE if fatal semaphore wait threshold
was exceeded */
/************************************************************************
Validates the integrity of the wait array. Checks
that the number of reserved cells equals the count variable. */

View File

@ -1820,7 +1820,8 @@ srv_error_monitor_thread(
/* in: a dummy parameter required by
os_thread_create */
{
ulint cnt = 0;
/* number of successive fatal timeouts observed */
ulint fatal_cnt = 0;
dulint old_lsn;
dulint new_lsn;
@ -1833,8 +1834,6 @@ srv_error_monitor_thread(
loop:
srv_error_monitor_active = TRUE;
cnt++;
/* Try to track a strange bug reported by Harald Fuchs and others,
where the lsn seems to decrease at times */
@ -1861,7 +1860,20 @@ loop:
srv_refresh_innodb_monitor_stats();
}
sync_array_print_long_waits();
if (sync_array_print_long_waits()) {
fatal_cnt++;
if (fatal_cnt > 5) {
fprintf(stderr,
"InnoDB: Error: semaphore wait has lasted > %lu seconds\n"
"InnoDB: We intentionally crash the server, because it appears to be hung.\n",
srv_fatal_semaphore_wait_threshold);
ut_error;
}
} else {
fatal_cnt = 0;
}
/* Flush stderr so that a database user gets the output
to possible MySQL error file */
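The hunk above changes the error-monitor thread so that a long semaphore wait no longer crashes the server on the first observation: sync_array_print_long_waits() now only reports the condition, and the thread aborts only after more than five consecutive monitor passes report a wait beyond srv_fatal_semaphore_wait_threshold. A minimal standalone sketch of that consecutive-failure counter, with hypothetical names (check_long_waits, MAX_CONSECUTIVE_FAILURES) standing in for the InnoDB internals:

#include <stdio.h>
#include <stdlib.h>

#define MAX_CONSECUTIVE_FAILURES 5

/* Hypothetical probe standing in for sync_array_print_long_waits():
   here it simply pretends every pass exceeds the fatal threshold. */
static int check_long_waits(void)
{
	return 1;
}

int main(void)
{
	int fatal_cnt = 0;
	int pass;

	for (pass = 0; pass < 10; pass++) {
		if (check_long_waits()) {
			if (++fatal_cnt > MAX_CONSECUTIVE_FAILURES) {
				fputs("watchdog: wait looks hung, aborting\n", stderr);
				abort();
			}
		} else {
			fatal_cnt = 0;	/* the wait resolved; start the count over */
		}
		/* the real monitor thread sleeps between passes */
	}
	return 0;
}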

View File

@ -894,15 +894,18 @@ sync_arr_wake_threads_if_sema_free(void)
/**************************************************************************
Prints warnings of long semaphore waits to stderr. */
void
ibool
sync_array_print_long_waits(void)
/*=============================*/
/* out: TRUE if fatal semaphore wait threshold
was exceeded */
{
sync_cell_t* cell;
ibool old_val;
ibool noticed = FALSE;
ulint i;
ulint fatal_timeout = srv_fatal_semaphore_wait_threshold;
ibool fatal = FALSE;
for (i = 0; i < sync_primary_wait_array->n_cells; i++) {
@ -919,13 +922,7 @@ sync_array_print_long_waits(void)
if (cell->wait_object != NULL
&& difftime(time(NULL), cell->reservation_time)
> fatal_timeout) {
fprintf(stderr,
"InnoDB: Error: semaphore wait has lasted > %lu seconds\n"
"InnoDB: We intentionally crash the server, because it appears to be hung.\n",
fatal_timeout);
ut_error;
fatal = TRUE;
}
}
@ -953,6 +950,8 @@ sync_array_print_long_waits(void)
fprintf(stderr,
"InnoDB: ###### Diagnostic info printed to the standard error stream\n");
}
return(fatal);
}
/**************************************************************************

View File

@ -1258,7 +1258,7 @@ trx_undo_prev_version_build(
ibool dummy_extern;
byte* buf;
ulint err;
ulint i;
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
@ -1363,7 +1363,18 @@ trx_undo_prev_version_build(
}
if (row_upd_changes_field_size_or_external(rec, index, update)) {
ulint* ext_vect;
ulint n_ext_vect;
/* We have to set the appropriate extern storage bits in the
old version of the record: the extern bits in rec for those
fields that update does NOT update, as well as the bits for
those fields that update updates to become externally stored
fields. Store the info to ext_vect: */
ext_vect = mem_alloc(sizeof(ulint) * rec_get_n_fields(rec));
n_ext_vect = btr_push_update_extern_fields(ext_vect, rec,
update);
entry = row_rec_to_index_entry(ROW_COPY_DATA, index, rec,
heap);
row_upd_index_replace_new_col_vals(entry, index, update, heap);
@ -1371,6 +1382,11 @@ trx_undo_prev_version_build(
buf = mem_heap_alloc(heap, rec_get_converted_size(entry));
*old_vers = rec_convert_dtuple_to_rec(buf, entry);
/* Now set the extern bits in the old version of the record */
rec_set_field_extern_bits(*old_vers, ext_vect, n_ext_vect,
NULL);
mem_free(ext_vect);
} else {
buf = mem_heap_alloc(heap, rec_get_size(rec));
@ -1379,15 +1395,5 @@ trx_undo_prev_version_build(
row_upd_rec_in_place(*old_vers, update);
}
for (i = 0; i < upd_get_n_fields(update); i++) {
if (upd_get_nth_field(update, i)->extern_storage) {
rec_set_nth_field_extern_bit(*old_vers,
upd_get_nth_field(update, i)->field_no,
TRUE, NULL);
}
}
return(DB_SUCCESS);
}

View File

@ -1543,6 +1543,40 @@ void my_net_local_init(NET *net)
net->max_packet_size= max(net_buffer_length, max_allowed_packet);
}
/*
This function is used to create a HEX string that you
can use in a SQL statement in either of the following ways:
INSERT INTO blob_column VALUES (0xAABBCC); (any MySQL version)
INSERT INTO blob_column VALUES (X'AABBCC'); (4.1 and higher)
The string in "from" is encoded to a HEX string.
The result is placed in "to" and a terminating null byte is appended.
The string pointed to by "from" must be "length" bytes long.
You must allocate the "to" buffer to be at least length*2+1 bytes long.
Each character needs two bytes, and you need room for the terminating
null byte. When mysql_hex_string() returns, the contents of "to" will
be a null-terminated string. The return value is the length of the
encoded string, not including the terminating null character.
The encoded string does not include a leading 0x or X' and trailing ';
the caller must supply whichever of those is desired.
*/
ulong mysql_hex_string(char *to, const char *from, ulong length)
{
char *to0= to;
const char *end;
for (end= from + length; from < end; from++)
{
*to++= _dig_vec_upper[((unsigned char) *from) >> 4];
*to++= _dig_vec_upper[((unsigned char) *from) & 0x0F];
}
*to= '\0';
return (ulong) (to-to0);
}
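For context, the comment above spells out the caller contract for the new mysql_hex_string(): "to" must hold at least length*2+1 bytes, and the caller adds the 0x or X'...' wrapper itself. A small usage sketch based only on that contract (buffer sizes and query assembly are illustrative, and the prototype is assumed to be exported through mysql.h as in later 4.1 clients):

#include <stdio.h>
#include <mysql.h>	/* assumed to declare mysql_hex_string() */

int main(void)
{
	const char blob[] = {'\xAA', '\xBB', '\xCC'};
	char hex[sizeof(blob) * 2 + 1];		/* length*2 + 1 for the '\0' */
	char query[64];
	unsigned long len;

	len = mysql_hex_string(hex, blob, sizeof(blob));

	/* The encoded string has no 0x prefix; the caller supplies it. */
	snprintf(query, sizeof(query),
		 "INSERT INTO blob_column VALUES (0x%s)", hex);

	printf("len=%lu  %s\n", len, query);
	return 0;
}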
/*
Add escape characters to a string (blob?) to make it suitable for an insert.
"to" should have room for at least length*2+1 chars

View File

@ -111,6 +111,8 @@ typedef struct st_isam_mrg {
uint ref_length;
uint max_blob_length;
my_off_t records;
/* true if at least one source file has at least one disabled index */
my_bool src_file_has_indexes_disabled;
} PACK_MRG_INFO;
@ -413,10 +415,15 @@ static bool open_isam_files(PACK_MRG_INFO *mrg,char **names,uint count)
mrg->current=0;
mrg->file=(MI_INFO**) my_malloc(sizeof(MI_INFO*)*count,MYF(MY_FAE));
mrg->free_file=1;
mrg->src_file_has_indexes_disabled= 0;
for (i=0; i < count ; i++)
{
if (!(mrg->file[i]=open_isam_file(names[i],O_RDONLY)))
goto error;
mrg->src_file_has_indexes_disabled|= ((mrg->file[i]->s->state.key_map !=
(((ulonglong) 1) <<
mrg->file[i]->s->base.keys) - 1));
}
/* Check that files are identical */
for (j=0 ; j < count-1 ; j++)
@ -2043,12 +2050,21 @@ static int save_state(MI_INFO *isam_file,PACK_MRG_INFO *mrg,my_off_t new_length,
share->state.dellink= HA_OFFSET_ERROR;
share->state.split=(ha_rows) mrg->records;
share->state.version=(ulong) time((time_t*) 0);
share->state.key_map=0;
if (share->state.key_map != (1ULL << share->base.keys) - 1)
{
/*
Don't save key_file_length here, keep key_file_length of original file
so "myisamchk -rq" can use this value (this is necessary because index
size cannot be easily calculated for fulltext keys)
Some indexes are disabled, cannot use current key_file_length value
as an estimate of upper bound of index file size. Use packed data file
size instead.
*/
share->state.state.key_file_length= new_length;
}
/*
If there are no disabled indexes, keep key_file_length value from
original file so "myisamchk -rq" can use this value (this is necessary
because index size cannot be easily calculated for fulltext keys)
*/
share->state.key_map=0;
for (key=0 ; key < share->base.keys ; key++)
share->state.key_root[key]= HA_OFFSET_ERROR;
for (key=0 ; key < share->state.header.max_block_size ; key++)
@ -2057,8 +2073,7 @@ static int save_state(MI_INFO *isam_file,PACK_MRG_INFO *mrg,my_off_t new_length,
share->changed=1; /* Force write of header */
share->state.open_count=0;
share->global_changed=0;
VOID(my_chsize(share->kfile, share->state.state.key_file_length, 0,
MYF(0)));
VOID(my_chsize(share->kfile, share->base.keystart, 0, MYF(0)));
if (share->base.keys)
isamchk_neaded=1;
DBUG_RETURN(mi_state_info_write(share->kfile,&share->state,1+2));
@ -2081,7 +2096,12 @@ static int save_state_mrg(File file,PACK_MRG_INFO *mrg,my_off_t new_length,
state.state.del=0;
state.state.empty=0;
state.state.records=state.split=(ha_rows) mrg->records;
state.state.key_file_length=isam_file->s->base.keystart;
/* See comment above in save_state about key_file_length handling. */
if (mrg->src_file_has_indexes_disabled)
{
isam_file->s->state.state.key_file_length=
max(isam_file->s->state.state.key_file_length, new_length);
}
state.dellink= HA_OFFSET_ERROR;
state.version=(ulong) time((time_t*) 0);
state.key_map=0;
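The new src_file_has_indexes_disabled flag and the save_state() branch above both rest on the usual full-bitmask test: a table with n keys has key_map == (1 << n) - 1 exactly when every index is enabled, so any other value means some index is disabled. A tiny standalone sketch of that check (the names are illustrative, not the myisampack structures):

#include <stdio.h>

typedef unsigned long long ulonglong;

/* Non-zero when at least one of the table's 'keys' indexes is disabled. */
static int has_disabled_index(ulonglong key_map, unsigned keys)
{
	ulonglong all_enabled = (((ulonglong) 1) << keys) - 1;

	return key_map != all_enabled;
}

int main(void)
{
	printf("%d\n", has_disabled_index(0x7, 3));	/* 0: all three enabled */
	printf("%d\n", has_disabled_index(0x5, 3));	/* 1: second key disabled */
	return 0;
}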

View File

@ -1,4 +1,8 @@
select @@innodb_table_locks;
@@innodb_table_locks
1
drop table if exists t1;
set @@innodb_table_locks=1;
create table t1 (id integer, x integer) engine=INNODB;
insert into t1 values(0, 0);
set autocommit=0;
@ -20,3 +24,33 @@ id x
0 2
commit;
drop table t1;
set @@innodb_table_locks=0;
create table t1 (id integer primary key, x integer) engine=INNODB;
insert into t1 values(0, 0),(1,1),(2,2);
commit;
SELECT * from t1 where id = 0 FOR UPDATE;
id x
0 0
set autocommit=0;
set @@innodb_table_locks=0;
lock table t1 write;
update t1 set x=10 where id = 2;
SELECT * from t1 where id = 2;
id x
2 2
UPDATE t1 set x=3 where id = 2;
commit;
SELECT * from t1;
id x
0 0
1 1
2 3
commit;
unlock tables;
commit;
select * from t1;
id x
0 0
1 1
2 10
drop table t1;

View File

@ -0,0 +1,13 @@
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
create table t1 (a int) engine=innodb;
begin;
insert into t1 values(1);
flush tables with read lock;
commit;
unlock tables;
drop table t1;

View File

@ -0,0 +1 @@
--innodb-table-lock=1

View File

@ -1,5 +1,15 @@
-- source include/have_innodb.inc
#
# Check and select innodb lock type
#
select @@innodb_table_locks;
#
# Testing of explicit table locks with enforced table locks
#
connect (con1,localhost,root,,);
connect (con2,localhost,root,,);
@ -8,9 +18,11 @@ drop table if exists t1;
--enable_warnings
#
# Testing of explicit table locks
# Testing of explicit table locks with enforced table locks
#
set @@innodb_table_locks=1;
connection con1;
create table t1 (id integer, x integer) engine=INNODB;
insert into t1 values(0, 0);
@ -41,3 +53,46 @@ select * from t1;
commit;
drop table t1;
#
# Try with old lock method (where LOCK TABLE is ignored by InnoDB)
#
set @@innodb_table_locks=0;
create table t1 (id integer primary key, x integer) engine=INNODB;
insert into t1 values(0, 0),(1,1),(2,2);
commit;
SELECT * from t1 where id = 0 FOR UPDATE;
connection con2;
set autocommit=0;
set @@innodb_table_locks=0;
# The following statement should work because innodb doesn't check table locks
lock table t1 write;
connection con1;
# This will be locked by MySQL
--send
update t1 set x=10 where id = 2;
--sleep 2
connection con2;
# Note that we will get a deadlock if we try to select any rows marked
# for update by con1 !
SELECT * from t1 where id = 2;
UPDATE t1 set x=3 where id = 2;
commit;
SELECT * from t1;
commit;
unlock tables;
connection con1;
reap;
commit;
select * from t1;
drop table t1;

View File

@ -0,0 +1,17 @@
source include/master-slave.inc;
source include/have_innodb.inc;
create table t1 (a int) engine=innodb;
begin;
insert into t1 values(1);
flush tables with read lock;
commit;
save_master_pos;
connection slave;
sync_with_master;
# cleanup
connection master;
unlock tables;
drop table t1;
save_master_pos;
connection slave;
sync_with_master;

View File

@ -552,8 +552,14 @@ int thr_lock(THR_LOCK_DATA *data,enum thr_lock_type lock_type)
!lock->write_wait.data &&
lock->write.data->type == TL_WRITE_ALLOW_WRITE))
{
/* We have already got a write lock or all locks are
TL_WRITE_ALLOW_WRITE */
/*
We have already got a write lock or all locks are
TL_WRITE_ALLOW_WRITE
*/
DBUG_PRINT("info", ("write_wait.data: 0x%lx old_type: %d",
(ulong) lock->write_wait.data,
lock->write.data->type));
(*lock->write.last)=data; /* Add to running fifo */
data->prev=lock->write.last;
lock->write.last= &data->next;
@ -568,6 +574,8 @@ int thr_lock(THR_LOCK_DATA *data,enum thr_lock_type lock_type)
}
else
{
DBUG_PRINT("info", ("write_wait.data: 0x%lx",
(ulong) lock->write_wait.data));
if (!lock->write_wait.data)
{ /* no scheduled write locks */
if (lock_type == TL_WRITE_CONCURRENT_INSERT &&

View File

@ -8,7 +8,7 @@ use File::Path;
use DBI;
use Sys::Hostname;
use File::Copy;
use File::Temp;
use File::Temp qw(tempfile);
=head1 NAME
@ -39,7 +39,7 @@ WARNING: THIS PROGRAM IS STILL IN BETA. Comments/patches welcome.
# Documentation continued at end of file
my $VERSION = "1.21";
my $VERSION = "1.22";
my $opt_tmpdir = $ENV{TMPDIR} || "/tmp";
@ -655,7 +655,7 @@ sub copy_index
}
elsif ($opt{method} =~ /^scp\b/)
{
my ($fh, $tmp)=tempfile('mysqlhotcopy-XXXXXX', DIR => $opt_tmpdir);
my ($fh, $tmp)= tempfile('mysqlhotcopy-XXXXXX', DIR => $opt_tmpdir) or
die "Can\'t create/open file in $opt_tmpdir\n";
if (syswrite($fh,$buff) != length($buff))
{

View File

@ -302,7 +302,7 @@ convert_error_code_to_mysql(
} else if (error == (int) DB_CANNOT_DROP_CONSTRAINT) {
return(HA_ERR_CANNOT_ADD_FOREIGN); /* TODO: This is a bit
return(HA_ERR_ROW_IS_REFERENCED); /* TODO: This is a bit
misleading, a new MySQL error
code should be introduced */
} else if (error == (int) DB_COL_APPEARS_TWICE_IN_INDEX) {
@ -4958,7 +4958,8 @@ ha_innobase::external_lock(
}
if (prebuilt->select_lock_type != LOCK_NONE) {
if (thd->in_lock_tables) {
if (thd->in_lock_tables &&
thd->variables.innodb_table_locks) {
ulint error;
error = row_lock_table_for_mysql(prebuilt);

View File

@ -537,6 +537,7 @@ int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt)
int ha_myisam::optimize(THD* thd, HA_CHECK_OPT *check_opt)
{
int error;
if (!file) return HA_ADMIN_INTERNAL_ERROR;
MI_CHECK param;
@ -546,7 +547,14 @@ int ha_myisam::optimize(THD* thd, HA_CHECK_OPT *check_opt)
param.testflag = (check_opt->flags | T_SILENT | T_FORCE_CREATE |
T_REP_BY_SORT | T_STATISTICS | T_SORT_INDEX);
param.sort_buffer_length= check_opt->sort_buffer_size;
return repair(thd,param,1);
if ((error= repair(thd,param,1)) && param.retry_repair)
{
sql_print_warning("Warning: Optimize table got errno %d, retrying",
my_errno);
param.testflag&= ~T_REP_BY_SORT;
error= repair(thd,param,1);
}
return error;
}
@ -913,7 +921,13 @@ int ha_myisam::enable_indexes(uint mode)
param.myf_rw&= ~MY_WAIT_IF_FULL;
param.sort_buffer_length= thd->variables.myisam_sort_buff_size;
param.tmpdir=&mysql_tmpdir_list;
error=repair(thd,param,0) != HA_ADMIN_OK;
if ((error= (repair(thd,param,0) != HA_ADMIN_OK)) && param.retry_repair)
{
sql_print_warning("Warning: Enabling keys got errno %d, retrying",
my_errno);
param.testflag&= ~(T_REP_BY_SORT | T_QUICK);
error= (repair(thd,param,0) != HA_ADMIN_OK);
}
info(HA_STATUS_CONST);
thd->proc_info=save_proc_info;
}

View File

@ -792,9 +792,15 @@ bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh,
{
if (thd->global_read_lock) // This thread had the read locks
{
if (is_not_commit)
my_error(ER_CANT_UPDATE_WITH_READLOCK,MYF(0));
(void) pthread_mutex_unlock(&LOCK_open);
DBUG_RETURN(1);
/*
We allow FLUSHer to COMMIT; we assume FLUSHer knows what it does.
This allowance is needed to not break existing versions of innobackup
which do a BEGIN; INSERT; FLUSH TABLES WITH READ LOCK; COMMIT.
*/
DBUG_RETURN(is_not_commit);
}
old_message=thd->enter_cond(&COND_refresh, &LOCK_open,
"Waiting for release of readlock");

View File

@ -4012,6 +4012,7 @@ enum options_mysqld
OPT_INNODB_FORCE_RECOVERY,
OPT_INNODB_STATUS_FILE,
OPT_INNODB_MAX_DIRTY_PAGES_PCT,
OPT_INNODB_TABLE_LOCKS,
OPT_INNODB_OPEN_FILES,
OPT_INNODB_AUTOEXTEND_INCREMENT,
OPT_BDB_CACHE_SIZE,
@ -4250,6 +4251,11 @@ Disable with --skip-innodb (will save memory).",
"Enable SHOW INNODB STATUS output in the innodb_status.<pid> file",
(gptr*) &innobase_create_status_file, (gptr*) &innobase_create_status_file,
0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0},
{"innodb_table_locks", OPT_INNODB_TABLE_LOCKS,
"If Innodb should enforce LOCK TABLE",
(gptr*) &global_system_variables.innodb_table_locks,
(gptr*) &global_system_variables.innodb_table_locks,
0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
#endif /* End HAVE_INNOBASE_DB */
{"isam", OPT_ISAM, "Enable ISAM (if this version of MySQL supports it). \
Disable with --skip-isam.",
@ -4812,7 +4818,7 @@ replicating a LOAD DATA INFILE command.",
(gptr*) &dflt_key_cache_var.param_buff_size,
(gptr*) 0,
0, (GET_ULL | GET_ASK_ADDR),
REQUIRED_ARG, KEY_CACHE_SIZE, MALLOC_OVERHEAD, (long) ~0, MALLOC_OVERHEAD,
REQUIRED_ARG, KEY_CACHE_SIZE, MALLOC_OVERHEAD, UINT_MAX32, MALLOC_OVERHEAD,
IO_SIZE, 0},
{"key_cache_age_threshold", OPT_KEY_CACHE_AGE_THRESHOLD,
"This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache",

View File

@ -351,6 +351,8 @@ sys_var_thd_ulong sys_net_wait_timeout("wait_timeout",
#ifdef HAVE_INNOBASE_DB
sys_var_long_ptr sys_innodb_max_dirty_pages_pct("innodb_max_dirty_pages_pct",
&srv_max_buf_pool_modified_pct);
sys_var_thd_bool sys_innodb_table_locks("innodb_table_locks",
&SV::innodb_table_locks);
sys_var_long_ptr sys_innodb_autoextend_increment("innodb_autoextend_increment",
&srv_auto_extend_increment);
sys_var_long_ptr sys_innodb_max_purge_lag("innodb_max_purge_lag",
@ -605,6 +607,7 @@ sys_var *sys_variables[]=
&sys_os,
#ifdef HAVE_INNOBASE_DB
&sys_innodb_max_dirty_pages_pct,
&sys_innodb_table_locks,
&sys_innodb_max_purge_lag,
&sys_innodb_autoextend_increment,
#endif
@ -700,6 +703,7 @@ struct show_var_st init_vars[]= {
{"innodb_log_files_in_group", (char*) &innobase_log_files_in_group, SHOW_LONG},
{"innodb_log_group_home_dir", (char*) &innobase_log_group_home_dir, SHOW_CHAR_PTR},
{sys_innodb_max_dirty_pages_pct.name, (char*) &sys_innodb_max_dirty_pages_pct, SHOW_SYS},
{sys_innodb_table_locks.name, (char*) &sys_innodb_table_locks, SHOW_SYS},
{sys_innodb_max_purge_lag.name, (char*) &sys_innodb_max_purge_lag, SHOW_SYS},
{"innodb_mirrored_log_groups", (char*) &innobase_mirrored_log_groups, SHOW_LONG},
{"innodb_open_files", (char*) &innobase_open_files, SHOW_LONG },

View File

@ -818,6 +818,7 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name,
table->query_id=thd->query_id;
table->clear_query_id=1;
thd->tmp_table_used= 1;
DBUG_PRINT("info",("Using temporary table"));
goto reset;
}
}
@ -832,6 +833,7 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name,
table->query_id != thd->query_id)
{
table->query_id=thd->query_id;
DBUG_PRINT("info",("Using locked table"));
goto reset;
}
}

View File

@ -396,6 +396,9 @@ struct system_variables
my_bool low_priority_updates;
my_bool new_mode;
my_bool query_cache_wlock_invalidate;
#ifdef HAVE_INNOBASE_DB
my_bool innodb_table_locks;
#endif /* HAVE_INNOBASE_DB */
my_bool old_passwords;
/* Only charset part of these variables is sensible */