From 523882ab71ee8b1d9e4c0f682b96069d5f8d83c9 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 30 Dec 2004 19:56:09 +0100 Subject: [PATCH 1/8] Fix for bug #7480 Mysqld crash in ha_ndbcluster using Query Browser BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- BitKeeper/etc/logging_ok | 1 + sql/ha_ndbcluster.cc | 77 +++++++++++++++++++++++++--------------- sql/ha_ndbcluster.h | 2 +- 3 files changed, 51 insertions(+), 29 deletions(-) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index 62b00f97f06..140d1359b08 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -100,6 +100,7 @@ lenz@mysql.com magnus@neptunus.(none) magnus@shellback.(none) marko@hundin.mysql.fi +marty@linux.site matt@mysql.com miguel@hegel.(none) miguel@hegel.br diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 145fd23ff43..f96114d8460 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -204,6 +204,12 @@ Thd_ndb::~Thd_ndb() delete ndb; } +inline +Ndb *ha_ndbcluster::get_ndb() +{ + return ((Thd_ndb*)current_thd->transaction.thd_ndb)->ndb; +} + /* * manage uncommitted insert/deletes during transactio to get records correct */ @@ -233,8 +239,9 @@ void ha_ndbcluster::records_update() info->no_uncommitted_rows_count)); // if (info->records == ~(ha_rows)0) { + Ndb *ndb= get_ndb(); Uint64 rows; - if(ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0) == 0){ + if(ndb_get_table_statistics(ndb, m_tabname, &rows, 0) == 0){ info->records= rows; } } @@ -311,7 +318,8 @@ int ha_ndbcluster::ndb_err(NdbConnection *trans) switch (err.classification) { case NdbError::SchemaError: { - NDBDICT *dict= m_ndb->getDictionary(); + Ndb *ndb= get_ndb(); + NDBDICT *dict= ndb->getDictionary(); DBUG_PRINT("info", ("invalidateTable %s", m_tabname)); dict->invalidateTable(m_tabname); table->version=0L; /* Free when thread is ready */ @@ -341,7 +349,7 @@ bool ha_ndbcluster::get_error_message(int error, 
DBUG_ENTER("ha_ndbcluster::get_error_message"); DBUG_PRINT("enter", ("error: %d", error)); - Ndb *ndb= ((Thd_ndb*)current_thd->transaction.thd_ndb)->ndb; + Ndb *ndb= get_ndb(); if (!ndb) DBUG_RETURN(FALSE); @@ -670,7 +678,8 @@ bool ha_ndbcluster::uses_blob_value(bool all_fields) int ha_ndbcluster::get_metadata(const char *path) { - NDBDICT *dict= m_ndb->getDictionary(); + Ndb *ndb= get_ndb(); + NDBDICT *dict= ndb->getDictionary(); const NDBTAB *tab; int error; bool invalidating_ndb_table= FALSE; @@ -745,7 +754,8 @@ int ha_ndbcluster::build_index_list(TABLE *tab, enum ILBP phase) static const char* unique_suffix= "$unique"; KEY* key_info= tab->key_info; const char **key_name= tab->keynames.type_names; - NdbDictionary::Dictionary *dict= m_ndb->getDictionary(); + Ndb *ndb= get_ndb(); + NdbDictionary::Dictionary *dict= ndb->getDictionary(); DBUG_ENTER("build_index_list"); // Save information about all known indexes @@ -1651,7 +1661,8 @@ int ha_ndbcluster::write_row(byte *record) if (table->primary_key == MAX_KEY) { // Table has hidden primary key - Uint64 auto_value= m_ndb->getAutoIncrementValue((const NDBTAB *) m_table); + Ndb *ndb= get_ndb(); + Uint64 auto_value= ndb->getAutoIncrementValue((const NDBTAB *) m_table); if (set_hidden_key(op, table->fields, (const byte*)&auto_value)) ERR_RETURN(op->getNdbError()); } @@ -1727,11 +1738,12 @@ int ha_ndbcluster::write_row(byte *record) } if ((has_auto_increment) && (skip_auto_increment)) { + Ndb *ndb= get_ndb(); Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1; DBUG_PRINT("info", ("Trying to set next auto increment value to %lu", (ulong) next_val)); - if (m_ndb->setAutoIncrementValue((const NDBTAB *) m_table, next_val, TRUE)) + if (ndb->setAutoIncrementValue((const NDBTAB *) m_table, next_val, TRUE)) DBUG_PRINT("info", ("Setting next auto increment value to %u", next_val)); } @@ -2536,8 +2548,11 @@ void ha_ndbcluster::info(uint flag) } else { + if ((my_errno= check_ndb_connection())) + DBUG_VOID_RETURN; + 
Ndb *ndb= get_ndb(); Uint64 rows; - if(ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0) == 0){ + if(ndb_get_table_statistics(ndb, m_tabname, &rows, 0) == 0){ records= rows; } } @@ -2863,6 +2878,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) DBUG_RETURN(1); Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb; + Ndb *ndb= thd_ndb->ndb; DBUG_PRINT("enter", ("transaction.thd_ndb->lock_count: %d", thd_ndb->lock_count)); @@ -2880,9 +2896,9 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) DBUG_ASSERT(!thd->transaction.stmt.ndb_tid); DBUG_PRINT("trans",("Starting transaction stmt")); - trans= m_ndb->startTransaction(); + trans= ndb->startTransaction(); if (trans == NULL) - ERR_RETURN(m_ndb->getNdbError()); + ERR_RETURN(ndb->getNdbError()); no_uncommitted_rows_reset(thd); thd->transaction.stmt.ndb_tid= trans; } @@ -2894,9 +2910,9 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) // A "master" transaction ha not been started yet DBUG_PRINT("trans",("starting transaction, all")); - trans= m_ndb->startTransaction(); + trans= ndb->startTransaction(); if (trans == NULL) - ERR_RETURN(m_ndb->getNdbError()); + ERR_RETURN(ndb->getNdbError()); no_uncommitted_rows_reset(thd); /* @@ -2935,7 +2951,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) retrieve_all_fields= FALSE; ops_pending= 0; { - NDBDICT *dict= m_ndb->getDictionary(); + NDBDICT *dict= ndb->getDictionary(); const NDBTAB *tab; void *tab_info; if (!(tab= dict->getTable(m_tabname, &tab_info))) @@ -2962,7 +2978,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) We must in this case close the transaction to release resources */ DBUG_PRINT("trans",("ending non-updating transaction")); - m_ndb->closeTransaction(m_active_trans); + ndb->closeTransaction(m_active_trans); thd->transaction.stmt.ndb_tid= 0; } } @@ -3008,16 +3024,17 @@ int ha_ndbcluster::start_stmt(THD *thd) NdbConnection *trans= (NdbConnection*)thd->transaction.stmt.ndb_tid; if (!trans){ + Ndb *ndb= 
((Thd_ndb*)thd->transaction.thd_ndb)->ndb; DBUG_PRINT("trans",("Starting transaction stmt")); NdbConnection *tablock_trans= (NdbConnection*)thd->transaction.all.ndb_tid; DBUG_PRINT("info", ("tablock_trans: %x", (uint)tablock_trans)); DBUG_ASSERT(tablock_trans); -// trans= m_ndb->hupp(tablock_trans); - trans= m_ndb->startTransaction(); +// trans= ndb->hupp(tablock_trans); + trans= ndb->startTransaction(); if (trans == NULL) - ERR_RETURN(m_ndb->getNdbError()); + ERR_RETURN(ndb->getNdbError()); no_uncommitted_rows_reset(thd); thd->transaction.stmt.ndb_tid= trans; } @@ -3363,7 +3380,8 @@ int ha_ndbcluster::create(const char *name, DBUG_RETURN(my_errno); // Create the table in NDB - NDBDICT *dict= m_ndb->getDictionary(); + Ndb *ndb= get_ndb(); + NDBDICT *dict= ndb->getDictionary(); if (dict->createTable(tab) != 0) { const NdbError err= dict->getNdbError(); @@ -3408,7 +3426,8 @@ int ha_ndbcluster::create_index(const char *name, KEY *key_info, bool unique) { - NdbDictionary::Dictionary *dict= m_ndb->getDictionary(); + Ndb *ndb= get_ndb(); + NdbDictionary::Dictionary *dict= ndb->getDictionary(); KEY_PART_INFO *key_part= key_info->key_part; KEY_PART_INFO *end= key_part + key_info->key_parts; @@ -3476,7 +3495,8 @@ int ha_ndbcluster::rename_table(const char *from, const char *to) int ha_ndbcluster::alter_table_name(const char *from, const char *to) { - NDBDICT *dict= m_ndb->getDictionary(); + Ndb *ndb= get_ndb(); + NDBDICT *dict= ndb->getDictionary(); const NDBTAB *orig_tab; DBUG_ENTER("alter_table_name_table"); DBUG_PRINT("enter", ("Renaming %s to %s", from, to)); @@ -3521,8 +3541,9 @@ int ha_ndbcluster::delete_table(const char *name) int ha_ndbcluster::drop_table() { - NdbDictionary::Dictionary *dict= m_ndb->getDictionary(); - + Ndb *ndb= get_ndb(); + NdbDictionary::Dictionary *dict= ndb->getDictionary(); + DBUG_ENTER("drop_table"); DBUG_PRINT("enter", ("Deleting %s", m_tabname)); @@ -3555,6 +3576,7 @@ longlong ha_ndbcluster::get_auto_increment() { 
DBUG_ENTER("get_auto_increment"); DBUG_PRINT("enter", ("m_tabname: %s", m_tabname)); + Ndb *ndb= get_ndb(); int cache_size= (rows_to_insert - rows_inserted < autoincrement_prefetch) ? rows_to_insert - rows_inserted @@ -3563,8 +3585,8 @@ longlong ha_ndbcluster::get_auto_increment() : autoincrement_prefetch; Uint64 auto_value= (skip_auto_increment) ? - m_ndb->readAutoIncrementValue((const NDBTAB *) m_table) - : m_ndb->getAutoIncrementValue((const NDBTAB *) m_table, cache_size); + ndb->readAutoIncrementValue((const NDBTAB *) m_table) + : ndb->getAutoIncrementValue((const NDBTAB *) m_table, cache_size); DBUG_RETURN((longlong)auto_value); } @@ -3577,7 +3599,6 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): handler(table_arg), m_active_trans(NULL), m_active_cursor(NULL), - m_ndb(NULL), m_table(NULL), m_table_info(NULL), m_table_flags(HA_REC_NOT_IN_SEQ | @@ -3700,7 +3721,6 @@ int ha_ndbcluster::close(void) DBUG_ENTER("close"); free_share(m_share); m_share= 0; release_metadata(); - m_ndb= NULL; DBUG_RETURN(0); } @@ -3761,11 +3781,12 @@ Ndb* check_ndb_in_thd(THD* thd) int ha_ndbcluster::check_ndb_connection() { THD* thd= current_thd; + Ndb *ndb; DBUG_ENTER("check_ndb_connection"); - if (!(m_ndb= check_ndb_in_thd(thd))) + if (!(ndb= check_ndb_in_thd(thd))) DBUG_RETURN(HA_ERR_NO_CONNECTION); - m_ndb->setDatabaseName(m_dbname); + ndb->setDatabaseName(m_dbname); DBUG_RETURN(0); } diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 8224d1c4167..47d2c8f1ab9 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -229,7 +229,6 @@ class ha_ndbcluster: public handler NdbConnection *m_active_trans; NdbResultSet *m_active_cursor; - Ndb *m_ndb; void *m_table; void *m_table_info; char m_dbname[FN_HEADLEN]; @@ -257,6 +256,7 @@ class ha_ndbcluster: public handler uint32 blobs_buffer_size; uint dupkey; + Ndb *get_ndb(); void set_rec_per_key(); void records_update(); void no_uncommitted_rows_execute_failure(); From 54b768472c3d304d18118ee8f88c7afe3ad92743 Mon Sep 17 
00:00:00 2001 From: unknown Date: Fri, 31 Dec 2004 12:46:18 +0100 Subject: [PATCH 2/8] - Bootstrap now uses "compile-dist" by default to create the source distribution Build-tools/Bootstrap: - Use BUILD/compile-dist instead of compile-pentium-max - some minor cleanups --- BUILD/compile-dist | 46 +++++++++++++++++++++++++++++++++++++++++++ Build-tools/Bootstrap | 44 +++++++++++++++++++++-------------------- 2 files changed, 69 insertions(+), 21 deletions(-) create mode 100755 BUILD/compile-dist diff --git a/BUILD/compile-dist b/BUILD/compile-dist new file mode 100755 index 00000000000..2344d4dfffd --- /dev/null +++ b/BUILD/compile-dist @@ -0,0 +1,46 @@ +#!/bin/sh +# +# This script's purpose is to update the automake/autoconf helper scripts and +# to run a plain "configure" without any special compile flags. Only features +# that affect the content of the source distribution are enabled. The resulting +# tree can then be picked up by "make dist" to create the "pristine source +# package" that is used as the basis for all other binary builds. +# +make distclean +aclocal +autoheader +libtoolize --automake --force --copy +automake --force --add-missing --copy +autoconf +(cd bdb/dist && sh s_all) +(cd innobase && aclocal && autoheader && aclocal && automake && autoconf) + +# Default to gcc for CC and CXX +if test -z "$CXX" ; then + export CXX=gcc +fi + +if test -z "$CC" ; then + export CC=gcc +fi + +# Use ccache, if available +if ccache -V > /dev/null 2>&1 +then + if ! (echo "$CC" | grep "ccache" > /dev/null) + then + export CC="ccache $CC" + fi + if ! 
(echo "$CXX" | grep "ccache" > /dev/null) + then + export CXX="ccache $CXX" + fi +fi + +# Make sure to enable all features that affect "make dist" +./configure \ + --with-embedded-server \ + --with-berkeley-db \ + --with-innodb \ + --enable-thread-safe-client +make diff --git a/Build-tools/Bootstrap b/Build-tools/Bootstrap index a7d347ba32f..fc36c51ec85 100755 --- a/Build-tools/Bootstrap +++ b/Build-tools/Bootstrap @@ -26,7 +26,7 @@ else } # Some predefined settings -$build_command= "BUILD/compile-pentium-max"; +$build_command= "BUILD/compile-dist"; $PWD= cwd(); $opt_docdir= $PWD . "/mysqldoc"; $opt_archive_log= undef; @@ -70,7 +70,7 @@ GetOptions( "test|t", "verbose|v", "win-dist|w", - "quiet|q", + "quiet|q", ) || print_help(""); # @@ -122,18 +122,8 @@ if (($opt_directory ne $PWD) && (!-d $opt_directory && !$opt_dry_run)) # if ($opt_pull) { - &logger("Updating BK tree $REPO to latest ChangeSet first"); - chdir ($REPO) or &abort("Could not chdir to $REPO!"); - &run_command("bk pull", "Could not update $REPO!"); - chdir ($PWD) or &abort("Could not chdir to $PWD!"); - - unless ($opt_skip_manual) - { - &logger("Updating manual tree in $opt_docdir"); - chdir ($opt_docdir) or &abort("Could not chdir to $opt_docdir!"); - &run_command("bk pull", "Could not update $opt_docdir!"); - chdir ($PWD) or &abort("Could not chdir to $PWD!"); - } + &bk_pull("$REPO"); + &bk_pull("$opt_docdir") unless ($opt_skip_manual); } # @@ -270,7 +260,7 @@ if (defined $opt_changelog) $command.= " " . $REPO . 
" > $target_dir/ChangeLog"; &logger($command); # We cannot use run_command here because of output redirection - if (!$opt_dry_run) + unless ($opt_dry_run) { system($command) == 0 or &abort("Could not create $target_dir/ChangeLog!"); } @@ -281,17 +271,17 @@ if (defined $opt_changelog) # unless ($opt_skip_manual) { - $msg= "Updating manual files"; - &logger($msg); + &logger("Updating manual files"); foreach $file qw/internals manual reservedwords/ { system ("bk cat $opt_docdir/Docs/$file.texi > $target_dir/Docs/$file.texi") == 0 or &abort("Could not update $file.texi in $target_dir/Docs/!"); } - system ("rm -f $target_dir/Docs/Images/Makefile*") == 0 - or &abort("Could not remove Makefiles in $target_dir/Docs/Images/!"); - system ("cp $opt_docdir/Docs/Images/*.* $target_dir/Docs/Images") == 0 - or &abort("Could not copy image files in $target_dir/Docs/Images/!"); + + &run_command("rm -f $target_dir/Docs/Images/Makefile*", + "Could not remove Makefiles in $target_dir/Docs/Images/!"); + &run_command("cp $opt_docdir/Docs/Images/*.* $target_dir/Docs/Images", + "Could not copy image files in $target_dir/Docs/Images/!"); } # @@ -377,6 +367,18 @@ if ($opt_archive_log) exit 0; +# +# Run a BK pull on the given BK tree +# +sub bk_pull +{ + my $bk_tree= $_[0]; + &logger("Updating BK tree $bk_tree to latest ChangeSet first"); + chdir ($bk_tree) or &abort("Could not chdir to $bk_tree!"); + &run_command("bk pull", "Could not update $bk_tree!"); + chdir ($PWD) or &abort("Could not chdir to $PWD!"); +} + # # Print the help text message (with an optional message on top) # From 0428fcb89ea71ef397b26c65e11ef77097a3a83f Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 31 Dec 2004 13:12:36 +0100 Subject: [PATCH 3/8] - updated compile-dist to include NDB cluster BUILD/compile-dist: - make sure to include NDB cluster in the distribution, too --- BUILD/compile-dist | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/BUILD/compile-dist b/BUILD/compile-dist index 
2344d4dfffd..f27c218747c 100755 --- a/BUILD/compile-dist +++ b/BUILD/compile-dist @@ -42,5 +42,6 @@ fi --with-embedded-server \ --with-berkeley-db \ --with-innodb \ - --enable-thread-safe-client + --enable-thread-safe-client \ + --with-ndbcluster make From cd47cb56fd76a47def597e7b486b89c65a65481d Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 31 Dec 2004 15:05:41 +0200 Subject: [PATCH 4/8] row0upd.c: Fix a little bug in InnoDB: we looked at the physical size of a stored SQL NULL value from a wrong field in the index; this has probably caused no bugs visible to the user, only caused some extra space usage in some rare cases; we may later backport the fix to 4.0 innobase/row/row0upd.c: Fix a little bug in InnoDB: we looked at the physical size of a stored SQL NULL value from a wrong field in the index; this has probably caused no bugs visible to the user, only caused some extra space usage in some rare cases; we may later backport the fix to 4.0 --- innobase/row/row0upd.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/innobase/row/row0upd.c b/innobase/row/row0upd.c index a449b9f1736..9192f6dc692 100644 --- a/innobase/row/row0upd.c +++ b/innobase/row/row0upd.c @@ -381,8 +381,14 @@ row_upd_changes_field_size_or_external( new_len = new_val->len; if (new_len == UNIV_SQL_NULL) { + /* A bug fixed on Dec 31st, 2004: we looked at the + SQL NULL size from the wrong field! We may backport + this fix also to 4.0. The merge to 5.0 will be made + manually immediately after we commit this to 4.1. 
*/ + new_len = dtype_get_sql_null_size( - dict_index_get_nth_type(index, i)); + dict_index_get_nth_type(index, + upd_field->field_no)); } old_len = rec_get_nth_field_size(rec, upd_field->field_no); From 56db297d002150216c1085550b39a8bec65caf64 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 1 Jan 2005 13:49:53 +0200 Subject: [PATCH 5/8] log0recv.c: Fix a wrong memset in InnoDB Hot Backup code; the bug probably did not affect anything since we do not assume that the header of a log file is filled with zeros before writing the header info there; the bug found by Felix von Leitner innobase/log/log0recv.c: Fix a wrong memset in InnoDB Hot Backup code; the bug probably did not affect anything since we do not assume that the header of a log file is filled with zeros before writing the header info there; the bug found by Felix von Leitner --- innobase/log/log0recv.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/innobase/log/log0recv.c b/innobase/log/log0recv.c index 10f921bb1f0..ae84f085523 100644 --- a/innobase/log/log0recv.c +++ b/innobase/log/log0recv.c @@ -2990,8 +2990,7 @@ recv_reset_log_files_for_backup( memcpy(name + log_dir_len, logfilename, sizeof logfilename); buf = ut_malloc(LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE); - memset(buf, LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE, '\0'); - + memset(buf, '\0', LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE); for (i = 0; i < n_log_files; i++) { From 3e2e4fb77fa30ca66849f9e05a30a7f339348a63 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 1 Jan 2005 19:27:41 +0100 Subject: [PATCH 6/8] mysql-test-run.pl: Added --ps-protocol and --embedded-server mysql-test/mysql-test-run.pl: Added --ps-protocol and --embedded-server --- mysql-test/mysql-test-run.pl | 125 ++++++++++++++++++++++------------- 1 file changed, 79 insertions(+), 46 deletions(-) diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index c90ebf22dad..a69dcdce5c6 100755 --- a/mysql-test/mysql-test-run.pl +++ 
b/mysql-test/mysql-test-run.pl @@ -95,6 +95,7 @@ $Devel::Trace::TRACE= 1; my @skip_if_embedded_server= ( + "alter_table", "bdb-deadlock", "connect", "flush_block_commit", @@ -148,6 +149,7 @@ our @mysqld_src_dirs= our $glob_win32= 0; our $glob_mysql_test_dir= undef; +our $glob_mysql_bench_dir= undef; our $glob_hostname= undef; our $glob_scriptname= undef; our $glob_use_running_server= 0; @@ -237,6 +239,8 @@ our $opt_skip_test; our $opt_sleep; +our $opt_ps_protocol; + # FIXME all of the sleep time handling needs cleanup our $opt_sleep_time_after_restart= 1; our $opt_sleep_time_for_delete= 10; @@ -301,7 +305,7 @@ sub mysqld_arguments ($$$$$); sub stop_masters_slaves (); sub stop_masters (); sub stop_slaves (); -sub run_mysqltest ($); +sub run_mysqltest ($$); ###################################################################### # @@ -396,6 +400,7 @@ sub initial_setup () { # 'basedir' is always parent of "mysql-test" directory $glob_mysql_test_dir= cwd(); $glob_basedir= dirname($glob_mysql_test_dir); + $glob_mysql_bench_dir= "$glob_basedir/mysql-bench"; # FIXME make configurable $path_timefile= "$glob_mysql_test_dir/var/log/mysqltest-time"; @@ -441,6 +446,7 @@ sub command_line_setup () { 'debug' => \$opt_debug, 'do-test=s' => \$opt_do_test, 'embedded-server' => \$opt_embedded_server, + 'ps-protocol' => \$opt_ps_protocol, 'extern' => \$opt_extern, 'fast' => \$opt_fast, 'force' => \$opt_force, @@ -458,6 +464,7 @@ sub command_line_setup () { 'netware' => \$opt_netware, 'no-manager' => \$opt_no_manager, 'old-master' => \$opt_old_master, + 'ps-protocol' => \$opt_ps_protocol, 'record' => \$opt_record, 'script-debug' => \$opt_script_debug, 'skip-rpl' => \$opt_skip_rpl, @@ -526,7 +533,7 @@ sub command_line_setup () { if ( $opt_extern and $opt_local ) { - die "Can't use --extern and --local at the same time"; + mtr_error("Can't use --extern and --local at the same time"); } if ( ! 
$opt_socket ) @@ -568,7 +575,7 @@ sub command_line_setup () { if ( $opt_extern ) { - die "Can't use --extern with --embedded-server"; + mtr_error("Can't use --extern with --embedded-server"); } $opt_result_ext= ".es"; } @@ -589,12 +596,14 @@ sub command_line_setup () { $opt_sleep_time_after_restart= $opt_sleep; } - if ( $opt_gcov ) + if ( $opt_gcov and ! $opt_source_dist ) { - if ( $opt_source_dist ) - { - die "Coverage test needs the source - please use source dist"; - } + mtr_error("Coverage test needs the source - please use source dist"); + } + + if ( $glob_use_embedded_server and ! $opt_source_dist ) + { + mtr_error("Embedded server needs source tree - please use source dist"); } if ( $opt_gdb ) @@ -602,7 +611,7 @@ sub command_line_setup () { $opt_wait_timeout= 300; if ( $opt_extern ) { - die "Can't use --extern with --gdb"; + mtr_error("Can't use --extern with --gdb"); } } @@ -611,7 +620,7 @@ sub command_line_setup () { $opt_gdb= 1; if ( $opt_extern ) { - die "Can't use --extern with --manual-gdb"; + mtr_error("Can't use --extern with --manual-gdb"); } } @@ -619,7 +628,7 @@ sub command_line_setup () { { if ( $opt_extern ) { - die "Can't use --extern with --ddd"; + mtr_error("Can't use --extern with --ddd"); } } @@ -689,10 +698,10 @@ sub executable_setup () { { mtr_error("Cannot find embedded server 'mysqltest'"); } + $path_tests_bindir= "$glob_basedir/libmysqld/examples"; } else { - $exe_mysqld= "$glob_basedir/sql/mysqld"; if ( -f "$glob_basedir/client/.libs/lt-mysqltest" ) { $exe_mysqltest= "$glob_basedir/client/.libs/lt-mysqltest"; @@ -705,6 +714,7 @@ sub executable_setup () { { $exe_mysqltest= "$glob_basedir/client/mysqltest"; } + $path_tests_bindir= "$glob_basedir/tests"; } if ( -f "$glob_basedir/client/.libs/mysqldump" ) { @@ -723,8 +733,8 @@ sub executable_setup () { $exe_mysqlbinlog= "$glob_basedir/client/mysqlbinlog"; } + $exe_mysqld= "$glob_basedir/sql/mysqld"; $path_client_bindir= "$glob_basedir/client"; - $path_tests_bindir= "$glob_basedir/tests"; 
$exe_mysqladmin= "$path_client_bindir/mysqladmin"; $exe_mysql= "$path_client_bindir/mysql"; $path_language= "$glob_basedir/sql/share/english/"; @@ -791,7 +801,7 @@ sub handle_int_signal () { $SIG{INT}= 'DEFAULT'; # If we get a ^C again, we die... mtr_warning("got INT signal, cleaning up....."); stop_masters_slaves(); - exit(1); + mtr_error("We die from ^C signal from user"); } @@ -806,7 +816,7 @@ sub collect_test_cases () { my @tests; # Array of hash, will be array of C struct - opendir(TESTDIR, $testdir) or die "Can't open dir \"$testdir\": $!"; + opendir(TESTDIR, $testdir) or mtr_error("Can't open dir \"$testdir\": $!"); foreach my $elem ( sort readdir(TESTDIR) ) { my $tname= mtr_match_extension($elem,"test"); @@ -1066,7 +1076,7 @@ sub sleep_until_file_created ($$) { if ( ! -r $pidfile ) { - die "No $pidfile was created"; + mtr_error("No $pidfile was created"); } } @@ -1084,7 +1094,7 @@ sub ndbcluster_start () { mtr_report("Starting ndbcluster"); my $ndbcluster_opts= $opt_bench ? "" : "--small"; # FIXME check result code?! - mtr_run("./ndb/ndbcluster", + mtr_run("$glob_mysql_test_dir/ndb/ndbcluster", ["--port-base=$opt_ndbcluster_port", $ndbcluster_opts, "--diskless", @@ -1094,7 +1104,7 @@ sub ndbcluster_start () { } sub ndbcluster_stop () { - mtr_run("./ndb/ndbcluster", + mtr_run("$glob_mysql_test_dir/ndb/ndbcluster", ["--data-dir=$glob_mysql_test_dir/var", "--port-base=$opt_ndbcluster_port", "--stop"], @@ -1142,17 +1152,17 @@ sub run_benchmarks ($) { if ( ! $benchmark ) { mtr_add_arg($args, "--log"); - mtr_run("./run-all-tests", $args, "", "", "", ""); + mtr_run("$glob_mysql_bench_dir/run-all-tests", $args, "", "", "", ""); # FIXME check result code?! } elsif ( -x $benchmark ) { - mtr_run("./$benchmark", $args, "", "", "", ""); + mtr_run("$glob_mysql_bench_dir/$benchmark", $args, "", "", "", ""); # FIXME check result code?! 
} else { - mtr_error("benchmark $benchmark not found"); + mtr_error("Benchmark $benchmark not found"); } chdir($glob_mysql_test_dir); # Go back @@ -1172,6 +1182,8 @@ sub run_benchmarks ($) { sub run_tests () { + mtr_report("Finding Tests"); + my $tests= collect_test_cases(); mtr_report("Starting Tests"); @@ -1255,7 +1267,7 @@ sub install_db ($$) { if ( mtr_run($exe_mysqld, $args, $init_db_sql, $path_manager_log, $path_manager_log, "") != 0 ) { - mtr_error("error executing mysqld --bootstrap\n" . + mtr_error("Error executing mysqld --bootstrap\n" . "Could not install $type test DBs"); } } @@ -1293,6 +1305,7 @@ sub run_testcase ($) { if ( $tinfo->{'skip'} ) { + mtr_report_test_name($tinfo); mtr_report_test_skipped($tinfo); return; } @@ -1323,14 +1336,24 @@ sub run_testcase ($) { # ---------------------------------------------------------------------- stop_slaves(); + } - # ---------------------------------------------------------------------- - # Start masters - # ---------------------------------------------------------------------- + # ---------------------------------------------------------------------- + # Prepare to start masters. Even if we use embedded, we want to run + # the preparation. + # ---------------------------------------------------------------------- - mtr_tofile($master->[0]->{'path_myerr'},"CURRENT_TEST: $tname\n"); - do_before_start_master($tname,$tinfo->{'master_sh'}); + mtr_tofile($master->[0]->{'path_myerr'},"CURRENT_TEST: $tname\n"); + do_before_start_master($tname,$tinfo->{'master_sh'}); + # ---------------------------------------------------------------------- + # Start masters + # ---------------------------------------------------------------------- + + mtr_report_test_name($tinfo); + + if ( ! $glob_use_running_server and ! $glob_use_embedded_server ) + { # FIXME give the args to the embedded server?! # FIXME what does $opt_local_master mean?! 
# FIXME split up start and check that started so that can do @@ -1385,9 +1408,7 @@ sub run_testcase ($) { unlink("r/$tname.reject"); unlink($path_timefile); - mtr_report_test_name($tinfo); - - my $res= run_mysqltest($tinfo); + my $res= run_mysqltest($tinfo, $tinfo->{'master_opt'}); if ( $res == 0 ) { @@ -1470,7 +1491,7 @@ sub do_before_start_master ($$) { if ( $master_init_script and mtr_run($master_init_script, [], "", "", "", "") != 0 ) { - mtr_error("can't run $master_init_script"); + mtr_error("Can't run $master_init_script"); } # for gcov FIXME needed? If so we need more absolute paths # chdir($glob_basedir); @@ -1501,7 +1522,7 @@ sub do_before_start_slave ($$) { if ( $slave_init_script and mtr_run($slave_init_script, [], "", "", "", "") != 0 ) { - mtr_error("can't run $slave_init_script"); + mtr_error("Can't run $slave_init_script"); } unlink("$glob_mysql_test_dir/var/slave-data/log.*"); @@ -1525,9 +1546,11 @@ sub mysqld_arguments ($$$$$) { if ( $glob_use_embedded_server ) { $prefix= "--server-arg="; + } else { + # We can't pass embedded server --no-defaults + mtr_add_arg($args, "%s--no-defaults", $prefix); } - mtr_add_arg($args, "%s--no-defaults", $prefix); mtr_add_arg($args, "%s--basedir=%s", $prefix, $path_my_basedir); mtr_add_arg($args, "%s--character-sets-dir=%s", $prefix, $path_charsetsdir); mtr_add_arg($args, "%s--core", $prefix); @@ -1815,7 +1838,7 @@ sub mysqld_start ($$$$) { } } - die "Can't start mysqld FIXME"; + mtr_error("Can't start mysqld FIXME"); } sub stop_masters_slaves () { @@ -1870,8 +1893,9 @@ sub stop_slaves () { } -sub run_mysqltest ($) { - my $tinfo= shift; +sub run_mysqltest ($$) { + my $tinfo= shift; + my $master_opts= shift; # FIXME set where???? my $cmdline_mysqldump= "$exe_mysqldump --no-defaults -uroot " . 
@@ -1901,19 +1925,11 @@ sub run_mysqltest ($) { $ENV{'CLIENT_BINDIR'}= $path_client_bindir; $ENV{'TESTS_BINDIR'}= $path_tests_bindir; - my $exe= $exe_mysqltest; - my $args; # Arg vector + my $exe= $exe_mysqltest; + my $args; mtr_init_args(\$args); - if ( $opt_strace_client ) - { - $exe= "strace"; # FIXME there are ktrace, .... - mtr_add_arg($args, "-o"); - mtr_add_arg($args, "%s/var/log/mysqltest.strace", $glob_mysql_test_dir); - mtr_add_arg($args, "$exe_mysqltest"); - } - mtr_add_arg($args, "--no-defaults"); mtr_add_arg($args, "--socket=%s", $master->[0]->{'path_mysock'}); mtr_add_arg($args, "--database=test"); @@ -1925,6 +1941,19 @@ sub run_mysqltest ($) { mtr_add_arg($args, "--tmpdir=%s", $opt_tmpdir); mtr_add_arg($args, "--port=%d", $master->[0]->{'path_myport'}); + if ( $opt_ps_protocol ) + { + mtr_add_arg($args, "--ps-protocol"); + } + + if ( $opt_strace_client ) + { + $exe= "strace"; # FIXME there are ktrace, .... + mtr_add_arg($args, "-o"); + mtr_add_arg($args, "%s/var/log/mysqltest.strace", $glob_mysql_test_dir); + mtr_add_arg($args, "$exe_mysqltest"); + } + if ( $opt_timer ) { mtr_add_arg($args, "--timer-file=var/log/timer"); @@ -1966,6 +1995,10 @@ sub run_mysqltest ($) { mtr_add_arg($args, "-R"); mtr_add_arg($args, $tinfo->{'result_file'}); + # ---------------------------------------------------------------------- + # If embedded server, we create server args to give mysqltest to pass on + # ---------------------------------------------------------------------- + if ( $glob_use_embedded_server ) { mysqld_arguments($args,'master',0,$tinfo->{'master_opt'},[]); From e2d17faa2bfb291c51581b96a48968681b9d16f8 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 1 Jan 2005 22:40:40 +0100 Subject: [PATCH 7/8] Fixed failed merge --- sql/ha_ndbcluster.cc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 699a75a94c7..9e50ef9ed2a 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ 
-2784,7 +2784,7 @@ int ha_ndbcluster::extra(enum ha_extra_function operation) { DBUG_PRINT("info", ("Ignoring duplicate key")); m_ignore_dup_key= TRUE; - + } break; case HA_EXTRA_NO_IGNORE_DUP_KEY: DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY")); @@ -3661,15 +3661,16 @@ int ha_ndbcluster::rename_table(const char *from, const char *to) if (check_ndb_connection()) DBUG_RETURN(my_errno= HA_ERR_NO_CONNECTION); - - dict= m_ndb->getDictionary(); + + Ndb *ndb= get_ndb(); + dict= ndb->getDictionary(); if (!(orig_tab= dict->getTable(m_tabname))) ERR_RETURN(dict->getNdbError()); m_table= (void *)orig_tab; // Change current database to that of target table set_dbname(to); - m_ndb->setDatabaseName(m_dbname); + ndb->setDatabaseName(m_dbname); if (!(result= alter_table_name(new_tabname))) { // Rename .ndb file @@ -3688,7 +3689,6 @@ int ha_ndbcluster::alter_table_name(const char *to) { Ndb *ndb= get_ndb(); NDBDICT *dict= ndb->getDictionary(); - NDBDICT * dict= m_ndb->getDictionary(); const NDBTAB *orig_tab= (const NDBTAB *) m_table; int ret; DBUG_ENTER("alter_table_name_table"); From 7a18eb7a6bee68767b0b2d59722d0bdfe551f7ce Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 1 Jan 2005 22:47:50 +0100 Subject: [PATCH 8/8] Fixed failed merge --- sql/ha_ndbcluster.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 9e50ef9ed2a..3c6cd83d5dc 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -212,6 +212,7 @@ Thd_ndb::~Thd_ndb() { if (ndb) delete ndb; + ndb= 0; } inline @@ -2687,7 +2688,6 @@ void ha_ndbcluster::info(uint flag) { DBUG_PRINT("info", ("HA_STATUS_ERRKEY")); errkey= m_dupkey; - errkey= m_dupkey; } if (flag & HA_STATUS_AUTO) DBUG_PRINT("info", ("HA_STATUS_AUTO"));