From 11652c1f45f1c6be6c15abfe926da1cef1b8b4f9 Mon Sep 17 00:00:00 2001
From: unknown
Date: Mon, 18 Apr 2005 07:26:23 +0400
Subject: [PATCH 01/11] Fix for BUG#9298: Make int->string conversion
 sign-aware in Protocol_simple::store_long

mysql-test/r/group_by.result:
  Testcase for BUG#9298
mysql-test/t/group_by.test:
  Testcase for BUG#9298
---
 mysql-test/r/group_by.result | 9 +++++++++
 mysql-test/t/group_by.test   | 7 +++++++
 sql/protocol.cc              | 2 +-
 3 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/mysql-test/r/group_by.result b/mysql-test/r/group_by.result
index a8766907fa5..b0c00a51722 100644
--- a/mysql-test/r/group_by.result
+++ b/mysql-test/r/group_by.result
@@ -702,3 +702,12 @@ c
 val-74
 val-98
 drop table t1,t2;
+create table t1 (b int4 unsigned not null);
+insert into t1 values(3000000000);
+select * from t1;
+b
+3000000000
+select min(b) from t1;
+min(b)
+3000000000
+drop table t1;
diff --git a/mysql-test/t/group_by.test b/mysql-test/t/group_by.test
index 46e58cd00fd..fbd39019e6d 100644
--- a/mysql-test/t/group_by.test
+++ b/mysql-test/t/group_by.test
@@ -515,3 +515,10 @@ explain select c from t2 where a = 2 and b = 'val-2' group by c;
 select c from t2 where a = 2 and b = 'val-2' group by c;
 drop table t1,t2;
+# Test for BUG#9298 "Wrong handling of int4 unsigned columns in GROUP functions"
+# (the actual problem was with protocol code, not GROUP BY)
+create table t1 (b int4 unsigned not null);
+insert into t1 values(3000000000);
+select * from t1;
+select min(b) from t1;
+drop table t1;
diff --git a/sql/protocol.cc b/sql/protocol.cc
index 773bbe697a3..485605ce8cd 100644
--- a/sql/protocol.cc
+++ b/sql/protocol.cc
@@ -810,7 +810,7 @@ bool Protocol_simple::store_long(longlong from)
 #endif
   char buff[20];
   return net_store_data((char*) buff,
-                        (uint) (int10_to_str((int) from,buff, -10)-buff));
+                        (uint) (int10_to_str((int)from,buff, (from <0)?-10:10)-buff));
 }

From 32d2b6ed04bc75173e90531c2dd3f82b27606734 Mon Sep 17 00:00:00 2001
From: unknown
Date: Mon, 25 Apr 2005 11:31:16 +0200
Subject: [PATCH 02/11] Post review fix of ndbcluster_drop_database

---
 sql/ha_ndbcluster.cc | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 230ca2826b2..bb20cedec2e 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) 2000-2003 MySQL AB
+ /* Copyright (C) 2000-2003 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
@@ -4327,6 +4327,7 @@ int ndbcluster_drop_database(const char *path)
   uint i;
   char *tabname;
   List<char> drop_list;
+  int ret= 0;
   ha_ndbcluster::set_dbname(path, (char *)&dbname);
   DBUG_PRINT("enter", ("db: %s", dbname));
@@ -4353,10 +4354,15 @@ int ndbcluster_drop_database(const char *path)
   ndb->setDatabaseName(dbname);
   List_iterator_fast<char> it(drop_list);
   while ((tabname=it++))
-    if (dict->dropTable(tabname))
-      ERR_RETURN(dict->getNdbError());
-
-  DBUG_RETURN(0);
+  {
+    if (!dict->dropTable(tabname))
+    {
+      const NdbError err= dict->getNdbError();
+      if (err.code != 709)
+        ret= ndb_to_mysql_error(&err);
+    }
+  }
+  DBUG_RETURN(ret);
 }
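
Editor's note on PATCH 01/11 (annotation, not part of the series): MySQL's
int10_to_str() uses the sign of its radix argument to pick signedness: radix
-10 formats the value as signed, radix 10 as unsigned. The old code passed
-10 unconditionally, so an int4 unsigned value such as 3000000000, which
does not fit in a signed 32-bit int, reached the client as a negative
number. A minimal standalone C illustration of the wraparound (plain printf
stands in for int10_to_str; this is not MySQL code):

  #include <stdio.h>

  int main(void)
  {
    unsigned int b= 3000000000u;       /* the int4 unsigned column value   */
    printf("signed:   %d\n", (int) b); /* -1294967296: what radix -10 gave */
    printf("unsigned: %u\n", b);       /* 3000000000: what radix 10 gives  */
    return 0;
  }
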
From d490e4def8ee4c4289f6765e57a9dd68a1bf6fcc Mon Sep 17 00:00:00 2001
From: unknown
Date: Mon, 25 Apr 2005 11:34:47 +0200
Subject: [PATCH 03/11] Post review fix of ndbcluster_drop_database, print
 error

---
 sql/ha_ndbcluster.cc | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index bb20cedec2e..2077fcb8f2d 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -4359,7 +4359,10 @@ int ndbcluster_drop_database(const char *path)
     {
       const NdbError err= dict->getNdbError();
       if (err.code != 709)
+      {
+        ERR_PRINT(err);
         ret= ndb_to_mysql_error(&err);
+      }
     }
   }
   DBUG_RETURN(ret);

From 86c75663b414e3bc3a24b2baade8caf8799aa503 Mon Sep 17 00:00:00 2001
From: unknown
Date: Mon, 25 Apr 2005 11:54:00 +0200
Subject: [PATCH 04/11] Post review fix of ndbcluster_drop_database, typo

---
 sql/ha_ndbcluster.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 2077fcb8f2d..8c12cccf5ee 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -4355,7 +4355,7 @@ int ndbcluster_drop_database(const char *path)
   List_iterator_fast<char> it(drop_list);
   while ((tabname=it++))
   {
-    if (!dict->dropTable(tabname))
+    if (dict->dropTable(tabname))
     {
       const NdbError err= dict->getNdbError();
       if (err.code != 709)
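
Editor's note on PATCHES 02-04/11 (annotation, not part of the series):
taken together, these three patches turn ndbcluster_drop_database() from
"abort on the first failed drop" into "try every table, ignore NDB error
709 (table already gone), print and convert any real error, and return the
last one". Patch 04 matters because dropTable() follows the usual
0-on-success convention; the inverted test introduced in patch 02 treated
every successful drop as a failure. A self-contained sketch of the
corrected loop shape (the dictionary type and drop_table() are stand-ins,
not the real NDB API):

  #include <stdio.h>

  #define ERR_NO_SUCH_TABLE 709           /* NDB: table is already gone */

  struct dict { int last_error; };

  /* Stand-in for Dictionary::dropTable(): returns 0 on success. */
  static int drop_table(struct dict *d, const char *name)
  {
    if (name[0] == 'x')                   /* simulate one missing table */
    {
      d->last_error= ERR_NO_SUCH_TABLE;
      return -1;
    }
    printf("dropped %s\n", name);
    return 0;
  }

  int main(void)
  {
    const char *tables[]= { "t1", "x_gone", "t2" };
    struct dict d= { 0 };
    int ret= 0;                           /* patch 02: remember, don't abort */
    size_t i;

    for (i= 0; i < sizeof(tables)/sizeof(tables[0]); i++)
    {
      if (drop_table(&d, tables[i]))      /* patch 04: non-zero is failure */
      {
        if (d.last_error != ERR_NO_SUCH_TABLE)
          ret= d.last_error;              /* keep the last real error */
      }
    }
    printf("ret=%d\n", ret);              /* 0: error 709 was ignored */
    return 0;
  }
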
From 27a6a8146417829d9edfcb8fb86306f985e03bd2 Mon Sep 17 00:00:00 2001
From: unknown
Date: Wed, 27 Apr 2005 18:17:41 +0200
Subject: [PATCH 05/11] Fix for avoiding getting Invalid schema object
 version when doing local changes

---
 mysql-test/r/ndb_multi.result        | 22 +++++++++++-
 mysql-test/t/ndb_multi.test          | 24 +++++++++++++
 ndb/include/ndbapi/NdbDictionary.hpp |  5 ++-
 ndb/src/ndbapi/NdbDictionaryImpl.cpp | 11 +++---
 sql/ha_ndbcluster.cc                 | 54 +++++++++++++++++++++-------
 sql/ha_ndbcluster.h                  |  4 +--
 6 files changed, 99 insertions(+), 21 deletions(-)

diff --git a/mysql-test/r/ndb_multi.result b/mysql-test/r/ndb_multi.result
index 5696fda1c07..2080be241e8 100644
--- a/mysql-test/r/ndb_multi.result
+++ b/mysql-test/r/ndb_multi.result
@@ -13,6 +13,26 @@ a
 show status like 'handler_discover%';
 Variable_name	Value
 Handler_discover	0
+select * from t1;
+a
+2
+drop table t1;
+create table t1 (a int) engine=ndbcluster;
+insert into t1 value (2);
+select * from t1;
+a
+2
+show status like 'handler_discover%';
+Variable_name	Value
+Handler_discover	0
+drop table t1;
+create table t1 (a int) engine=ndbcluster;
+insert into t1 value (2);
+select * from t1;
+ERROR HY000: Got error 241 'Invalid schema object version' from ndbcluster
+select * from t1;
+a
+2
 flush status;
 select * from t1;
 a
@@ -20,7 +40,7 @@ a
 update t1 set a=3 where a=2;
 show status like 'handler_discover%';
 Variable_name	Value
-Handler_discover	1
+Handler_discover	0
 create table t3 (a int not null primary key, b varchar(22),
 c int, last_col text) engine=ndb;
 insert into t3 values(1, 'Hi!', 89, 'Longtext column');
diff --git a/mysql-test/t/ndb_multi.test b/mysql-test/t/ndb_multi.test
index 24651913a79..85950c72cf9 100644
--- a/mysql-test/t/ndb_multi.test
+++ b/mysql-test/t/ndb_multi.test
@@ -18,6 +18,30 @@ select * from t1;
 select * from t2;
 show status like 'handler_discover%';

+# Check dropping and recreating table on same server
+connect (con1,localhost,,,test);
+connect (con2,localhost,,,test);
+connection con1;
+select * from t1;
+connection con2;
+drop table t1;
+create table t1 (a int) engine=ndbcluster;
+insert into t1 value (2);
+connection con1;
+select * from t1;
+
+# Check dropping and recreating table on different server
+connection server2;
+show status like 'handler_discover%';
+drop table t1;
+create table t1 (a int) engine=ndbcluster;
+insert into t1 value (2);
+connection server1;
+# Currently a retry is required remotely
+--error 1296
+select * from t1;
+select * from t1;
+
 # Connect to server2 and use the tables from there
 connection server2;
 flush status;
diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp
index 6aa675a2319..0e4f506c604 100644
--- a/ndb/include/ndbapi/NdbDictionary.hpp
+++ b/ndb/include/ndbapi/NdbDictionary.hpp
@@ -75,8 +75,11 @@ public:
     Changed,    ///< The object has been modified in memory
                 ///< and has to be commited in NDB Kernel for
                 ///< changes to take effect
-    Retrieved   ///< The object exist and has been read
+    Retrieved,  ///< The object exist and has been read
                 ///< into main memory from NDB Kernel
+    Invalid     ///< The object has been invalidated
+                ///< and should not be used
+
   };

   /**
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index fb2e0d673cd..4523ae2c261 100644
--- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -1448,6 +1448,7 @@ int NdbDictionaryImpl::alterTable(NdbTableImpl &impl)
     // If in local cache it must be in global
     if (!cachedImpl)
       abort();
+    cachedImpl->m_status = NdbDictionary::Object::Invalid;
     m_globalHash->drop(cachedImpl);
     m_globalHash->unlock();
   }
@@ -1747,8 +1748,8 @@ NdbDictionaryImpl::dropTable(const char * name)
     DBUG_PRINT("info",("INCOMPATIBLE_VERSION internal_name: %s",
                        internalTableName));
     m_localHash.drop(internalTableName);
-
     m_globalHash->lock();
+    tab->m_status = NdbDictionary::Object::Invalid;
     m_globalHash->drop(tab);
     m_globalHash->unlock();
     DBUG_RETURN(dropTable(name));
@@ -1792,10 +1793,11 @@ NdbDictionaryImpl::dropTable(NdbTableImpl & impl)
   int ret = m_receiver.dropTable(impl);
   if(ret == 0 || m_error.code == 709){
     const char * internalTableName = impl.m_internalName.c_str();
+
     m_localHash.drop(internalTableName);
-
     m_globalHash->lock();
+    impl.m_status = NdbDictionary::Object::Invalid;
     m_globalHash->drop(&impl);
     m_globalHash->unlock();
@@ -1889,6 +1891,7 @@ NdbDictionaryImpl::invalidateObject(NdbTableImpl & impl)
   m_localHash.drop(internalTableName);
   m_globalHash->lock();
+  impl.m_status = NdbDictionary::Object::Invalid;
   m_globalHash->drop(&impl);
   m_globalHash->unlock();
   return 0;
@@ -2152,8 +2155,8 @@ NdbDictionaryImpl::dropIndex(const char * indexName,
       m_ndb.internalizeTableName(indexName); // Index is also a table
     m_localHash.drop(internalIndexName);
-
     m_globalHash->lock();
+    idx->m_table->m_status = NdbDictionary::Object::Invalid;
     m_globalHash->drop(idx->m_table);
     m_globalHash->unlock();
     return dropIndex(indexName, tableName);
@@ -2187,8 +2190,8 @@ NdbDictionaryImpl::dropIndex(NdbIndexImpl & impl, const char * tableName)
   int ret = m_receiver.dropIndex(impl, *timpl);
   if(ret == 0){
     m_localHash.drop(internalIndexName);
-
     m_globalHash->lock();
+    impl.m_table->m_status = NdbDictionary::Object::Invalid;
     m_globalHash->drop(impl.m_table);
     m_globalHash->unlock();
   }
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 8c12cccf5ee..0d6bfcb9d0d 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -331,11 +331,14 @@ void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd)
     # The mapped error code
 */

-void ha_ndbcluster::invalidateDictionaryCache()
+void ha_ndbcluster::invalidate_dictionary_cache(bool global)
 {
   NDBDICT *dict= get_ndb()->getDictionary();
   DBUG_PRINT("info", ("invalidating %s", m_tabname));
-  dict->invalidateTable(m_tabname);
+  if (global)
+    dict->invalidateTable(m_tabname);
+  else
+    dict->removeCachedTable(m_tabname);
   table->version=0L;  /* Free when thread is ready */
   /* Invalidate indexes */
   for (uint i= 0; i < table->keys; i++)
@@ -347,12 +350,21 @@ void ha_ndbcluster::invalidate_dictionary_cache(bool global)
     switch(idx_type) {
     case(PRIMARY_KEY_ORDERED_INDEX):
     case(ORDERED_INDEX):
-      dict->invalidateIndex(index->getName(), m_tabname);
+      if (global)
+        dict->invalidateIndex(index->getName(), m_tabname);
+      else
+        dict->removeCachedIndex(index->getName(), m_tabname);
       break;
     case(UNIQUE_ORDERED_INDEX):
-      dict->invalidateIndex(index->getName(), m_tabname);
+      if (global)
+        dict->invalidateIndex(index->getName(), m_tabname);
+      else
+        dict->removeCachedIndex(index->getName(), m_tabname);
     case(UNIQUE_INDEX):
-      dict->invalidateIndex(unique_index->getName(), m_tabname);
+      if (global)
+        dict->invalidateIndex(unique_index->getName(), m_tabname);
+      else
+        dict->removeCachedIndex(unique_index->getName(), m_tabname);
       break;
     case(PRIMARY_KEY_INDEX):
     case(UNDEFINED_INDEX):
@@ -371,7 +383,7 @@ int ha_ndbcluster::ndb_err(NdbConnection *trans)
   switch (err.classification) {
   case NdbError::SchemaError:
   {
-    invalidateDictionaryCache();
+    invalidate_dictionary_cache(TRUE);

     if (err.code==284)
     {
@@ -767,7 +779,13 @@ int ha_ndbcluster::get_metadata(const char *path)
     if (!(tab= dict->getTable(m_tabname)))
       ERR_RETURN(dict->getNdbError());
-    DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
+    if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
+    {
+      invalidate_dictionary_cache(FALSE);
+      if (!(tab= dict->getTable(m_tabname)))
+        ERR_RETURN(dict->getNdbError());
+      DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
+    }
     /*
       Compare FrmData in NDB with frm file from disk.
     */
@@ -786,7 +804,7 @@ int ha_ndbcluster::get_metadata(const char *path)
       if (!invalidating_ndb_table)
       {
         DBUG_PRINT("info", ("Invalidating table"));
-        invalidateDictionaryCache();
+        invalidate_dictionary_cache(TRUE);
         invalidating_ndb_table= TRUE;
       }
       else
@@ -812,7 +830,7 @@ int ha_ndbcluster::get_metadata(const char *path)
   if (error)
     DBUG_RETURN(error);

-  m_tableVersion= tab->getObjectVersion();
+  m_table_version= tab->getObjectVersion();
   m_table= (void *)tab;
   m_table_info= NULL; // Set in external lock
@@ -3226,15 +3244,25 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
       void *tab_info;
       if (!(tab= dict->getTable(m_tabname, &tab_info)))
         ERR_RETURN(dict->getNdbError());
-      DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
-      if (m_table != (void *)tab || m_tableVersion != tab->getObjectVersion())
+      DBUG_PRINT("info", ("Table schema version: %d",
+                          tab->getObjectVersion()));
+      // Check if thread has stale local cache
+      if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
+      {
+        invalidate_dictionary_cache(FALSE);
+        if (!(tab= dict->getTable(m_tabname)))
+          ERR_RETURN(dict->getNdbError());
+        DBUG_PRINT("info", ("Table schema version: %d",
+                            tab->getObjectVersion()));
+      }
+      if (m_table != (void *)tab || m_table_version < tab->getObjectVersion())
       {
         /*
           The table has been altered, refresh the index list
         */
         build_index_list(ndb, table, ILBP_OPEN);
         m_table= (void *)tab;
-        m_tableVersion = tab->getObjectVersion();
+        m_table_version = tab->getObjectVersion();
       }
       m_table_info= tab_info;
     }
@@ -4006,7 +4034,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
   m_active_trans(NULL),
   m_active_cursor(NULL),
   m_table(NULL),
-  m_tableVersion(-1),
+  m_table_version(-1),
   m_table_info(NULL),
   m_table_flags(HA_REC_NOT_IN_SEQ |
                 HA_NULL_IN_KEY |
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index ac2d27b9ec7..7de5dd503e7 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -203,7 +203,7 @@ class ha_ndbcluster: public handler
   void print_results();

   longlong get_auto_increment();
-  void invalidateDictionaryCache();
+  void invalidate_dictionary_cache(bool global);
   int ndb_err(NdbConnection*);
   bool uses_blob_value(bool all_fields);

@@ -215,7 +215,7 @@ class ha_ndbcluster: public handler
   NdbConnection *m_active_trans;
   NdbResultSet *m_active_cursor;
   void *m_table;
-  int m_tableVersion;
+  int m_table_version;
   void *m_table_info;
   char m_dbname[FN_HEADLEN];
   //char m_schemaname[FN_HEADLEN];
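
Editor's note on PATCH 05/11 (annotation, not part of the series): the patch
splits dictionary-cache invalidation into a global flavour (mark the shared
object dead for everyone) and a local flavour (drop only this thread's
cached copy). The new Object::Invalid status is the handshake between the
two: the thread that drops or alters a table invalidates globally, and any
other thread still holding a pointer to the old object sees Invalid, drops
its local copy and re-reads, instead of failing with error 241 "Invalid
schema object version". A compact standalone model of that handshake (all
types and names invented for the sketch):

  #include <stdio.h>

  enum status { RETRIEVED, INVALID };

  struct table_def { int version; enum status st; };

  static struct table_def global_cache= { 1, RETRIEVED };  /* shared copy */

  /* What a DDL statement does: mark the old shared object dead. */
  static void invalidate_global(void)
  {
    global_cache.st= INVALID;
  }

  /* What each handler now does before trusting its local pointer. */
  static struct table_def *get_table(struct table_def *local)
  {
    if (local && local->st == INVALID)
    {
      /* stale local copy: throw it away and re-read from the kernel */
      global_cache.version++;
      global_cache.st= RETRIEVED;
    }
    return &global_cache;
  }

  int main(void)
  {
    struct table_def *local= get_table(NULL);
    printf("version %d\n", local->version);  /* 1 */
    invalidate_global();                     /* concurrent drop + create */
    local= get_table(local);                 /* refresh instead of error */
    printf("version %d\n", local->version);  /* 2 */
    return 0;
  }
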
From 2ef10765dc6ccfba55c65df3ec19313b23d7be5e Mon Sep 17 00:00:00 2001
From: unknown
Date: Thu, 28 Apr 2005 09:39:29 +0200
Subject: [PATCH 06/11] Fix for avoiding getting Invalid schema object
 version when doing local changes, post review fixes

---
 sql/ha_ndbcluster.cc | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 0d6bfcb9d0d..306d3224930 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -336,7 +336,17 @@ void ha_ndbcluster::invalidate_dictionary_cache(bool global)
   NDBDICT *dict= get_ndb()->getDictionary();
   DBUG_PRINT("info", ("invalidating %s", m_tabname));
   if (global)
-    dict->invalidateTable(m_tabname);
+  {
+    if (((const NDBTAB *)m_table)->getObjectStatus()
+        == NdbDictionary::Object::Invalid)
+    {
+      // Global cache has already been invalidated
+      dict->removeCachedTable(m_tabname);
+      global= FALSE;
+    }
+    else
+      dict->invalidateTable(m_tabname);
+  }
   else
     dict->removeCachedTable(m_tabname);
   table->version=0L;  /* Free when thread is ready */
@@ -779,6 +789,7 @@ int ha_ndbcluster::get_metadata(const char *path)
   if (!(tab= dict->getTable(m_tabname)))
     ERR_RETURN(dict->getNdbError());
+  // Check if thread has stale local cache
   if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
   {
     invalidate_dictionary_cache(FALSE);
@@ -804,6 +815,7 @@ int ha_ndbcluster::get_metadata(const char *path)
       if (!invalidating_ndb_table)
       {
         DBUG_PRINT("info", ("Invalidating table"));
+        m_table= (void *) tab;
         invalidate_dictionary_cache(TRUE);
         invalidating_ndb_table= TRUE;
       }
@@ -3288,7 +3300,6 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
         thd->transaction.stmt.ndb_tid= 0;
       }
     }
-    m_table= NULL;
     m_table_info= NULL;
     /*
       This is the place to make sure this handler instance
@@ -3910,7 +3921,13 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
   dict= ndb->getDictionary();
   if (!(orig_tab= dict->getTable(m_tabname)))
     ERR_RETURN(dict->getNdbError());
-
+  // Check if thread has stale local cache
+  if (orig_tab->getObjectStatus() == NdbDictionary::Object::Invalid)
+  {
+    dict->removeCachedTable(m_tabname);
+    if (!(orig_tab= dict->getTable(m_tabname)))
+      ERR_RETURN(dict->getNdbError());
+  }
   m_table= (void *)orig_tab;
   // Change current database to that of target table
   set_dbname(to);
@@ -4278,7 +4295,6 @@ int ndbcluster_discover(THD* thd, const char *db, const char *name,
       DBUG_RETURN(1);
     ERR_RETURN(err);
   }
-
   DBUG_PRINT("info", ("Found table %s", tab->getName()));

   len= tab->getFrmLength();
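
Editor's note on PATCH 06/11 (annotation, not part of the series): two
details of this review fix are easy to miss. First,
invalidate_dictionary_cache(TRUE) now inspects m_table's status, so
get_metadata() must point m_table at the freshly fetched object before
calling it. Second, a global invalidation downgrades itself to a local one
when the shared object is already marked Invalid; one plausible reading is
that invalidating by name a second time could hit the replacement object
another thread has since fetched. The guard reduces to this shape (a sketch
reusing the stand-in types from the previous note, not the real handler
code):

  #include <stdio.h>

  enum status { RETRIEVED, INVALID };

  /* Sketch of the downgrade guard in invalidate_dictionary_cache(). */
  static void invalidate(int global, enum status shared_status)
  {
    if (global && shared_status == INVALID)
      global= 0;            /* global copy is already dead: local only */
    if (global)
      puts("invalidateTable():   global cache and local copy");
    else
      puts("removeCachedTable(): local copy only");
  }

  int main(void)
  {
    invalidate(1, RETRIEVED);   /* normal global invalidation */
    invalidate(1, INVALID);     /* downgraded to local        */
    invalidate(0, RETRIEVED);   /* plain local invalidation   */
    return 0;
  }
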
From c97859b300ce982cedebc5f527fb7acbaa28712a Mon Sep 17 00:00:00 2001
From: unknown
Date: Thu, 28 Apr 2005 10:46:39 +0200
Subject: [PATCH 07/11] Fix for avoiding getting Invalid schema object
 version when doing local changes, more post review fixes

---
 sql/ha_ndbcluster.cc | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 306d3224930..181ae5e5b54 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -334,11 +334,15 @@ void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd)
 void ha_ndbcluster::invalidate_dictionary_cache(bool global)
 {
   NDBDICT *dict= get_ndb()->getDictionary();
+  DBUG_ENTER("invalidate_dictionary_cache");
   DBUG_PRINT("info", ("invalidating %s", m_tabname));
+
   if (global)
   {
-    if (((const NDBTAB *)m_table)->getObjectStatus()
-        == NdbDictionary::Object::Invalid)
+    const NDBTAB *tab= dict->getTable(m_tabname);
+    if (!tab)
+      DBUG_VOID_RETURN;
+    if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
     {
       // Global cache has already been invalidated
       dict->removeCachedTable(m_tabname);
@@ -381,6 +385,7 @@ void ha_ndbcluster::invalidate_dictionary_cache(bool global)
       break;
     }
   }
+  DBUG_VOID_RETURN;
 }

 int ha_ndbcluster::ndb_err(NdbConnection *trans)
@@ -815,7 +820,6 @@ int ha_ndbcluster::get_metadata(const char *path)
       if (!invalidating_ndb_table)
       {
         DBUG_PRINT("info", ("Invalidating table"));
-        m_table= (void *) tab;
         invalidate_dictionary_cache(TRUE);
         invalidating_ndb_table= TRUE;
       }
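
Editor's note on PATCH 07/11 (annotation, not part of the series): besides
fetching the table from the dictionary instead of trusting the possibly
unset m_table pointer, the fix adds DBUG_ENTER, and with it the obligation
that every exit path leaves through DBUG_RETURN/DBUG_VOID_RETURN: the dbug
library keeps a per-call trace stack, and a plain return would leave it
unbalanced. A runnable miniature with toy stand-in macros (the real ones
live in the MySQL source tree, include/my_dbug.h):

  #include <stdio.h>

  /* Toy stand-ins for the dbug macros; the real ones also record file,
     line and a per-thread nesting depth for the trace output. */
  #define DBUG_ENTER(fn)   printf("> %s\n", (fn))
  #define DBUG_VOID_RETURN do { printf("<\n"); return; } while (0)

  /* Stand-in for dict->getTable(): NULL when the table is unknown. */
  static const char *get_table(const char *name)
  {
    return name[0] ? name : NULL;
  }

  static void invalidate_dictionary_cache(const char *tabname)
  {
    DBUG_ENTER("invalidate_dictionary_cache");
    const char *tab= get_table(tabname);
    if (!tab)
      DBUG_VOID_RETURN;     /* early exit keeps the trace balanced */
    /* ... invalidate tab ... */
    DBUG_VOID_RETURN;
  }

  int main(void)
  {
    invalidate_dictionary_cache("t1");
    invalidate_dictionary_cache("");
    return 0;
  }
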
From 994674c09b112745fd03355a0972fbf3f4b0295c Mon Sep 17 00:00:00 2001
From: unknown
Date: Thu, 28 Apr 2005 15:27:42 +0200
Subject: [PATCH 08/11] Bug#8321 - myisampack bug in compression algorithm

This is the second of three changesets. It contains the pure bug fix.
It also contains the second round of after-review fixes.

The problem was that with gcc on x86, shifts are done modulo word size.
'value' is 32 bits wide and shifting it by 32 bits is a no-op. This was
triggered by an evil distribution of character incidences: a distribution
of 2917027827 characters made of 202 distinct values led to 34 occurrences
of 32-bit Huffman codes. This might have been the first time ever that
write_bits() had to write 32-bit values.

Since it can be expected that one day even 32 bits might be insufficient,
the third changeset suggests enlarging some variables to 64 bits.
---
 myisam/myisampack.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/myisam/myisampack.c b/myisam/myisampack.c
index 9f4e3bde65a..90689b08476 100644
--- a/myisam/myisampack.c
+++ b/myisam/myisampack.c
@@ -31,6 +31,7 @@
 #define __GNU_LIBRARY__               /* Skip warnings in getopt.h */
 #endif
 #include <my_getopt.h>
+#include <assert.h>

 #if INT_MAX > 32767
 #define BITS_SAVED 32
@@ -1975,7 +1976,9 @@ static void write_bits (register ulong value, register uint bits)
   {
     reg3 uint byte_buff;
     bits= (uint) -file_buffer.bits;
-    byte_buff=file_buffer.byte | (uint) (value >> bits);
+    DBUG_ASSERT(bits <= 8 * sizeof(value));
+    byte_buff= (file_buffer.byte |
+                ((bits != 8 * sizeof(value)) ? (uint) (value >> bits) : 0));
 #if BITS_SAVED == 32
     *file_buffer.pos++= (byte) (byte_buff >> 24) ;
     *file_buffer.pos++= (byte) (byte_buff >> 16) ;
@@ -1983,7 +1986,9 @@ static void write_bits (register ulong value, register uint bits)
 #endif
     *file_buffer.pos++= (byte) (byte_buff >> 8) ;
     *file_buffer.pos++= (byte) byte_buff;
-    value&=(1 << bits)-1;
+    DBUG_ASSERT(bits <= 8 * sizeof(ulong));
+    if (bits != 8 * sizeof(value))
+      value&= (((ulong) 1) << bits) - 1;
 #if BITS_SAVED == 16
     if (bits >= sizeof(uint))
     {
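
Editor's note on PATCH 08/11 (annotation, not part of the series): shifting
an N-bit integer by N or more bits is undefined behaviour in C, and gcc on
x86 compiles it to an instruction that takes the count modulo 32, so
"value >> 32" silently returns value instead of 0 — which is exactly what a
32-bit Huffman code triggered in write_bits(). The guards in the patch
never let a shift count reach the type width. A standalone demonstration of
the guarded pattern (plain C, no MySQL headers; assumes a 32-bit unsigned
int):

  #include <stdio.h>

  int main(void)
  {
    unsigned int value= 0xFFFFFFFFu;   /* like a 32-bit Huffman code */
    unsigned int bits= 32;

    /* "value >> bits" and "1 << bits" are undefined for bits == 32;
       on x86 the shift count is taken modulo 32, making them no-ops. */
    unsigned int high= (bits < 32) ? (value >> bits) : 0;
    unsigned int mask= (bits < 32) ? ((1u << bits) - 1) : 0xFFFFFFFFu;

    printf("high=%u mask=%x\n", high, mask);   /* high=0 mask=ffffffff */
    return 0;
  }
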
From f0e256efe4a569788908d4ff9d672010b878449b Mon Sep 17 00:00:00 2001
From: unknown
Date: Thu, 28 Apr 2005 18:28:50 +0200
Subject: [PATCH 09/11] BUG#5964 - 4.1 MERGE tables regression from 4.0

Changed the creation of the .MRG file so that only the table name is
written when the MyISAM table is in the same database as the MERGE table,
a relative path is used in other cases in mysqld, and possibly an absolute
path is used in an embedded server.

No test case is added as the external behaviour is unchanged. Only the
file names within the .MRG file are changed.

include/my_sys.h:
  BUG#5964 - 4.1 MERGE tables regression from 4.0
  Added declaration for a new function.
myisammrg/myrg_open.c:
  BUG#5964 - 4.1 MERGE tables regression from 4.0
  Changed check for absolute path to a check for any path.
mysys/my_getwd.c:
  BUG#5964 - 4.1 MERGE tables regression from 4.0
  Added a new function which checks for absolute _or_ relative paths.
sql/ha_myisammrg.cc:
  BUG#5964 - 4.1 MERGE tables regression from 4.0
  Changed the creation of the .MRG file so that only the table name is
  written when the MyISAM table is in the same database as the MERGE
  table, a relative path is used in other cases in mysqld, and possibly
  an absolute path is used in an embedded server.
---
 include/my_sys.h      |  1 +
 myisammrg/myrg_open.c |  2 +-
 mysys/my_getwd.c      | 22 ++++++++++++++++++++++
 sql/ha_myisammrg.cc   | 28 ++++++++++++++++++++++----
 4 files changed, 48 insertions(+), 5 deletions(-)

diff --git a/include/my_sys.h b/include/my_sys.h
index b54e2581bd4..7ead643a9c6 100644
--- a/include/my_sys.h
+++ b/include/my_sys.h
@@ -609,6 +609,7 @@ extern uint dirname_part(my_string to,const char *name);
 extern uint dirname_length(const char *name);
 #define base_name(A) (A+dirname_length(A))
 extern int test_if_hard_path(const char *dir_name);
+extern my_bool has_path(const char *name);
 extern char *convert_dirname(char *to, const char *from, const char *from_end);
 extern void to_unix_path(my_string name);
 extern my_string fn_ext(const char *name);
diff --git a/myisammrg/myrg_open.c b/myisammrg/myrg_open.c
index a59ccb7d966..0dc2f4f9768 100644
--- a/myisammrg/myrg_open.c
+++ b/myisammrg/myrg_open.c
@@ -80,7 +80,7 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking)
       continue;               /* Skip comments */
     }

-    if (!test_if_hard_path(buff))
+    if (!has_path(buff))
    {
       VOID(strmake(name_buff+dir_length,buff,
                    sizeof(name_buff)-1-dir_length));
diff --git a/mysys/my_getwd.c b/mysys/my_getwd.c
index d6f647254e8..89f949eca27 100644
--- a/mysys/my_getwd.c
+++ b/mysys/my_getwd.c
@@ -192,3 +192,25 @@ int test_if_hard_path(register const char *dir_name)
     return FALSE;
 #endif
 } /* test_if_hard_path */
+
+
+/*
+  Test if a name contains an (absolute or relative) path.
+
+  SYNOPSIS
+    has_path()
+    name                        The name to test.
+
+  RETURN
+    TRUE        name contains a path.
+    FALSE       name does not contain a path.
+*/
+
+my_bool has_path(const char *name)
+{
+  return test(strchr(name, FN_LIBCHAR))
+#ifdef FN_DEVCHAR
+         || test(strchr(name, FN_DEVCHAR))
+#endif
+         ;
+}
diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc
index bf47b4625e0..7a5d4fcf0a1 100644
--- a/sql/ha_myisammrg.cc
+++ b/sql/ha_myisammrg.cc
@@ -381,6 +381,7 @@ int ha_myisammrg::create(const char *name, register TABLE *form,
   char buff[FN_REFLEN],**table_names,**pos;
   TABLE_LIST *tables= (TABLE_LIST*) create_info->merge_list.first;
   THD *thd= current_thd;
+  uint dirlgt= dirname_length(name);
   DBUG_ENTER("ha_myisammrg::create");

   if (!(table_names= (char**) thd->alloc((create_info->merge_list.elements+1)*
@@ -394,11 +395,30 @@ int ha_myisammrg::create(const char *name, register TABLE *form,
       tbl= find_temporary_table(thd, tables->db, tables->real_name);
     if (!tbl)
     {
-      uint length= my_snprintf(buff,FN_REFLEN,"%s%s/%s",
-                               mysql_real_data_home,
+      /*
+        Construct the path to the MyISAM table. Try to meet two conditions:
+        1.) Allow to include MyISAM tables from different databases, and
+        2.) allow for moving DATADIR around in the file system.
+        The first means that we need paths in the .MRG file. The second
+        means that we should not have absolute paths in the .MRG file.
+        The best we can do is to use 'mysql_data_home', which is '.'
+        in mysqld and may be an absolute path in an embedded server.
+        This means that it might not be possible to move the DATADIR of
+        an embedded server without changing the paths in the .MRG file.
+      */
+      uint length= my_snprintf(buff, FN_REFLEN, "%s/%s/%s", mysql_data_home,
                                tables->db, tables->real_name);
-      if (!(table_name= thd->strmake(buff, length)))
-        DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+      /*
+        If a MyISAM table is in the same directory as the MERGE table,
+        we use the table name without a path. This means that the
+        DATADIR can easily be moved even for an embedded server as long
+        as the MyISAM tables are from the same database as the MERGE table.
+      */
+      if ((dirname_length(buff) == dirlgt) && ! memcmp(buff, name, dirlgt))
+        table_name= tables->real_name;
+      else
+        if (! (table_name= thd->strmake(buff, length)))
+          DBUG_RETURN(HA_ERR_OUT_OF_MEM);
     }
     else
       table_name=(*tbl)->path;
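
Editor's note on PATCH 09/11 (annotation, not part of the series):
myrg_open() prefixes entries of the .MRG file with the .MRG file's own
directory when the entry carries no path. The old test_if_hard_path() check
only recognised absolute paths, so the relative paths the fixed writer now
emits would have been prefixed a second time; has_path() answers the
broader question "does this name already contain any path component?". A
simplified self-contained version (the real function uses FN_LIBCHAR
instead of a literal '/' and also checks FN_DEVCHAR, the device separator
on systems that have one):

  #include <stdio.h>
  #include <string.h>

  /* Simplified stand-in for the new has_path(). */
  static int has_path(const char *name)
  {
    return strchr(name, '/') != NULL;
  }

  int main(void)
  {
    printf("%d\n", has_path("t1"));           /* 0: bare name, gets prefixed  */
    printf("%d\n", has_path("./db2/t1"));     /* 1: relative path, left alone */
    printf("%d\n", has_path("/data/db/t1"));  /* 1: absolute path, left alone */
    return 0;
  }
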
From b6ad57e0a37b43d20a58f9dc74258f2dc7544ea2 Mon Sep 17 00:00:00 2001
From: unknown
Date: Fri, 29 Apr 2005 11:37:47 +0200
Subject: [PATCH 10/11] Fix for avoiding getting Invalid schema object
 version when doing local changes, fixed found bug

---
 sql/ha_ndbcluster.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 181ae5e5b54..a1166641f7d 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -3266,7 +3266,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
       if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
       {
         invalidate_dictionary_cache(FALSE);
-        if (!(tab= dict->getTable(m_tabname)))
+        if (!(tab= dict->getTable(m_tabname, &tab_info)))
           ERR_RETURN(dict->getNdbError());
         DBUG_PRINT("info", ("Table schema version: %d",
                             tab->getObjectVersion()));

From 27fa1254dcbfd0df2d2b7429956fcc39fc4602cd Mon Sep 17 00:00:00 2001
From: unknown
Date: Fri, 29 Apr 2005 21:19:39 +0400
Subject: [PATCH 11/11] Fix bug #9703 "Error 1032 with GROUP BY query and
 large tables"

Reset old error if tmp table was successfully created. Test data is large
and can be found in bug report along with test query.

sql/sql_select.cc:
  Fix bug #9703
BitKeeper/etc/logging_ok:
  Logging to logging@openlogging.org accepted
---
 BitKeeper/etc/logging_ok | 2 ++
 sql/sql_select.cc        | 5 +++++
 2 files changed, 7 insertions(+)

diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok
index e4de8c6ebf2..330d28daf52 100644
--- a/BitKeeper/etc/logging_ok
+++ b/BitKeeper/etc/logging_ok
@@ -45,6 +45,8 @@ dlenev@build.mysql.com
 dlenev@jabberwock.localdomain
 dlenev@mysql.com
 ejonore@mc03.ndb.mysql.com
+evgen@moonbone.(none)
+evgen@moonbone.local
 gbichot@quadita2.mysql.com
 gbichot@quadxeon.mysql.com
 georg@beethoven.local
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 36efe26dff9..96958db2cce 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -6891,6 +6891,11 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
                                     &join->tmp_table_param,
                                     error, 0))
           DBUG_RETURN(-1);
+        /*
+          If table->file->write_row() failed because of 'out of memory'
+          and the tmp table was successfully created, reset the error.
+        */
+        error=0;
       }
       if (join->rollup.state != ROLLUP::STATE_NONE && error <= 0)
       {
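
Editor's note on PATCH 11/11 (annotation, not part of the series):
end_write_group() writes grouped rows into an in-memory HEAP tmp table;
when that table fills up, write_row() fails and create_myisam_from_heap()
transparently rebuilds the data on disk, also writing the record that had
just failed. The bug was that the stale write_row() error survived the
successful conversion, and a later "error <= 0" check then aborted the
query with error 1032. A self-contained analogue of the control flow (all
names invented; the real conversion re-writes the failed row itself, which
the explicit retry models here):

  #include <stdio.h>

  struct table { int rows, cap; };

  static int write_row(struct table *t)       /* non-zero: "table is full" */
  {
    if (t->rows >= t->cap)
      return 1;
    t->rows++;
    return 0;
  }

  static int convert_to_disk(struct table *t) /* like create_myisam_from_heap */
  {
    t->cap= 1000000;                          /* plenty of room on disk   */
    return 0;                                 /* 0 = conversion succeeded */
  }

  int main(void)
  {
    struct table t= { 0, 2 };
    int i;
    for (i= 0; i < 5; i++)
    {
      int error= write_row(&t);
      if (error)
      {
        if (convert_to_disk(&t))
          return -1;                          /* conversion failed: give up */
        error= 0;                             /* the one line patch 11 adds */
        if (write_row(&t))                    /* models the re-written row  */
          return -1;
      }
    }
    printf("wrote %d rows\n", t.rows);        /* wrote 5 rows */
    return 0;
  }
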