diff --git a/mysql-test/r/distinct.result b/mysql-test/r/distinct.result
index 00436019f85..a6ad95570f8 100644
--- a/mysql-test/r/distinct.result
+++ b/mysql-test/r/distinct.result
@@ -512,3 +512,24 @@ id IFNULL(dsc, '-')
2 line number two
3 line number three
drop table t1;
+CREATE TABLE t1 (
+ID int(11) NOT NULL auto_increment,
+x varchar(20) default NULL,
+y decimal(10,0) default NULL,
+PRIMARY KEY (ID),
+KEY (y)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+INSERT INTO t1 VALUES
+(1,'ba','-1'),
+(2,'ba','1150'),
+(306,'ba','-1'),
+(307,'ba','1150'),
+(611,'ba','-1'),
+(612,'ba','1150');
+select count(distinct x,y) from t1;
+count(distinct x,y)
+2
+select count(distinct concat(x,y)) from t1;
+count(distinct concat(x,y))
+2
+drop table t1;
diff --git a/mysql-test/r/ndb_lock.result b/mysql-test/r/ndb_lock.result
index b8c2c58aac4..ac93f15dac3 100644
--- a/mysql-test/r/ndb_lock.result
+++ b/mysql-test/r/ndb_lock.result
@@ -63,3 +63,14 @@ pk u o
5 5 5
insert into t1 values (1,1,1);
drop table t1;
+create table t3 (id2 int) engine=ndb;
+lock tables t3 write;
+unlock tables;
+drop table t3;
+create table t2 (id int, j int) engine=ndb;
+insert into t2 values (2, 2);
+create table t3 (id int) engine=ndb;
+lock tables t3 read;
+delete t2 from t2, t3 where t2.id = t3.id;
+unlock tables;
+drop table t2, t3;
diff --git a/mysql-test/r/rpl_trunc_temp.result b/mysql-test/r/rpl_trunc_temp.result
new file mode 100644
index 00000000000..44624a38875
--- /dev/null
+++ b/mysql-test/r/rpl_trunc_temp.result
@@ -0,0 +1,22 @@
+stop slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+reset master;
+reset slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+start slave;
+create temporary table t1 (n int);
+insert into t1 values(1);
+show status like 'Slave_open_temp_tables';
+Variable_name Value
+Slave_open_temp_tables 1
+delete from t1;
+show status like 'Slave_open_temp_tables';
+Variable_name Value
+Slave_open_temp_tables 1
+truncate t1;
+show status like 'Slave_open_temp_tables';
+Variable_name Value
+Slave_open_temp_tables 1
+show status like 'Slave_open_temp_tables';
+Variable_name Value
+Slave_open_temp_tables 0
diff --git a/mysql-test/r/xml.result b/mysql-test/r/xml.result
index 18516277c3c..bb7a158593c 100644
--- a/mysql-test/r/xml.result
+++ b/mysql-test/r/xml.result
@@ -561,6 +561,42 @@ A B C
select extractvalue('<A_B>A</A_B>','/A_B');
extractvalue('<A_B>A</A_B>','/A_B')
A
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[position()]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[position()]')
+B1 B2
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=last()]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=last()]')
+B1 B2
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()]')
+B2
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()-1]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()-1]')
+B1
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()=1]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()=1]')
+
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()=2]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()=2]')
+B1 B2
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()=position()]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()=position()]')
+B2
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)]')
+B2
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)-1]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)-1]')
+B1
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=1]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=1]')
+
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=2]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=2]')
+B1 B2
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=position()]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=position()]')
+B2
select extractvalue('<a>Jack</a>','/a[contains(../a,"J")]');
extractvalue('<a>Jack</a>','/a[contains(../a,"J")]')
Jack
diff --git a/mysql-test/t/distinct.test b/mysql-test/t/distinct.test
index 6483284633f..45bd0c7a51c 100644
--- a/mysql-test/t/distinct.test
+++ b/mysql-test/t/distinct.test
@@ -358,3 +358,27 @@ select distinct id, IFNULL(dsc, '-') from t1;
drop table t1;
# End of 4.1 tests
+
+
+#
+# Bug #15745 (COUNT(DISTINCT CONCAT(x,y)) returns wrong result)
+#
+CREATE TABLE t1 (
+ ID int(11) NOT NULL auto_increment,
+ x varchar(20) default NULL,
+ y decimal(10,0) default NULL,
+ PRIMARY KEY (ID),
+ KEY (y)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+
+INSERT INTO t1 VALUES
+(1,'ba','-1'),
+(2,'ba','1150'),
+(306,'ba','-1'),
+(307,'ba','1150'),
+(611,'ba','-1'),
+(612,'ba','1150');
+
+select count(distinct x,y) from t1;
+select count(distinct concat(x,y)) from t1;
+drop table t1;
diff --git a/mysql-test/t/ndb_lock.test b/mysql-test/t/ndb_lock.test
index 6945f91ee39..3d8597dcc45 100644
--- a/mysql-test/t/ndb_lock.test
+++ b/mysql-test/t/ndb_lock.test
@@ -70,3 +70,39 @@ insert into t1 values (1,1,1);
drop table t1;
# End of 4.1 tests
+
+#
+# Bug #17812 Previous lock table for write causes "stray" lock
+# although table is recreated
+#
+# This create/lock/drop sequence used to cause a subsequent hang on the
+# delete below, waiting for table t2. The locking in the "other"
+# connection is relevant; without it there is no problem.
+#
+connection con1;
+create table t3 (id2 int) engine=ndb;
+
+connection con2;
+lock tables t3 write;
+unlock tables;
+
+connection con1;
+drop table t3;
+
+connection con1;
+create table t2 (id int, j int) engine=ndb;
+insert into t2 values (2, 2);
+create table t3 (id int) engine=ndb;
+
+connection con2;
+lock tables t3 read;
+
+connection con1;
+# before the bugfix this delete would hang here, although it shouldn't
+delete t2 from t2, t3 where t2.id = t3.id;
+
+connection con2;
+unlock tables;
+
+connection con1;
+drop table t2, t3;
diff --git a/mysql-test/t/rpl_trunc_temp.test b/mysql-test/t/rpl_trunc_temp.test
new file mode 100644
index 00000000000..56f858dc9a2
--- /dev/null
+++ b/mysql-test/t/rpl_trunc_temp.test
@@ -0,0 +1,35 @@
+# Requires statement logging
+-- source include/have_binlog_format_statement.inc
+
+source include/master-slave.inc;
+
+#
+# Bug#17137 Running "truncate table" on temporary table
+# leaves the table open on a slave
+#
+
+create temporary table t1 (n int);
+insert into t1 values(1);
+sync_slave_with_master;
+show status like 'Slave_open_temp_tables';
+
+# Perform a delete from temp table
+connection master;
+delete from t1;
+sync_slave_with_master;
+show status like 'Slave_open_temp_tables';
+
+# Perform truncate on temp table
+connection master;
+truncate t1;
+sync_slave_with_master;
+show status like 'Slave_open_temp_tables';
+
+# Disconnect the master; the temporary table on the slave should disappear
+disconnect master;
+--real_sleep 3 # time for DROP to be read by slave
+connection slave;
+show status like 'Slave_open_temp_tables';
+
+
+# End of 4.1 tests
diff --git a/mysql-test/t/xml.test b/mysql-test/t/xml.test
index f7ce987f29d..831867937e4 100644
--- a/mysql-test/t/xml.test
+++ b/mysql-test/t/xml.test
@@ -255,6 +255,21 @@ select extractvalue('<a>A<b>B</b><c>C</c></a>','/a/descendant-or-self::*');
#
select extractvalue('<A_B>A</A_B>','/A_B');
+#
+# Bug#16318: XML: extractvalue() incorrectly returns last() = 1
+#
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[position()]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=last()]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()-1]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()=1]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()=2]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()=position()]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)-1]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=1]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=2]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=position()]');
#
# Bug#16316: XML: extractvalue() is case-sensitive with contains()
#
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 6e01330a85e..93894487ee5 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -466,60 +466,58 @@ void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd)
# The mapped error code
*/
-int
-ha_ndbcluster::invalidate_dictionary_cache(TABLE_SHARE *share, Ndb *ndb,
- const char *dbname, const char *tabname,
- bool global)
+int ha_ndbcluster::invalidate_dictionary_cache(bool global)
{
- NDBDICT *dict= ndb->getDictionary();
+ NDBDICT *dict= get_ndb()->getDictionary();
DBUG_ENTER("invalidate_dictionary_cache");
- DBUG_PRINT("info", ("invalidating %s", tabname));
-
-#ifdef HAVE_NDB_BINLOG
- char key[FN_REFLEN];
- build_table_filename(key, sizeof(key), dbname, tabname, "");
- DBUG_PRINT("info", ("Getting ndbcluster mutex"));
- pthread_mutex_lock(&ndbcluster_mutex);
- NDB_SHARE *ndb_share= (NDB_SHARE*)hash_search(&ndbcluster_open_tables,
- (byte*) key, strlen(key));
- pthread_mutex_unlock(&ndbcluster_mutex);
- DBUG_PRINT("info", ("Released ndbcluster mutex"));
- // Only binlog_thread is allowed to globally invalidate a table
- if (global && ndb_share && ndb_share->op && (current_thd != injector_thd))
- DBUG_RETURN(1);
-#endif
+ DBUG_PRINT("info", ("m_tabname: %s global: %d", m_tabname, global));
if (global)
{
- const NDBTAB *tab= dict->getTable(tabname);
+#ifdef HAVE_NDB_BINLOG
+ if (current_thd != injector_thd)
+ {
+ char key[FN_REFLEN];
+ build_table_filename(key, sizeof(key), m_dbname, m_tabname, "");
+ DBUG_PRINT("info", ("Getting ndbcluster mutex"));
+ pthread_mutex_lock(&ndbcluster_mutex);
+ NDB_SHARE *ndb_share= (NDB_SHARE*)hash_search(&ndbcluster_open_tables,
+ (byte*) key, strlen(key));
+ // Only binlog_thread is allowed to globally invalidate a table
+ if (ndb_share && ndb_share->op)
+ {
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ DBUG_PRINT("info", ("Released ndbcluster mutex"));
+ DBUG_RETURN(1);
+ }
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ DBUG_PRINT("info", ("Released ndbcluster mutex"));
+ }
+#endif
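+    /*
+      Invalidate the table in the global dictionary cache, unless it is
+      already marked invalid, in which case only the locally cached copy
+      is removed.
+    */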
+ const NDBTAB *tab= dict->getTable(m_tabname);
if (!tab)
DBUG_RETURN(1);
if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
{
// Global cache has already been invalidated
- dict->removeCachedTable(tabname);
+ dict->removeCachedTable(m_tabname);
global= FALSE;
+ DBUG_PRINT("info", ("global: %d", global));
}
else
- dict->invalidateTable(tabname);
+ dict->invalidateTable(m_tabname);
+ table_share->version= 0L; /* Free when thread is ready */
}
else
- dict->removeCachedTable(tabname);
- share->version=0L; /* Free when thread is ready */
- DBUG_RETURN(0);
-}
+ dict->removeCachedTable(m_tabname);
-void ha_ndbcluster::invalidate_dictionary_cache(bool global)
-{
- NDBDICT *dict= get_ndb()->getDictionary();
- if (invalidate_dictionary_cache(table_share, get_ndb(), m_dbname, m_tabname, global))
- return;
/* Invalidate indexes */
for (uint i= 0; i < table_share->keys; i++)
{
NDBINDEX *index = (NDBINDEX *) m_index[i].index;
NDBINDEX *unique_index = (NDBINDEX *) m_index[i].unique_index;
- if (!index && !unique_index) continue;
+ if (!index && !unique_index)
+ continue;
NDB_INDEX_TYPE idx_type= m_index[i].type;
switch (idx_type) {
@@ -546,6 +544,7 @@ void ha_ndbcluster::invalidate_dictionary_cache(bool global)
break;
}
}
+ DBUG_RETURN(0);
}
int ha_ndbcluster::ndb_err(NdbTransaction *trans)
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 7498d2bb624..7fa50fef060 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -685,9 +685,6 @@ static void set_tabname(const char *pathname, char *tabname);
bool check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes);
- static int invalidate_dictionary_cache(TABLE_SHARE *share, Ndb *ndb,
- const char *dbname, const char *tabname,
- bool global);
private:
friend int ndbcluster_drop_database_impl(const char *path);
@@ -774,7 +771,7 @@ private:
void print_results();
ulonglong get_auto_increment();
- void invalidate_dictionary_cache(bool global);
+ int invalidate_dictionary_cache(bool global);
int ndb_err(NdbTransaction*);
bool uses_blob_value();
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index dc1cf6cc8b7..1ff82af1e2d 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -2564,9 +2564,9 @@ bool Item_sum_count_distinct::setup(THD *thd)
Field *f= *field;
enum enum_field_types type= f->type();
tree_key_length+= f->pack_length();
- if (!f->binary() && (type == MYSQL_TYPE_STRING ||
- type == MYSQL_TYPE_VAR_STRING ||
- type == MYSQL_TYPE_VARCHAR))
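+      /*
+        A VARCHAR value is not safe to compare as a fixed-size binary
+        image of pack_length() bytes (bytes past the actual value length
+        are not significant), so take the non-binary path for VARCHAR
+        regardless of f->binary().
+      */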
+      if (type == MYSQL_TYPE_VARCHAR ||
+          (!f->binary() && (type == MYSQL_TYPE_STRING ||
+                            type == MYSQL_TYPE_VAR_STRING)))
{
all_binary= FALSE;
break;
diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc
index 8ad7e2f9661..7378be0ac4c 100644
--- a/sql/item_xmlfunc.cc
+++ b/sql/item_xmlfunc.cc
@@ -30,7 +30,6 @@
2. add nodeset_to_nodeset_comparator
3. add lacking functions:
- name()
- - last()
- lang()
- string()
- id()
@@ -75,6 +74,7 @@ typedef struct my_xpath_flt_st
{
uint num; /* absolute position in MY_XML_NODE array */
uint pos; /* relative position in context */
+ uint size; /* context size */
} MY_XPATH_FLT;
@@ -123,6 +123,15 @@ public:
MY_XPATH_FLT add;
add.num= num;
add.pos= pos;
+ add.size= 0;
+ return append_element(&add);
+ }
+ inline bool append_element(uint32 num, uint32 pos, uint32 size)
+ {
+ MY_XPATH_FLT add;
+ add.num= num;
+ add.pos= pos;
+ add.size= size;
return append_element(&add);
}
inline MY_XPATH_FLT *element(uint i)
@@ -455,7 +464,11 @@ public:
void fix_length_and_dec() { max_length=10; }
longlong val_int()
{
+ uint predicate_supplied_context_size;
String *res= args[0]->val_nodeset(&tmp_value);
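+    /*
+      A single-element nodeset may carry the size of the context it was
+      taken from (filled in while a predicate iterates its nodes); if so,
+      return that size instead of the nodeset length.
+    */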
+ if (res->length() == sizeof(MY_XPATH_FLT) &&
+ (predicate_supplied_context_size= ((MY_XPATH_FLT*)res->ptr())->size))
+ return predicate_supplied_context_size;
return res->length() / sizeof(MY_XPATH_FLT);
}
};
@@ -735,13 +748,15 @@ String *Item_nodeset_func_predicate::val_nodeset(String *str)
{
Item_nodeset_func *nodeset_func= (Item_nodeset_func*) args[0];
Item_func *comp_func= (Item_func*)args[1];
- uint pos= 0;
+ uint pos= 0, size;
prepare(str);
+ size= fltend - fltbeg;
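+  /*
+    Pass the context size along with every node so that last() evaluated
+    inside the predicate sees the size of this nodeset.
+  */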
for (MY_XPATH_FLT *flt= fltbeg; flt < fltend; flt++)
{
nodeset_func->context_cache.length(0);
((XPathFilter*)(&nodeset_func->context_cache))->append_element(flt->num,
- flt->pos);
+ flt->pos,
+ size);
if (comp_func->val_int())
((XPathFilter*)str)->append_element(flt->num, pos++);
}
@@ -751,17 +766,19 @@ String *Item_nodeset_func_predicate::val_nodeset(String *str)
String *Item_nodeset_func_elementbyindex::val_nodeset(String *nodeset)
{
+ Item_nodeset_func *nodeset_func= (Item_nodeset_func*) args[0];
prepare(nodeset);
- int index= args[1]->val_int() - 1;
- if (index >= 0)
+ MY_XPATH_FLT *flt;
+ uint pos, size= fltend - fltbeg;
+ for (pos= 0, flt= fltbeg; flt < fltend; flt++)
{
- MY_XPATH_FLT *flt;
- uint pos;
- for (pos= 0, flt= fltbeg; flt < fltend; flt++)
- {
- if (flt->pos == (uint) index || args[1]->is_bool_func())
- ((XPathFilter*)nodeset)->append_element(flt->num, pos++);
- }
+ nodeset_func->context_cache.length(0);
+ ((XPathFilter*)(&nodeset_func->context_cache))->append_element(flt->num,
+ flt->pos,
+ size);
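+    /*
+      Evaluate the index expression per node, now that the context cache
+      holds this node's position and the context size; this is what makes
+      position() and last() inside the brackets resolve correctly.
+    */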
+ int index= args[1]->val_int() - 1;
+ if (index >= 0 && (flt->pos == (uint) index || args[1]->is_bool_func()))
+ ((XPathFilter*)nodeset)->append_element(flt->num, pos++);
}
return nodeset;
}
diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
index 01421be085b..7fca1e64cfb 100644
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
@@ -1252,12 +1252,14 @@ NdbEventBuffer::execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep)
* Already completed GCI...
* Possible in case of resend during NF handling
*/
+#ifdef VM_TRACE
ndbout << "bucket == 0, gci:" << gci
<< " complete: " << m_complete_data << endl;
for(Uint32 i = 0; i