Merge bk-internal:/home/bk/mysql-5.1-new
into neptunus.(none):/home/msvensson/mysql/mysql-5.1

mysql-test/mysql-test-run.pl:
  Auto merged
@@ -512,3 +512,24 @@ id IFNULL(dsc, '-')
 2 line number two
 3 line number three
 drop table t1;
+CREATE TABLE t1 (
+ID int(11) NOT NULL auto_increment,
+x varchar(20) default NULL,
+y decimal(10,0) default NULL,
+PRIMARY KEY (ID),
+KEY (y)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+INSERT INTO t1 VALUES
+(1,'ba','-1'),
+(2,'ba','1150'),
+(306,'ba','-1'),
+(307,'ba','1150'),
+(611,'ba','-1'),
+(612,'ba','1150');
+select count(distinct x,y) from t1;
+count(distinct x,y)
+2
+select count(distinct concat(x,y)) from t1;
+count(distinct concat(x,y))
+2
+drop table t1;
@@ -63,3 +63,14 @@ pk u o
 5 5 5
 insert into t1 values (1,1,1);
 drop table t1;
+create table t3 (id2 int) engine=ndb;
+lock tables t3 write;
+unlock tables;
+drop table t3;
+create table t2 (id int, j int) engine=ndb;
+insert into t2 values (2, 2);
+create table t3 (id int) engine=ndb;
+lock tables t3 read;
+delete t2 from t2, t3 where t2.id = t3.id;
+unlock tables;
+drop table t2, t3;
mysql-test/r/rpl_trunc_temp.result (new file, 22 lines)
@@ -0,0 +1,22 @@
+stop slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+reset master;
+reset slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+start slave;
+create temporary table t1 (n int);
+insert into t1 values(1);
+show status like 'Slave_open_temp_tables';
+Variable_name Value
+Slave_open_temp_tables 1
+delete from t1;
+show status like 'Slave_open_temp_tables';
+Variable_name Value
+Slave_open_temp_tables 1
+truncate t1;
+show status like 'Slave_open_temp_tables';
+Variable_name Value
+Slave_open_temp_tables 1
+show status like 'Slave_open_temp_tables';
+Variable_name Value
+Slave_open_temp_tables 0
@@ -561,6 +561,42 @@ A B C
 select extractvalue('<A_B>A</A_B>','/A_B');
 extractvalue('<A_B>A</A_B>','/A_B')
 A
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[position()]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[position()]')
+B1 B2
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=last()]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=last()]')
+B1 B2
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()]')
+B2
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()-1]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()-1]')
+B1
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()=1]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()=1]')
+
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()=2]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()=2]')
+B1 B2
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()=position()]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()=position()]')
+B2
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)]')
+B2
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)-1]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)-1]')
+B1
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=1]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=1]')
+
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=2]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=2]')
+B1 B2
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=position()]');
+extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=position()]')
+B2
 select extractvalue('<a>Jack</a>','/a[contains(../a,"J")]');
 extractvalue('<a>Jack</a>','/a[contains(../a,"J")]')
 Jack
@@ -358,3 +358,27 @@ select distinct id, IFNULL(dsc, '-') from t1;
 drop table t1;
 
 # End of 4.1 tests
+
+
+#
+# Bug #15745 ( COUNT(DISTINCT CONCAT(x,y)) returns wrong result)
+#
+CREATE TABLE t1 (
+ID int(11) NOT NULL auto_increment,
+x varchar(20) default NULL,
+y decimal(10,0) default NULL,
+PRIMARY KEY (ID),
+KEY (y)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+
+INSERT INTO t1 VALUES
+(1,'ba','-1'),
+(2,'ba','1150'),
+(306,'ba','-1'),
+(307,'ba','1150'),
+(611,'ba','-1'),
+(612,'ba','1150');
+
+select count(distinct x,y) from t1;
+select count(distinct concat(x,y)) from t1;
+drop table t1;
@@ -70,3 +70,39 @@ insert into t1 values (1,1,1);
 drop table t1;
 
 # End of 4.1 tests
+
+#
+# Bug #17812 Previous lock table for write causes "stray" lock
+# although table is recreated
+#
+# this creating, locking, and dropping causes a subsequent hang
+# on the delete below waiting for table t2 the locking in the
+# "other" connection is relevant, as without it there is no problem
+#
+connection con1;
+create table t3 (id2 int) engine=ndb;
+
+connection con2;
+lock tables t3 write;
+unlock tables;
+
+connection con1;
+drop table t3;
+
+connection con1;
+create table t2 (id int, j int) engine=ndb;
+insert into t2 values (2, 2);
+create table t3 (id int) engine=ndb;
+
+connection con2;
+lock tables t3 read;
+
+connection con1;
+# here we get a hang before bugfix although we shouldn't
+delete t2 from t2, t3 where t2.id = t3.id;
+
+connection con2;
+unlock tables;
+
+connection con1;
+drop table t2, t3;
mysql-test/t/rpl_trunc_temp.test (new file, 35 lines)
@@ -0,0 +1,35 @@
+# Requires statement logging
+-- source include/have_binlog_format_statement.inc
+
+source include/master-slave.inc;
+
+#
+# Bug#17137 Running "truncate table" on temporary table
+# leaves the table open on a slave
+#
+
+create temporary table t1 (n int);
+insert into t1 values(1);
+sync_slave_with_master;
+show status like 'Slave_open_temp_tables';
+
+# Perform a delete from temp table
+connection master;
+delete from t1;
+sync_slave_with_master;
+show status like 'Slave_open_temp_tables';
+
+# Perform truncate on temp table
+connection master;
+truncate t1;
+sync_slave_with_master;
+show status like 'Slave_open_temp_tables';
+
+# Disconnect the master, temp table on slave should disappear
+disconnect master;
+--real_sleep 3 # time for DROP to be read by slave
+connection slave;
+show status like 'Slave_open_temp_tables';
+
+
+# End of 4.1 tests
@@ -255,6 +255,21 @@ select extractvalue('<a>A<b>B<c>C</c></b></a>','/a/descendant-or-self::*');
 #
 select extractvalue('<A_B>A</A_B>','/A_B');
 
+#
+# Bug#16318: XML: extractvalue() incorrectly returns last() = 1
+#
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[position()]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=last()]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()-1]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()=1]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()=2]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[last()=position()]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)-1]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=1]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=2]');
+select extractvalue('<a>A<b>B1</b><b>B2</b></a>','/a/b[count(.)=position()]');
 #
 # Bug#16316: XML: extractvalue() is case-sensitive with contains()
 #
@@ -466,60 +466,58 @@ void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd)
|
|||||||
# The mapped error code
|
# The mapped error code
|
||||||
*/
|
*/
|
||||||
|
|
||||||
int
|
int ha_ndbcluster::invalidate_dictionary_cache(bool global)
|
||||||
ha_ndbcluster::invalidate_dictionary_cache(TABLE_SHARE *share, Ndb *ndb,
|
|
||||||
const char *dbname, const char *tabname,
|
|
||||||
bool global)
|
|
||||||
{
|
{
|
||||||
NDBDICT *dict= ndb->getDictionary();
|
NDBDICT *dict= get_ndb()->getDictionary();
|
||||||
DBUG_ENTER("invalidate_dictionary_cache");
|
DBUG_ENTER("invalidate_dictionary_cache");
|
||||||
DBUG_PRINT("info", ("invalidating %s", tabname));
|
DBUG_PRINT("info", ("m_tabname: %s global: %d", m_tabname, global));
|
||||||
|
|
||||||
#ifdef HAVE_NDB_BINLOG
|
|
||||||
char key[FN_REFLEN];
|
|
||||||
build_table_filename(key, sizeof(key), dbname, tabname, "");
|
|
||||||
DBUG_PRINT("info", ("Getting ndbcluster mutex"));
|
|
||||||
pthread_mutex_lock(&ndbcluster_mutex);
|
|
||||||
NDB_SHARE *ndb_share= (NDB_SHARE*)hash_search(&ndbcluster_open_tables,
|
|
||||||
(byte*) key, strlen(key));
|
|
||||||
pthread_mutex_unlock(&ndbcluster_mutex);
|
|
||||||
DBUG_PRINT("info", ("Released ndbcluster mutex"));
|
|
||||||
// Only binlog_thread is allowed to globally invalidate a table
|
|
||||||
if (global && ndb_share && ndb_share->op && (current_thd != injector_thd))
|
|
||||||
DBUG_RETURN(1);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
if (global)
|
if (global)
|
||||||
{
|
{
|
||||||
const NDBTAB *tab= dict->getTable(tabname);
|
#ifdef HAVE_NDB_BINLOG
|
||||||
|
if (current_thd != injector_thd)
|
||||||
|
{
|
||||||
|
char key[FN_REFLEN];
|
||||||
|
build_table_filename(key, sizeof(key), m_dbname, m_tabname, "");
|
||||||
|
DBUG_PRINT("info", ("Getting ndbcluster mutex"));
|
||||||
|
pthread_mutex_lock(&ndbcluster_mutex);
|
||||||
|
NDB_SHARE *ndb_share= (NDB_SHARE*)hash_search(&ndbcluster_open_tables,
|
||||||
|
(byte*) key, strlen(key));
|
||||||
|
// Only binlog_thread is allowed to globally invalidate a table
|
||||||
|
if (ndb_share && ndb_share->op)
|
||||||
|
{
|
||||||
|
pthread_mutex_unlock(&ndbcluster_mutex);
|
||||||
|
DBUG_PRINT("info", ("Released ndbcluster mutex"));
|
||||||
|
DBUG_RETURN(1);
|
||||||
|
}
|
||||||
|
pthread_mutex_unlock(&ndbcluster_mutex);
|
||||||
|
DBUG_PRINT("info", ("Released ndbcluster mutex"));
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
const NDBTAB *tab= dict->getTable(m_tabname);
|
||||||
if (!tab)
|
if (!tab)
|
||||||
DBUG_RETURN(1);
|
DBUG_RETURN(1);
|
||||||
if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
|
if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
|
||||||
{
|
{
|
||||||
// Global cache has already been invalidated
|
// Global cache has already been invalidated
|
||||||
dict->removeCachedTable(tabname);
|
dict->removeCachedTable(m_tabname);
|
||||||
global= FALSE;
|
global= FALSE;
|
||||||
|
DBUG_PRINT("info", ("global: %d", global));
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
dict->invalidateTable(tabname);
|
dict->invalidateTable(m_tabname);
|
||||||
|
table_share->version= 0L; /* Free when thread is ready */
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
dict->removeCachedTable(tabname);
|
dict->removeCachedTable(m_tabname);
|
||||||
share->version=0L; /* Free when thread is ready */
|
|
||||||
DBUG_RETURN(0);
|
|
||||||
}
|
|
||||||
|
|
||||||
void ha_ndbcluster::invalidate_dictionary_cache(bool global)
|
|
||||||
{
|
|
||||||
NDBDICT *dict= get_ndb()->getDictionary();
|
|
||||||
if (invalidate_dictionary_cache(table_share, get_ndb(), m_dbname, m_tabname, global))
|
|
||||||
return;
|
|
||||||
/* Invalidate indexes */
|
/* Invalidate indexes */
|
||||||
for (uint i= 0; i < table_share->keys; i++)
|
for (uint i= 0; i < table_share->keys; i++)
|
||||||
{
|
{
|
||||||
NDBINDEX *index = (NDBINDEX *) m_index[i].index;
|
NDBINDEX *index = (NDBINDEX *) m_index[i].index;
|
||||||
NDBINDEX *unique_index = (NDBINDEX *) m_index[i].unique_index;
|
NDBINDEX *unique_index = (NDBINDEX *) m_index[i].unique_index;
|
||||||
if (!index && !unique_index) continue;
|
if (!index && !unique_index)
|
||||||
|
continue;
|
||||||
NDB_INDEX_TYPE idx_type= m_index[i].type;
|
NDB_INDEX_TYPE idx_type= m_index[i].type;
|
||||||
|
|
||||||
switch (idx_type) {
|
switch (idx_type) {
|
||||||
@@ -546,6 +544,7 @@ void ha_ndbcluster::invalidate_dictionary_cache(bool global)
       break;
     }
   }
+  DBUG_RETURN(0);
 }
 
 int ha_ndbcluster::ndb_err(NdbTransaction *trans)
@@ -685,9 +685,6 @@ static void set_tabname(const char *pathname, char *tabname);
 
 bool check_if_incompatible_data(HA_CREATE_INFO *info,
                                 uint table_changes);
-static int invalidate_dictionary_cache(TABLE_SHARE *share, Ndb *ndb,
-                                       const char *dbname, const char *tabname,
-                                       bool global);
 
 private:
   friend int ndbcluster_drop_database_impl(const char *path);
@@ -774,7 +771,7 @@ private:
   void print_results();
 
   ulonglong get_auto_increment();
-  void invalidate_dictionary_cache(bool global);
+  int invalidate_dictionary_cache(bool global);
   int ndb_err(NdbTransaction*);
   bool uses_blob_value();
 
@@ -2564,9 +2564,9 @@ bool Item_sum_count_distinct::setup(THD *thd)
       Field *f= *field;
       enum enum_field_types type= f->type();
       tree_key_length+= f->pack_length();
-      if (!f->binary() && (type == MYSQL_TYPE_STRING ||
-                           type == MYSQL_TYPE_VAR_STRING ||
-                           type == MYSQL_TYPE_VARCHAR))
+      if ((type == MYSQL_TYPE_VARCHAR) ||
+          !f->binary() && (type == MYSQL_TYPE_STRING ||
+                           type == MYSQL_TYPE_VAR_STRING))
       {
         all_binary= FALSE;
         break;
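The item_sum.cc hunk above appears to be the server-side fix for Bug #15745: with the old condition a VARCHAR column could still take the all_binary fast path in Item_sum_count_distinct::setup(), and COUNT(DISTINCT ...) over such a column could return a wrong result; the new condition disables that path for any MYSQL_TYPE_VARCHAR field regardless of f->binary(). A minimal SQL sketch of the expected behaviour, lifted from the count_distinct test added earlier in this diff (expected results shown as comments):

  CREATE TABLE t1 (
    ID int(11) NOT NULL auto_increment,
    x varchar(20) default NULL,
    y decimal(10,0) default NULL,
    PRIMARY KEY (ID),
    KEY (y)
  ) ENGINE=MyISAM DEFAULT CHARSET=latin1;
  INSERT INTO t1 VALUES (1,'ba','-1'), (2,'ba','1150'), (306,'ba','-1'),
                        (307,'ba','1150'), (611,'ba','-1'), (612,'ba','1150');
  SELECT count(distinct x,y) FROM t1;          -- expected: 2
  SELECT count(distinct concat(x,y)) FROM t1;  -- expected: 2
  DROP TABLE t1;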
@@ -30,7 +30,6 @@
   2. add nodeset_to_nodeset_comparator
   3. add lacking functions:
        - name()
-       - last()
        - lang()
        - string()
        - id()
@@ -75,6 +74,7 @@ typedef struct my_xpath_flt_st
 {
   uint num;     /* absolute position in MY_XML_NODE array */
   uint pos;     /* relative position in context */
+  uint size;    /* context size */
 } MY_XPATH_FLT;
 
 
@@ -123,6 +123,15 @@ public:
     MY_XPATH_FLT add;
     add.num= num;
     add.pos= pos;
+    add.size= 0;
+    return append_element(&add);
+  }
+  inline bool append_element(uint32 num, uint32 pos, uint32 size)
+  {
+    MY_XPATH_FLT add;
+    add.num= num;
+    add.pos= pos;
+    add.size= size;
     return append_element(&add);
   }
   inline MY_XPATH_FLT *element(uint i)
@@ -455,7 +464,11 @@ public:
   void fix_length_and_dec() { max_length=10; }
   longlong val_int()
   {
+    uint predicate_supplied_context_size;
     String *res= args[0]->val_nodeset(&tmp_value);
+    if (res->length() == sizeof(MY_XPATH_FLT) &&
+        (predicate_supplied_context_size= ((MY_XPATH_FLT*)res->ptr())->size))
+      return predicate_supplied_context_size;
     return res->length() / sizeof(MY_XPATH_FLT);
   }
 };
@@ -735,13 +748,15 @@ String *Item_nodeset_func_predicate::val_nodeset(String *str)
 {
   Item_nodeset_func *nodeset_func= (Item_nodeset_func*) args[0];
   Item_func *comp_func= (Item_func*)args[1];
-  uint pos= 0;
+  uint pos= 0, size;
   prepare(str);
+  size= fltend - fltbeg;
   for (MY_XPATH_FLT *flt= fltbeg; flt < fltend; flt++)
   {
     nodeset_func->context_cache.length(0);
     ((XPathFilter*)(&nodeset_func->context_cache))->append_element(flt->num,
-                                                                   flt->pos);
+                                                                   flt->pos,
+                                                                   size);
     if (comp_func->val_int())
       ((XPathFilter*)str)->append_element(flt->num, pos++);
   }
@@ -751,17 +766,19 @@ String *Item_nodeset_func_predicate::val_nodeset(String *str)
 
 String *Item_nodeset_func_elementbyindex::val_nodeset(String *nodeset)
 {
+  Item_nodeset_func *nodeset_func= (Item_nodeset_func*) args[0];
   prepare(nodeset);
-  int index= args[1]->val_int() - 1;
-  if (index >= 0)
+  MY_XPATH_FLT *flt;
+  uint pos, size= fltend - fltbeg;
+  for (pos= 0, flt= fltbeg; flt < fltend; flt++)
   {
-    MY_XPATH_FLT *flt;
-    uint pos;
-    for (pos= 0, flt= fltbeg; flt < fltend; flt++)
-    {
-      if (flt->pos == (uint) index || args[1]->is_bool_func())
-        ((XPathFilter*)nodeset)->append_element(flt->num, pos++);
-    }
+    nodeset_func->context_cache.length(0);
+    ((XPathFilter*)(&nodeset_func->context_cache))->append_element(flt->num,
+                                                                   flt->pos,
+                                                                   size);
+    int index= args[1]->val_int() - 1;
+    if (index >= 0 && (flt->pos == (uint) index || args[1]->is_bool_func()))
+      ((XPathFilter*)nodeset)->append_element(flt->num, pos++);
   }
   return nodeset;
 }
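Taken together, the item_xmlfunc.cc hunks above give each MY_XPATH_FLT element a context size: the predicate and element-by-index evaluators now record the size of the node set they are filtering in the per-element context cache, and the count/last() item returns that stored size when it is handed a single cached element carrying one. This appears to be what makes last() meaningful inside a predicate (Bug#16318), where it previously evaluated as if the context held one node. A short SQL sketch of the behaviour recorded in the xml.result hunk earlier in this diff (results shown as comments):

  SELECT extractvalue('<a>A<b>B1</b><b>B2</b></a>', '/a/b[last()]');    -- B2
  SELECT extractvalue('<a>A<b>B1</b><b>B2</b></a>', '/a/b[last()-1]');  -- B1
  SELECT extractvalue('<a>A<b>B1</b><b>B2</b></a>', '/a/b[last()=1]');  -- empty: the context holds two <b> nodes
  SELECT extractvalue('<a>A<b>B1</b><b>B2</b></a>', '/a/b[last()=2]');  -- B1 B2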
@@ -1252,12 +1252,14 @@ NdbEventBuffer::execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep)
      * Already completed GCI...
      * Possible in case of resend during NF handling
      */
+#ifdef VM_TRACE
     ndbout << "bucket == 0, gci:" << gci
            << " complete: " << m_complete_data << endl;
     for(Uint32 i = 0; i<m_active_gci.size(); i++)
     {
       ndbout << i << " - " << m_active_gci[i] << endl;
     }
+#endif
     DBUG_VOID_RETURN_EVENT;
   }
 