Merge tulin@bk-internal.mysql.com:/home/bk/mysql-4.1
into poseidon.ndb.mysql.com:/home/tomas/mysql-4.1

sql/ha_ndbcluster.cc: Auto merged
@@ -377,3 +377,24 @@ count(*)
 0
 drop table t1;
 drop database mysqltest;
+set autocommit=1;
+use test;
+CREATE TABLE t1 (
+a int,
+b text,
+PRIMARY KEY (a)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+INSERT INTO t1 VALUES
+(1,'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA');
+INSERT INTO t1 VALUES
+(2,'BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB');
+select * from t1 order by a;
+a b
+1 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+2 BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
+alter table t1 engine=ndb;
+select * from t1 order by a;
+a b
+1 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+2 BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
+drop table t1;
@@ -29,3 +29,44 @@ count(*)
 select * from t2 limit 0;
 a b c
 drop table t2;
+CREATE TABLE `t2` (
+`views` int(11) NOT NULL default '0',
+`clicks` int(11) NOT NULL default '0',
+`day` date NOT NULL default '0000-00-00',
+`hour` tinyint(4) NOT NULL default '0',
+`bannerid` smallint(6) NOT NULL default '0',
+`zoneid` smallint(6) NOT NULL default '0',
+`source` varchar(50) NOT NULL default '',
+PRIMARY KEY (`day`,`hour`,`bannerid`,`zoneid`,`source`),
+KEY `bannerid_day` (`bannerid`,`day`),
+KEY `zoneid` (`zoneid`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+INSERT INTO `t2` VALUES
+( 1,0,'2004-09-17', 5,100,100,''),
+( 1,0,'2004-09-18', 7,100,100,''),
+( 17,0,'2004-09-27',20,132,100,''),
+( 4,0,'2004-09-16',23,132,100,''),
+( 86,0,'2004-09-18', 7,196,196,''),
+( 11,0,'2004-09-16',16,132,100,''),
+(140,0,'2004-09-18', 0,100,164,''),
+( 2,0,'2004-09-17', 7,132,100,''),
+(846,0,'2004-09-27',11,132,164,''),
+( 1,0,'2004-09-18', 8,132,100,''),
+( 22,0,'2004-09-27', 9,164,132,''),
+(711,0,'2004-09-27', 9,100,164,''),
+( 11,0,'2004-09-18', 0,196,132,''),
+( 41,0,'2004-09-27',15,196,132,''),
+( 57,0,'2004-09-18', 2,164,196,'');
+SELECT DATE_FORMAT(day, '%Y%m%d') as date, DATE_FORMAT(day, '%d-%m-%Y')
+as date_formatted FROM t2 GROUP BY day ORDER BY day DESC;
+date date_formatted
+20040927 27-09-2004
+20040918 18-09-2004
+20040917 17-09-2004
+20040916 16-09-2004
+SELECT DATE_FORMAT(day, '%Y%m%d') as date, DATE_FORMAT(day, '%d-%m-%Y')
+as date_formatted FROM t2 GROUP BY day ORDER BY day DESC LIMIT 2;
+date date_formatted
+20040927 27-09-2004
+20040918 18-09-2004
+drop table t2;
@@ -308,3 +308,21 @@ rollback;
 select count(*) from t1;
 drop table t1;
 drop database mysqltest;
+
+set autocommit=1;
+use test;
+CREATE TABLE t1 (
+a int,
+b text,
+PRIMARY KEY (a)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+
+INSERT INTO t1 VALUES
+(1,'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA');
+INSERT INTO t1 VALUES
+(2,'BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB');
+
+select * from t1 order by a;
+alter table t1 engine=ndb;
+select * from t1 order by a;
+drop table t1;
@@ -42,3 +42,41 @@ select count(*) from t2 where c=12345678 limit 1000;
 select * from t2 limit 0;
 
 drop table t2;
+
+CREATE TABLE `t2` (
+`views` int(11) NOT NULL default '0',
+`clicks` int(11) NOT NULL default '0',
+`day` date NOT NULL default '0000-00-00',
+`hour` tinyint(4) NOT NULL default '0',
+`bannerid` smallint(6) NOT NULL default '0',
+`zoneid` smallint(6) NOT NULL default '0',
+`source` varchar(50) NOT NULL default '',
+PRIMARY KEY (`day`,`hour`,`bannerid`,`zoneid`,`source`),
+KEY `bannerid_day` (`bannerid`,`day`),
+KEY `zoneid` (`zoneid`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+
+INSERT INTO `t2` VALUES
+( 1,0,'2004-09-17', 5,100,100,''),
+( 1,0,'2004-09-18', 7,100,100,''),
+( 17,0,'2004-09-27',20,132,100,''),
+( 4,0,'2004-09-16',23,132,100,''),
+( 86,0,'2004-09-18', 7,196,196,''),
+( 11,0,'2004-09-16',16,132,100,''),
+(140,0,'2004-09-18', 0,100,164,''),
+( 2,0,'2004-09-17', 7,132,100,''),
+(846,0,'2004-09-27',11,132,164,''),
+( 1,0,'2004-09-18', 8,132,100,''),
+( 22,0,'2004-09-27', 9,164,132,''),
+(711,0,'2004-09-27', 9,100,164,''),
+( 11,0,'2004-09-18', 0,196,132,''),
+( 41,0,'2004-09-27',15,196,132,''),
+( 57,0,'2004-09-18', 2,164,196,'');
+
+SELECT DATE_FORMAT(day, '%Y%m%d') as date, DATE_FORMAT(day, '%d-%m-%Y')
+as date_formatted FROM t2 GROUP BY day ORDER BY day DESC;
+
+SELECT DATE_FORMAT(day, '%Y%m%d') as date, DATE_FORMAT(day, '%d-%m-%Y')
+as date_formatted FROM t2 GROUP BY day ORDER BY day DESC LIMIT 2;
+
+drop table t2;
@@ -88,7 +88,7 @@ RestoreMetaData::~RestoreMetaData(){
   allTables.clear();
 }
 
-const TableS *
+TableS *
 RestoreMetaData::getTable(Uint32 tableId) const {
   for(Uint32 i= 0; i < allTables.size(); i++)
     if(allTables[i]->getTableId() == tableId)
@@ -201,6 +201,8 @@ TableS::TableS(NdbTableImpl* tableImpl)
 {
   m_dictTable = tableImpl;
   m_noOfNullable = m_nullBitmaskSize = 0;
+  m_auto_val_id= ~(Uint32)0;
+  m_max_auto_val= 0;
 
   for (int i = 0; i < tableImpl->getNoOfColumns(); i++)
     createAttr(tableImpl->getColumn(i));
@@ -269,7 +271,7 @@ int TupleS::getNoOfAttributes() const {
   return m_currentTable->getNoOfAttributes();
 };
 
-const TableS * TupleS::getTable() const {
+TableS * TupleS::getTable() const {
   return m_currentTable;
 };
 
@@ -282,7 +284,7 @@ AttributeData * TupleS::getData(int i) const{
 };
 
 bool
-TupleS::prepareRecord(const TableS & tab){
+TupleS::prepareRecord(TableS & tab){
   if (allAttrData) {
     if (getNoOfAttributes() == tab.getNoOfAttributes())
     {
@@ -698,6 +700,9 @@ void TableS::createAttr(NdbDictionary::Column *column)
   d->attrId = allAttributesDesc.size();
   allAttributesDesc.push_back(d);
 
+  if (d->m_column->getAutoIncrement())
+    m_auto_val_id= d->attrId;
+
   if(d->m_column->getPrimaryKey() /* && not variable */)
   {
     m_fixedKeys.push_back(d);
@@ -91,9 +91,9 @@ class TupleS {
 private:
   friend class RestoreDataIterator;
 
-  const TableS *m_currentTable;
+  TableS *m_currentTable;
   AttributeData *allAttrData;
-  bool prepareRecord(const TableS &);
+  bool prepareRecord(TableS &);
 
 public:
   TupleS() {
@@ -108,7 +108,7 @@ public:
   TupleS(const TupleS& tuple); // disable copy constructor
   TupleS & operator=(const TupleS& tuple);
   int getNoOfAttributes() const;
-  const TableS * getTable() const;
+  TableS * getTable() const;
   const AttributeDesc * getDesc(int i) const;
   AttributeData * getData(int i) const;
 }; // class TupleS
@@ -130,6 +130,9 @@ class TableS {
   Uint32 m_noOfNullable;
   Uint32 m_nullBitmaskSize;
 
+  Uint32 m_auto_val_id;
+  Uint64 m_max_auto_val;
+
   int pos;
 
   void createAttr(NdbDictionary::Column *column);
@@ -170,6 +173,42 @@ public:
     return allAttributesDesc.size();
   };
 
+  bool have_auto_inc() const {
+    return m_auto_val_id != ~(Uint32)0;
+  };
+
+  bool have_auto_inc(Uint32 id) const {
+    return m_auto_val_id == id;
+  };
+
+  Uint64 get_max_auto_val() const {
+    return m_max_auto_val;
+  };
+
+  void update_max_auto_val(const char *data, int size) {
+    Uint64 val= 0;
+    switch(size){
+    case 8:
+      val= *(Uint8*)data;
+      break;
+    case 16:
+      val= *(Uint16*)data;
+      break;
+    case 24:
+      val= (0xffffff)&*(Uint32*)data;
+      break;
+    case 32:
+      val= *(Uint32*)data;
+      break;
+    case 64:
+      val= *(Uint64*)data;
+      break;
+    default:
+      return;
+    };
+    if(val > m_max_auto_val)
+      m_max_auto_val= val;
+  };
   /**
    * Get attribute descriptor
    */
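
The size argument passed to update_max_auto_val() by the tuple_a() and logEntry() hunks further down is the attribute width in bits, which is why the switch above matches 8/16/24/32/64. A minimal standalone sketch of the same extraction, assuming little-endian attribute buffers and plain <stdint.h> types instead of the NDB Uint* typedefs (read_candidate is an illustrative name, not part of the patch):

#include <stdint.h>
#include <string.h>

/* Hypothetical helper, not committed code: read an unsigned value of `bits`
   width from a raw little-endian attribute buffer, mirroring what
   TableS::update_max_auto_val() does before comparing it to the running
   maximum. */
static uint64_t read_candidate(const char *data, int bits)
{
  switch (bits) {
  case 8:  { uint8_t  v; memcpy(&v, data, 1); return v; }
  case 16: { uint16_t v; memcpy(&v, data, 2); return v; }
  case 24: { uint32_t v = 0; memcpy(&v, data, 3); return v; } /* MEDIUMINT: low 3 bytes */
  case 32: { uint32_t v; memcpy(&v, data, 4); return v; }
  case 64: { uint64_t v; memcpy(&v, data, 8); return v; }
  default: return 0; /* unsupported width contributes nothing to the maximum */
  }
}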
@@ -245,7 +284,7 @@ public:
   Uint32 getNoOfTables() const { return allTables.size();}
 
   const TableS * operator[](int i) const { return allTables[i];}
-  const TableS * getTable(Uint32 tableId) const;
+  TableS * getTable(Uint32 tableId) const;
 
   Uint32 getStopGCP() const;
 }; // RestoreMetaData
@@ -254,7 +293,7 @@ public:
 class RestoreDataIterator : public BackupFile {
   const RestoreMetaData & m_metaData;
   Uint32 m_count;
-  const TableS* m_currentTable;
+  TableS* m_currentTable;
   TupleS m_tuple;
 
 public:
@@ -278,7 +317,7 @@ public:
     LE_UPDATE
   };
   EntryType m_type;
-  const TableS * m_table;
+  TableS * m_table;
   Vector<AttributeS*> m_values;
   Vector<AttributeS*> m_values_e;
   AttributeS *add_attr() {
@@ -30,6 +30,7 @@ public:
   virtual void endOfTuples(){}
   virtual void logEntry(const LogEntry &){}
   virtual void endOfLogEntrys(){}
+  virtual bool finalize_table(const TableS &){return true;}
 };
 
 #endif
@@ -130,6 +130,21 @@ BackupRestore::get_table(const NdbDictionary::Table* tab){
   return m_cache.m_new_table;
 }
 
+bool
+BackupRestore::finalize_table(const TableS & table){
+  bool ret= true;
+  if (!m_restore && !m_restore_meta)
+    return ret;
+  if (table.have_auto_inc())
+  {
+    Uint64 max_val= table.get_max_auto_val();
+    Uint64 auto_val= m_ndb->readAutoIncrementValue(get_table(table.m_dictTable));
+    if (max_val+1 > auto_val || auto_val == ~(Uint64)0)
+      ret= m_ndb->setAutoIncrementValue(get_table(table.m_dictTable), max_val+1, false);
+  }
+  return ret;
+}
+
 bool
 BackupRestore::table(const TableS & table){
   if (!m_restore && !m_restore_meta)
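
finalize_table() runs once per table after all data and log entries have been applied (see the main() hunk further down): it pushes NDB's auto-increment counter past the largest value seen in the restored rows so that inserts made after the restore cannot collide with restored keys. A rough standalone sketch of that adjustment, assuming an already-connected Ndb object; readAutoIncrementValue/setAutoIncrementValue are the calls the diff itself uses, while bump_auto_increment is an illustrative name and error handling is omitted:

/* Hedged sketch, not the committed code: raise the auto-increment counter of
   `tab` above `max_restored`, leaving it alone if it is already high enough.
   ~(Uint64)0 is the "not yet set" value that the metadata-restore path writes
   (see the next hunk). */
static bool bump_auto_increment(Ndb *ndb, const NdbDictionary::Table *tab,
                                Uint64 max_restored)
{
  Uint64 current = ndb->readAutoIncrementValue(tab);
  if (max_restored + 1 > current || current == ~(Uint64)0)
    return ndb->setAutoIncrementValue(tab, max_restored + 1, false);
  return true;
}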
@@ -179,6 +194,9 @@ BackupRestore::table(const TableS & table){
     err << "Unable to find table: " << split[2].c_str() << endl;
     return false;
   }
+  if(m_restore_meta){
+    m_ndb->setAutoIncrementValue(tab, ~(Uint64)0, false);
+  }
   const NdbDictionary::Table* null = 0;
   m_new_tables.fill(table.m_dictTable->getTableId(), null);
   m_new_tables[table.m_dictTable->getTableId()] = tab;
@@ -316,6 +334,10 @@ void BackupRestore::tuple_a(restore_callback_t *cb)
     int arraySize = attr_desc->arraySize;
     char * dataPtr = attr_data->string_value;
     Uint32 length = (size * arraySize) / 8;
+
+    if (j == 0 && tup.getTable()->have_auto_inc(i))
+      tup.getTable()->update_max_auto_val(dataPtr,size);
+
     if (attr_desc->m_column->getPrimaryKey())
     {
       if (j == 1) continue;
@@ -510,6 +532,9 @@ BackupRestore::logEntry(const LogEntry & tup)
     int arraySize = attr->Desc->arraySize;
     const char * dataPtr = attr->Data.string_value;
 
+    if (tup.m_table->have_auto_inc(attr->Desc->attrId))
+      tup.m_table->update_max_auto_val(dataPtr,size);
+
     const Uint32 length = (size / 8) * arraySize;
     if (attr->Desc->m_column->getPrimaryKey())
       op->equal(attr->Desc->attrId, dataPtr, length);
@@ -59,6 +59,7 @@ public:
   virtual void endOfTuples();
   virtual void logEntry(const LogEntry &);
   virtual void endOfLogEntrys();
+  virtual bool finalize_table(const TableS &);
   void connectToMysql();
   Ndb * m_ndb;
   bool m_restore;
@@ -355,6 +355,20 @@ main(int argc, const char** argv)
       logIter.validateFooter(); //not implemented
       for (i= 0; i < g_consumers.size(); i++)
         g_consumers[i]->endOfLogEntrys();
+      for(i = 0; i<metaData.getNoOfTables(); i++)
+      {
+        if (checkSysTable(metaData[i]->getTableName()))
+        {
+          for(Uint32 j= 0; j < g_consumers.size(); j++)
+            if (!g_consumers[j]->finalize_table(* metaData[i]))
+            {
+              ndbout_c("Restore: Failed to finalize restore table: %s. "
+                       "Exiting...",
+                       metaData[i]->getTableName());
+              return -11;
+            }
+        }
+      }
     }
   }
   clearConsumers();
@@ -399,7 +399,7 @@ int ha_ndbcluster::set_ndb_key(NdbOperation *ndb_op, Field *field,
 */
 
 int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
-                                 uint fieldnr)
+                                 uint fieldnr, bool *set_blob_value)
 {
   const byte* field_ptr= field->ptr;
   uint32 pack_len= field->pack_length();
@@ -444,6 +444,8 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
                          (unsigned)blob_ptr, blob_len));
     DBUG_DUMP("value", (char*)blob_ptr, min(blob_len, 26));
 
+    if (set_blob_value)
+      *set_blob_value= true;
     // No callback needed to write value
     DBUG_RETURN(ndb_blob->setValue(blob_ptr, blob_len) != 0);
   }
@@ -1593,11 +1595,12 @@ int ha_ndbcluster::write_row(byte *record)
   }
 
   // Set non-key attribute(s)
+  bool set_blob_value= false;
   for (i= 0; i < table->fields; i++)
   {
     Field *field= table->field[i];
     if (!(field->flags & PRI_KEY_FLAG) &&
-        set_ndb_value(op, field, i))
+        set_ndb_value(op, field, i, &set_blob_value))
     {
       skip_auto_increment= true;
       ERR_RETURN(op->getNdbError());
@@ -1616,7 +1619,7 @@ int ha_ndbcluster::write_row(byte *record)
     bulk_insert_not_flushed= true;
     if ((rows_to_insert == 1) ||
         ((rows_inserted % bulk_insert_rows) == 0) ||
-        uses_blob_value(false) != 0)
+        set_blob_value)
     {
       THD *thd= current_thd;
       // Send rows to NDB
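
The write_row() hunks above replace the unconditional uses_blob_value() probe with a flag that is set only when the current row actually wrote a blob; such rows are sent to NDB immediately instead of being batched, presumably because the blob value passed to NdbBlob::setValue() refers to buffers that are reused for the next row. A compressed sketch of the resulting flush decision; need_immediate_flush is an illustrative name and the counters are simplified to plain integers:

/* Hedged sketch of the post-change batching rule in ha_ndbcluster::write_row():
   flush pending operations for a single-row insert, when the bulk-insert batch
   is full, or when the row just written set a blob value. */
static bool need_immediate_flush(unsigned long rows_to_insert,
                                 unsigned long rows_inserted,
                                 unsigned long bulk_insert_rows,
                                 bool set_blob_value)
{
  return rows_to_insert == 1 ||
         (rows_inserted % bulk_insert_rows) == 0 ||
         set_blob_value;
}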
@@ -210,7 +210,7 @@ class ha_ndbcluster: public handler
                   uint fieldnr, const byte* field_ptr);
   int set_ndb_key(NdbOperation*, Field *field,
                   uint fieldnr, const byte* field_ptr);
-  int set_ndb_value(NdbOperation*, Field *field, uint fieldnr);
+  int set_ndb_value(NdbOperation*, Field *field, uint fieldnr, bool *set_blob_value= 0);
   int get_ndb_value(NdbOperation*, Field *field, uint fieldnr, byte*);
   friend int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg);
   int get_ndb_blobs_value(NdbBlob *last_ndb_blob);
@@ -6923,7 +6923,10 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
     reverse=flag;				// Remember if reverse
     key_part++;
   }
-  *used_key_parts= (uint) (key_part - table->key_info[idx].key_part);
+  uint tmp= (uint) (key_part - table->key_info[idx].key_part);
+  if (reverse == -1 && !(table->file->index_flags(idx,tmp-1, 1) & HA_READ_PREV))
+    DBUG_RETURN(0);
+  *used_key_parts= tmp;
   DBUG_RETURN(reverse);
 }
 
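
The three sql_select.cc hunks belong together: the HA_READ_PREV capability check moves into test_if_order_by_key() itself, so a key that would have to be scanned backwards on an engine that cannot do so is rejected as soon as the reverse order is detected, and the two copies of the same check inside test_if_skip_sort_order() below become redundant and are removed. A hedged restatement of the consolidated rule; order_by_key_result is an illustrative name, not a function in the patch:

/* Sketch only: after this change test_if_order_by_key() returns
     1  -> the key delivers the ORDER BY order as-is,
    -1  -> it delivers the order when read backwards AND the handler advertises
           HA_READ_PREV for the last used key part,
     0  -> the key cannot be used (including the backwards-but-unsupported case). */
static int order_by_key_result(int reverse, bool engine_can_read_prev)
{
  if (reverse == -1 && !engine_can_read_prev)
    return 0;          /* caller falls back to filesort */
  return reverse;
}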
@@ -7120,10 +7123,6 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
       */
       if (!select->quick->reverse_sorted())
       {
-        // here used_key_parts >0
-        if (!(table->file->index_flags(ref_key,used_key_parts-1, 1)
-              & HA_READ_PREV))
-          DBUG_RETURN(0);			// Use filesort
         // ORDER BY range_key DESC
         QUICK_SELECT_DESC *tmp=new QUICK_SELECT_DESC(select->quick,
                                                      used_key_parts);
@@ -7144,9 +7143,6 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
 	Use a traversal function that starts by reading the last row
 	with key part (A) and then traverse the index backwards.
       */
-      if (!(table->file->index_flags(ref_key,used_key_parts-1, 1)
-            & HA_READ_PREV))
-        DBUG_RETURN(0);			// Use filesort
       tab->read_first_record= join_read_last_key;
       tab->read_record.read_record= join_read_prev_same;
       /* fall through */
@@ -7192,7 +7188,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
     if (keys.is_set(nr))
     {
       int flag;
-      if ((flag=test_if_order_by_key(order, table, nr, &not_used)))
+      if (flag=test_if_order_by_key(order, table, nr, &not_used))
       {
 	if (!no_changes)
 	{