
Improve batch inserts.

  1) Instead of making a DBRM call to writeVBEntry() per block, we
     make a single bulkWriteVBEntry() call per batch. This can yield
     non-trivial reductions in the overhead of these calls when the
     batch size is large (see the sketch after this list).

  2) In dmlproc, do not deserialize the whole insertpackage, which
     consists of the complete record set per column; that would be
     wasteful, as we only need a few metadata fields from insertpackage
     here. This is currently done only for batch inserts; it should
     also be applied to single inserts.
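
A minimal caller-side sketch of change 1), assuming the write engine
already has the transaction id, the version-buffer OID, and the per-block
LBIDs and VB FBOs collected for the current batch. Only DBRM::writeVBEntry()
and DBRM::bulkWriteVBEntry() come from this change; the surrounding function
and variable names are hypothetical:

    // Hypothetical batching sketch, not the actual write-engine code.
    // #include "dbrm.h"   // BRM::DBRM and BRM typedefs (header path assumed)
    #include <cstdint>
    #include <vector>

    int recordVersionBufferEntries(BRM::DBRM& dbrm, BRM::VER_t transID,
                                   BRM::OID_t vbOID,
                                   const std::vector<BRM::LBID_t>& lbids,
                                   const std::vector<uint32_t>& vbFBOs)
    {
        // Old pattern: one DBRM round trip per block.
        // int rc = 0;
        // for (size_t i = 0; i < lbids.size() && rc == 0; ++i)
        //     rc = dbrm.writeVBEntry(transID, lbids[i], vbOID, vbFBOs[i]);
        // return rc;

        // New pattern: one DBRM round trip for the whole batch.
        return dbrm.bulkWriteVBEntry(transID, lbids, vbOID, vbFBOs);
    }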
Gagan Goel
2020-05-10 19:38:06 -04:00
parent 04fdacb927
commit d4d0ebdf5d
13 changed files with 306 additions and 36 deletions


@@ -2226,6 +2226,42 @@ int DBRM::writeVBEntry(VER_t transID, LBID_t lbid, OID_t vbOID,
return err;
}

int DBRM::bulkWriteVBEntry(VER_t transID,
                           const std::vector<BRM::LBID_t>& lbids,
                           OID_t vbOID,
                           const std::vector<uint32_t>& vbFBOs) DBRM_THROW
{
#ifdef BRM_INFO
    if (fDebug)
    {
        TRACER_WRITELATER("bulkWriteVBEntry");
        TRACER_WRITE;
    }
#endif

    ByteStream command, response;
    uint8_t err;

    // Build one BULK_WRITE_VB_ENTRY command carrying the version-buffer
    // entries for the whole batch: transaction id, LBIDs, VB OID, VB FBOs.
    command << BULK_WRITE_VB_ENTRY << (uint32_t) transID;
    serializeInlineVector(command, lbids);
    command << (uint32_t) vbOID;
    serializeInlineVector(command, vbFBOs);

    // Single network round trip for the entire batch (instead of one per block).
    err = send_recv(command, response);

    if (err != ERR_OK)
        return err;

    if (response.length() != 1)
        return ERR_NETWORK;

    // The response is a single status byte.
    response >> err;
    CHECK_EMPTY(response);
    return err;
}

struct _entry
{
    _entry(LBID_t l) : lbid(l) { };
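
For reference, a hedged sketch of what the receiving side of this message
has to do, mirroring the serialization order built up in bulkWriteVBEntry()
above. The handler name is hypothetical, and deserializeInlineVector() is
assumed to be the existing counterpart of serializeInlineVector(); this is
not code from the commit:

    // Hypothetical handler-side sketch: unpack BULK_WRITE_VB_ENTRY in the
    // same order it was serialized (the command byte is assumed to have
    // been consumed during dispatch).
    void handleBulkWriteVBEntry(messageqcpp::ByteStream& command)
    {
        uint32_t transID, vbOID;
        std::vector<BRM::LBID_t> lbids;
        std::vector<uint32_t> vbFBOs;

        command >> transID;                        // (uint32_t) transID
        deserializeInlineVector(command, lbids);   // batch of LBIDs
        command >> vbOID;                          // (uint32_t) vbOID
        deserializeInlineVector(command, vbFBOs);  // one VB FBO per LBID

        // ...record one version-buffer entry per (lbid, vbFBO) pair,
        // then send back the single-byte error code the client checks.
    }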