mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git synced 2025-07-04 04:42:30 +03:00

Reformat all code to coding standard

This commit is contained in:
Andrew Hutchings
2017-10-26 17:18:17 +01:00
parent 4985f3456e
commit 01446d1e22
1296 changed files with 403852 additions and 353747 deletions


@@ -59,77 +59,83 @@ volatile uint32_t MultiReturnCode;
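// Pulls the response code out of a CACHE_OP_RESULTS reply. Returns 1 (error)
// if the stream is too short or carries an unexpected command.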
int32_t extractRespCode(const ByteStream& bs)
{
    if (bs.length() < (sizeof(ISMPacketHeader) + sizeof(int32_t)))
        return 1;

    const uint8_t* bytePtr = bs.buf();
    const ISMPacketHeader* hdrp = reinterpret_cast<const ISMPacketHeader*>(bytePtr);

    if (hdrp->Command != CACHE_OP_RESULTS)
        return 1;

    const int32_t* resp = reinterpret_cast<const int32_t*>(bytePtr + sizeof(ISMPacketHeader));
    return *resp;
}
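// Functor run on one thread per PM connection: it sends the prepared request
// to a single PrimProc, waits up to 10 seconds for the reply, and records any
// failure in the shared MultiReturnCode via an atomic compare-and-swap.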
class CacheOpThread
{
public:
    CacheOpThread(const string& svr, const ByteStream& outBs) : fServerName(svr), fOutBs(outBs) {}
    ~CacheOpThread() {}

    void operator()()
    {
        struct timespec ts = { 10, 0 };
        int32_t rc = 0;
        scoped_ptr<MessageQueueClient> cl(new MessageQueueClient(fServerName));

        try
        {
            cl->write(fOutBs);
            rc = extractRespCode(cl->read(&ts));
        }
        catch (...)
        {
            rc = 1;
        }

        if (rc != 0)
            atomicops::atomicCAS<uint32_t>(&MultiReturnCode, 0, 1);
    }

private:
    //CacheOpThread(const CacheOpThread& rhs);
    //CacheOpThread& operator=(const CacheOpThread& rhs);

    string fServerName;
    ByteStream fOutBs;
};
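// Broadcasts the request to every configured PrimitiveServer ("PMS1".."PMSn",
// with n read from PrimitiveServers/Count in the config), one thread per PM,
// and returns 0 only if every PM acknowledged the operation.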
int sendToAll(const ByteStream& outBs)
{
    //Not thread-safe: external synchronization is needed!

    // added code here to flush any running primprocs that may be active
    // TODO: we really only need to flush each unique PrimProc, but we can't tell from the
    //  config file which those are, so use the same logic as joblist::DistributedEngineComm
    Config* cf = Config::makeConfig();
    const string section = "PrimitiveServers";
    int cnt = static_cast<int>(Config::fromText(cf->getConfig(section, "Count")));

    if (cnt <= 0) cnt = 1;

    thread_group tg;
    int rc = 0;
    MultiReturnCode = 0;

    for (int i = 0; i < cnt; i++)
    {
        ostringstream oss;
        oss << "PMS" << (i + 1);
        tg.create_thread(CacheOpThread(oss.str(), outBs));
    }

    tg.join_all();

    if (MultiReturnCode != 0)
        rc = -1;

    return rc;
}
}
@@ -142,24 +148,25 @@ namespace cacheutils
*/
int flushPrimProcCache()
{
    mutex::scoped_lock lk(CacheOpsMutex);

    try
    {
        const int msgsize = sizeof(ISMPacketHeader);
        uint8_t msgbuf[msgsize];
        memset(msgbuf, 0, sizeof(ISMPacketHeader));
        ISMPacketHeader* hdrp = reinterpret_cast<ISMPacketHeader*>(&msgbuf[0]);
        hdrp->Command = CACHE_FLUSH;

        ByteStream bs(msgbuf, msgsize);
        int rc = sendToAll(bs);
        return rc;
    }
    catch (...)
    {
    }

    return -1;
}
/**
@@ -167,165 +174,176 @@ int flushPrimProcCache()
*/
int flushPrimProcBlocks(const BRM::BlockList_t& list)
{
    if (list.empty()) return 0;

    mutex::scoped_lock lk(CacheOpsMutex);

#if defined(__LP64__) || defined(_WIN64)

    if (list.size() > numeric_limits<uint32_t>::max()) return -1;

#endif

    try
    {
        const size_t msgsize = sizeof(ISMPacketHeader) + sizeof(uint32_t) + sizeof(LbidAtVer) * list.size();
        scoped_array<uint8_t> msgbuf(new uint8_t[msgsize]);
        memset(msgbuf.get(), 0, sizeof(ISMPacketHeader));
        ISMPacketHeader* hdrp = reinterpret_cast<ISMPacketHeader*>(msgbuf.get());
        hdrp->Command = CACHE_CLEAN_VSS;
        uint32_t* cntp = reinterpret_cast<uint32_t*>(msgbuf.get() + sizeof(ISMPacketHeader));
        *cntp = static_cast<uint32_t>(list.size());
        LbidAtVer* itemp = reinterpret_cast<LbidAtVer*>(msgbuf.get() + sizeof(ISMPacketHeader) + sizeof(uint32_t));
        BlockList_t::const_iterator iter = list.begin();
        BlockList_t::const_iterator end = list.end();

        while (iter != end)
        {
            itemp->LBID = static_cast<uint64_t>(iter->first);
            itemp->Ver = static_cast<uint32_t>(iter->second);
            ++itemp;
            ++iter;
        }

        ByteStream bs(msgbuf.get(), msgsize);
        int rc = sendToAll(bs);
        return rc;
    }
    catch (...)
    {
    }

    return -1;
}
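// Asks every PM to drop all cached versions of the given LBIDs
// (FLUSH_ALL_VERSION).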
int flushPrimProcAllverBlocks(const vector<LBID_t>& list)
{
    if (list.empty()) return 0;

    ByteStream bs(sizeof(ISMPacketHeader) + sizeof(uint32_t) + (sizeof(LBID_t) * list.size()));
    ISMPacketHeader* hdr;
    int rc;

    hdr = (ISMPacketHeader*) bs.getInputPtr();
    hdr->Command = FLUSH_ALL_VERSION;
    bs.advanceInputPtr(sizeof(ISMPacketHeader));
    bs << (uint32_t) list.size();
    bs.append((uint8_t*) &list[0], sizeof(LBID_t) * list.size());

    try
    {
        mutex::scoped_lock lk(CacheOpsMutex);
        rc = sendToAll(bs);
        return rc;
    }
    catch (...)
    {
    }

    return -1;
}
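// Flushes all cached blocks belonging to the given OIDs (CACHE_FLUSH_BY_OID).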
int flushOIDsFromCache(const vector<BRM::OID_t>& oids)
{
    /* Message format:
     *    ISMPacketHeader
     *    uint32_t - OID count
     *    uint32_t * - OID array
     */

    mutex::scoped_lock lk(CacheOpsMutex, defer_lock_t());

    ByteStream bs;
    ISMPacketHeader ism;
    uint32_t i;

    memset(&ism, 0, sizeof(ISMPacketHeader));
    ism.Command = CACHE_FLUSH_BY_OID;
    bs.load((uint8_t*) &ism, sizeof(ISMPacketHeader));
    bs << (uint32_t) oids.size();

    for (i = 0; i < oids.size(); i++)
        bs << (uint32_t) oids[i];

    lk.lock();
    return sendToAll(bs);
}
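// Flushes cached blocks for the given OIDs restricted to the listed logical
// partitions (CACHE_FLUSH_PARTITION).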
int flushPartition(const std::vector<BRM::OID_t>& oids, set<BRM::LogicalPartition>& partitionNums)
{
    /* Message format:
     *    ISMPacketHeader
     *    uint32_t - partition count
     *    LogicalPartition * - partitionNum
     *    uint32_t - OID count
     *    uint32_t * - OID array
     */

    mutex::scoped_lock lk(CacheOpsMutex, defer_lock_t());

    ByteStream bs;
    ISMPacketHeader ism;

    memset(&ism, 0, sizeof(ISMPacketHeader));
    ism.Command = CACHE_FLUSH_PARTITION;
    bs.load((uint8_t*) &ism, sizeof(ISMPacketHeader));
    serializeSet<BRM::LogicalPartition>(bs, partitionNums);
    serializeInlineVector<BRM::OID_t>(bs, oids);

    lk.lock();
    return sendToAll(bs);
}
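// Tells every PM to close its cached file descriptors (CACHE_DROP_FDS).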
int dropPrimProcFdCache()
{
    const int msgsize = sizeof(ISMPacketHeader);
    uint8_t msgbuf[msgsize];
    memset(msgbuf, 0, sizeof(ISMPacketHeader));
    ISMPacketHeader* hdrp = reinterpret_cast<ISMPacketHeader*>(&msgbuf[0]);
    hdrp->Command = CACHE_DROP_FDS;
    ByteStream bs(msgbuf, msgsize);

    try
    {
        mutex::scoped_lock lk(CacheOpsMutex);
        int rc = sendToAll(bs);
        return rc;
    }
    catch (...)
    {
    }

    return -1;
}
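// Unlike the broadcast operations above, this targets a single PM: it asks
// PMS<pmId> to purge the file-descriptor cache entries for the given files
// (CACHE_PURGE_FDS) and returns that PM's response code.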
int purgePrimProcFdCache(const std::vector<BRM::FileInfo> files, const int pmId)
{
    const int msgsize = sizeof(ISMPacketHeader);
    uint8_t msgbuf[msgsize];
    memset(msgbuf, 0, sizeof(ISMPacketHeader));
    ISMPacketHeader* hdrp = reinterpret_cast<ISMPacketHeader*>(&msgbuf[0]);
    hdrp->Command = CACHE_PURGE_FDS;
    ByteStream bs(msgbuf, msgsize);
    serializeInlineVector<FileInfo>(bs, files);

    int32_t rc = 0;

    try
    {
        struct timespec ts = { 10, 0 };
        ostringstream oss;
        oss << "PMS" << pmId;
        scoped_ptr<MessageQueueClient> cl(new MessageQueueClient(oss.str()));
        cl->write(bs);
        rc = extractRespCode(cl->read(&ts));
    }
    catch (...)
    {
        rc = -1;
    }

    return rc;
}
}