
Checkpointing some additional KPIs to track down bits of interest.

Patrick LeBlanc
2019-08-08 14:54:24 -05:00
parent 583089c6ce
commit fdbbfd44e0
5 changed files with 29 additions and 15 deletions


@@ -65,7 +65,7 @@ IOCoordinator::IOCoordinator()
journalPath = cache->getJournalPath();
bytesRead = bytesWritten = filesOpened = filesCreated = filesCopied = filesDeleted =
bytesCopied = filesTruncated = listingCount = 0;
bytesCopied = filesTruncated = listingCount = callsToWrite = 0;
iocFilesOpened = iocObjectsCreated = iocJournalsCreated = iocBytesWritten = iocFilesDeleted = 0;
}
@@ -96,6 +96,7 @@ void IOCoordinator::printKPIs() const
cout << "\t\tfilesCopied = " << filesCopied << endl;
cout << "\t\tfilesDeleted = " << filesDeleted << endl;
cout << "\t\tfilesTruncated = " << filesTruncated << endl;
cout << "\t\tcallsToWrite = " << callsToWrite << endl;
cout << "\tIOC's POV" << endl;
cout << "\t\tiocFilesOpened = " << iocFilesOpened << endl;
cout << "\t\tiocObjectsCreated = " << iocObjectsCreated << endl;
@@ -282,6 +283,7 @@ out:
ssize_t IOCoordinator::write(const char *_filename, const uint8_t *data, off_t offset, size_t length)
{
++callsToWrite;
bf::path filename = ownership.get(_filename);
const bf::path firstDir = *(filename.begin());
bytesWritten += length;
@@ -822,7 +824,6 @@ int IOCoordinator::copyFile(const char *_filename1, const char *_filename2)
CloudStorage *cs = CloudStorage::get();
Synchronizer *sync = Synchronizer::get();
bf::path metaFile1 = metaPath/(p1.string() + ".meta");
bf::path metaFile2 = metaPath/(p2.string() + ".meta");
int err;
@@ -853,7 +854,6 @@ int IOCoordinator::copyFile(const char *_filename1, const char *_filename2)
vector<pair<string, size_t> > newJournalEntries;
ScopedReadLock lock(this, filename1);
ScopedWriteLock lock2(this, filename2);
MetadataFile meta1(metaFile1, MetadataFile::no_create_t(),false);
MetadataFile meta2(metaFile2, MetadataFile::no_create_t(),false);
vector<metadataObject> objects = meta1.metadataRead(0, meta1.getLength());
@@ -967,9 +967,9 @@ const bf::path &IOCoordinator::getMetadataPath() const
// first byte after the header.
// update: had to make it also return the header; the boost json parser does not stop at either
// a null char or the end of an object.
boost::shared_array<char> seekToEndOfHeader1(int fd)
boost::shared_array<char> seekToEndOfHeader1(int fd, size_t *_bytesRead)
{
::lseek(fd, 0, SEEK_SET);
//::lseek(fd, 0, SEEK_SET);
boost::shared_array<char> ret(new char[100]);
int err;
@@ -984,6 +984,7 @@ boost::shared_array<char> seekToEndOfHeader1(int fd)
if (ret[i] == 0)
{
::lseek(fd, i+1, SEEK_SET);
*_bytesRead = i + 1;
return ret;
}
}
@@ -1013,7 +1014,7 @@ boost::shared_array<uint8_t> IOCoordinator::mergeJournal(const char *object, con
// grab the journal header, make sure the version is 1, and get the max offset
boost::shared_array<char> headertxt = seekToEndOfHeader1(journalFD);
boost::shared_array<char> headertxt = seekToEndOfHeader1(journalFD, &l_bytesRead);
stringstream ss;
ss << headertxt.get();
boost::property_tree::ptree header;
@@ -1024,6 +1025,7 @@ boost::shared_array<uint8_t> IOCoordinator::mergeJournal(const char *object, con
// read the object into memory
size_t count = 0;
if (offset != 0)
::lseek(objFD, offset, SEEK_SET);
while (count < len) {
int err = ::read(objFD, &ret[count], len - count);
@@ -1045,7 +1047,7 @@ boost::shared_array<uint8_t> IOCoordinator::mergeJournal(const char *object, con
}
count += err;
}
l_bytesRead += len;
l_bytesRead += count;
// start processing the entries
while (1)
@@ -1066,7 +1068,7 @@ boost::shared_array<uint8_t> IOCoordinator::mergeJournal(const char *object, con
uint64_t startReadingAt = max(offlen[0], (uint64_t) offset);
uint64_t lengthOfRead = min(lastBufOffset, lastJournalOffset) - startReadingAt;
//cout << "MJ: startReadingAt = " << startReadingAt << " lengthOfRead = " << lengthOfRead << endl;
//cout << "MJ: startReadingAt = " << startReadingAt << " offlen[0] = " << offlen[0] << endl;
// seek to the portion of the entry to start reading at
if (startReadingAt != offlen[0])
@@ -1117,7 +1119,7 @@ int IOCoordinator::mergeJournalInMem(boost::shared_array<uint8_t> &objData, size
ScopedCloser s(journalFD);
// grab the journal header and make sure the version is 1
boost::shared_array<char> headertxt = seekToEndOfHeader1(journalFD);
boost::shared_array<char> headertxt = seekToEndOfHeader1(journalFD, &l_bytesRead);
stringstream ss;
ss << headertxt.get();
boost::property_tree::ptree header;


@@ -23,7 +23,7 @@
namespace storagemanager
{
boost::shared_array<char> seekToEndOfHeader1(int fd);
boost::shared_array<char> seekToEndOfHeader1(int fd, size_t *bytesRead);
class IOCoordinator : public boost::noncopyable
{
@@ -102,7 +102,7 @@ class IOCoordinator : public boost::noncopyable
// some KPIs
// from the user's POV...
size_t bytesRead, bytesWritten, filesOpened, filesCreated, filesCopied;
size_t filesDeleted, bytesCopied, filesTruncated, listingCount;
size_t filesDeleted, bytesCopied, filesTruncated, listingCount, callsToWrite;
// from IOC's pov...
size_t iocFilesOpened, iocObjectsCreated, iocJournalsCreated, iocFilesDeleted;


@@ -156,7 +156,8 @@ int Replicator::addJournalEntry(const boost::filesystem::path &filename, const u
{
// read the existing header and check if max_offset needs to be updated
//OPEN(journalFilename.c_str(), O_RDWR);
boost::shared_array<char> headertxt = seekToEndOfHeader1(fd);
size_t tmp;
boost::shared_array<char> headertxt = seekToEndOfHeader1(fd, &tmp);
stringstream ss;
ss << headertxt.get();
boost::property_tree::ptree header;


@@ -43,7 +43,8 @@ Synchronizer::Synchronizer() : maxUploads(0)
cs = CloudStorage::get();
numBytesRead = numBytesWritten = numBytesUploaded = numBytesDownloaded = mergeDiff =
flushesTriggeredBySize = flushesTriggeredByTimer = 0;
flushesTriggeredBySize = flushesTriggeredByTimer = journalsMerged =
objectsSyncedWithNoJournal = bytesReadBySync = bytesReadBySyncWithJournal = 0;
string stmp = config->getValue("ObjectStorage", "max_concurrent_uploads");
try
@@ -460,7 +461,9 @@ void Synchronizer::synchronize(const string &sourceFile, list<string>::iterator
if (err)
throw runtime_error(string("synchronize(): uploading ") + key + ", got " + strerror_r(errno, buf, 80));
numBytesRead += mdEntry->length;
bytesReadBySync += mdEntry->length;
numBytesUploaded += mdEntry->length;
++objectsSyncedWithNoJournal;
replicator->remove((cachePath/key), Replicator::NO_LOCAL);
}
@@ -579,6 +582,7 @@ void Synchronizer::synchronizeWithJournal(const string &sourceFile, list<string>
return;
}
numBytesRead += _bytesRead;
bytesReadBySyncWithJournal += _bytesRead;
originalSize += _bytesRead;
}
else
@@ -595,6 +599,7 @@ void Synchronizer::synchronizeWithJournal(const string &sourceFile, list<string>
return;
}
numBytesRead += _bytesRead;
bytesReadBySyncWithJournal += _bytesRead;
originalSize = _bytesRead;
}
@@ -647,6 +652,7 @@ void Synchronizer::synchronizeWithJournal(const string &sourceFile, list<string>
}
mergeDiff += size - originalSize;
++journalsMerged;
// update the metadata for the source file
md.updateEntry(MetadataFile::getOffsetFromKey(key), newCloudKey, size);
@@ -696,12 +702,16 @@ void Synchronizer::printKPIs() const
{
cout << "Synchronizer" << endl;
cout << "\tnumBytesRead: " << numBytesRead << endl;
cout << "\tbytesReadBySync: " << bytesReadBySync << endl;
cout << "\tbytesReadBySyncWithJournal: " << bytesReadBySyncWithJournal << endl;
cout << "\tnumBytesWritten: " << numBytesWritten << endl;
cout << "\tnumBytesUploaded: " << numBytesUploaded << endl;
cout << "\tnumBytesDownloaded: " << numBytesDownloaded << endl;
cout << "\tmergeDiff: " << mergeDiff << endl;
cout << "\tflushesTriggeredBySize: " << flushesTriggeredBySize << endl;
cout << "\tflushesTriggeredByTimer: " << flushesTriggeredByTimer << endl;
cout << "\tjournalsMerged: " << journalsMerged << endl;
cout << "\tobjectsSyncedWithNoJournal: " << objectsSyncedWithNoJournal << endl;
}
/* The helper objects & fcns */


@@ -100,7 +100,8 @@ class Synchronizer : public boost::noncopyable
// some KPIs
size_t numBytesRead, numBytesWritten, numBytesUploaded, numBytesDownloaded,
flushesTriggeredBySize, flushesTriggeredByTimer;
flushesTriggeredBySize, flushesTriggeredByTimer, journalsMerged, objectsSyncedWithNoJournal,
bytesReadBySync, bytesReadBySyncWithJournal;
ssize_t mergeDiff;
SMLogging *logger;