
Fixed several bugs exposed by setting the cache size to an unreasonably small
value.  Had to make a compromise to avoid a deadlock, though:
read/write/append/truncate can now exceed the cache size limit temporarily,
and the cache is reconciled at the end of the respective operation.

For example, given a 100MB cache and a 500MB read(), all 500MB of the data
being read stays in the cache until the read completes; 400MB of it is then
evicted.  The same applies on the write side.
Patrick LeBlanc
2019-06-28 13:20:31 -05:00
parent 91585b4c6d
commit 715d041a85
9 changed files with 84 additions and 44 deletions
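
To make the new contract concrete, here is a minimal, self-contained sketch of the pattern the commit message describes: an operation may push the cache past its limit while it runs, and the trailing doneReading()/doneWriting() call trims it back down. Only the method names read(), doneReading(), and makeSpace() are taken from this commit; the data structures, sizes, and bodies below are illustrative assumptions, not the storagemanager implementation.

// Sketch only: "exceed the limit during the op, reconcile at the end".
#include <cstddef>
#include <iostream>
#include <list>
#include <map>
#include <set>
#include <string>
#include <vector>

class CacheSketch
{
public:
    explicit CacheSketch(size_t maxBytes) : maxSize(maxBytes) {}

    // read() pins keys and admits them even if that temporarily exceeds maxSize.
    void read(const std::vector<std::string> &keys, size_t bytesEach)
    {
        for (const auto &k : keys)
        {
            pinned.insert(k);
            if (sizes.insert({k, bytesEach}).second)
            {
                lru.push_back(k);
                currentSize += bytesEach;   // may overshoot maxSize for now
            }
        }
    }

    // doneReading() unpins the keys, then trims the cache back under the limit.
    void doneReading(const std::vector<std::string> &keys)
    {
        for (const auto &k : keys)
            pinned.erase(k);
        makeSpace(0);
    }

    size_t size() const { return currentSize; }

private:
    // Evict unpinned LRU entries until currentSize + incoming fits under maxSize.
    void makeSpace(size_t incoming)
    {
        auto it = lru.begin();
        while (currentSize + incoming > maxSize && it != lru.end())
        {
            if (pinned.count(*it)) { ++it; continue; }   // cannot evict pinned entries
            currentSize -= sizes[*it];
            sizes.erase(*it);
            it = lru.erase(it);
        }
    }

    std::list<std::string> lru;
    std::map<std::string, size_t> sizes;
    std::set<std::string> pinned;
    size_t currentSize = 0;
    size_t maxSize;
};

int main()
{
    CacheSketch cache(100);             // "100MB" cache
    std::vector<std::string> keys{"a", "b", "c", "d", "e"};
    cache.read(keys, 100);              // 500 admitted, limit exceeded temporarily
    std::cout << cache.size() << "\n";  // 500
    cache.doneReading(keys);
    std::cout << cache.size() << "\n";  // trimmed back to 100
}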

View File

@@ -2,6 +2,7 @@
 #include "Cache.h"
 #include "Config.h"
 #include "Downloader.h"
+#include "Synchronizer.h"
 #include <iostream>
 #include <syslog.h>
 #include <boost/filesystem.hpp>
@@ -120,7 +121,6 @@ Cache::~Cache()
 void Cache::populate()
 {
-    Synchronizer *sync = Synchronizer::get();
     bf::directory_iterator dir(prefix);
     bf::directory_iterator dend;
     while (dir != dend)
@@ -147,9 +147,7 @@ void Cache::populate()
         if (bf::is_regular_file(p))
         {
             if (p.extension() == ".journal")
-            {
                 currentCacheSize += bf::file_size(*dir);
-                sync->newJournalEntry(p.stem());
             else
                 logger->log(LOG_WARNING, "Cache: found a file in the journal dir that does not belong '%s'", p.string().c_str());
         }
@@ -257,7 +255,7 @@ void Cache::read(const vector<string> &keys)
             TBD_t::iterator tbd_it = toBeDeleted.find(mit->lit);
             if (tbd_it != toBeDeleted.end())
             {
-                cout << "Saved one from being deleted" << endl;
+                //cout << "Saved one from being deleted" << endl;
                 toBeDeleted.erase(tbd_it);
             }
         }
@@ -297,7 +295,7 @@ void Cache::read(const vector<string> &keys)
         }
     }
 
     // fix cache size
-    _makeSpace(sum_sizes);
+    //_makeSpace(sum_sizes);
     currentCacheSize += sum_sizes;
 }
@@ -310,7 +308,12 @@ void Cache::doneReading(const vector<string> &keys)
         if (it != m_lru.end())
             removeFromDNE(it->lit);
     }
-    readyToDelete.notify_all();
+    _makeSpace(0);
 }
 
+void Cache::doneWriting()
+{
+    makeSpace(0);
+}
+
 Cache::DNEElement::DNEElement(const LRU_t::iterator &k) : key(k), refCount(1)
@@ -369,7 +372,7 @@ void Cache::newObject(const string &key, size_t size)
 {
     boost::unique_lock<boost::mutex> s(lru_mutex);
 
     assert(m_lru.find(key) == m_lru.end());
-    _makeSpace(size);
+    //_makeSpace(size);
     lru.push_back(key);
     LRU_t::iterator back = lru.end();
     m_lru.insert(--back);
@@ -379,7 +382,7 @@ void Cache::newObject(const string &key, size_t size)
 void Cache::newJournalEntry(size_t size)
 {
     boost::unique_lock<boost::mutex> s(lru_mutex);
-    _makeSpace(size);
+    //_makeSpace(size);
     currentCacheSize += size;
 }
@@ -432,7 +435,9 @@ void Cache::_makeSpace(size_t size)
     ssize_t thisMuch = currentCacheSize + size - maxCacheSize;
     if (thisMuch <= 0)
         return;
+    if (thisMuch > (ssize_t) currentCacheSize)
+        thisMuch = currentCacheSize;
 
     LRU_t::iterator it;
     while (thisMuch > 0 && !lru.empty())
     {
@@ -447,10 +452,8 @@ void Cache::_makeSpace(size_t size)
         }
 
         if (it == lru.end())
         {
-            // nothing can be deleted, wait for something to change
-            cout << "nothing can be deleted, waiting" << endl;
-            readyToDelete.wait(lru_mutex);
-            continue;
+            // nothing can be deleted right now
+            return;
         }
@@ -492,8 +495,17 @@ void Cache::_makeSpace(size_t size)
             lru.erase(it);
             size_t newSize = bf::file_size(cachedFile);
             replicator->remove(cachedFile, Replicator::LOCAL_ONLY);
-            currentCacheSize -= newSize;
-            thisMuch -= newSize;
+            if (newSize < currentCacheSize)
+            {
+                currentCacheSize -= newSize;
+                thisMuch -= newSize;
+            }
+            else
+            {
+                logger->log(LOG_WARNING, "Cache::makeSpace(): accounting error. Almost wrapped currentCacheSize on flush.");
+                currentCacheSize = 0;
+                thisMuch = 0;
+            }
         }
     }
 }
@@ -511,7 +523,7 @@ void Cache::rename(const string &oldKey, const string &newKey, ssize_t sizediff)
     auto lit = it->lit;
     m_lru.erase(it);
     int refCount = 0;
-    auto dne_it = doNotEvict.find(it->lit);
+    auto dne_it = doNotEvict.find(lit);
     if (dne_it != doNotEvict.end())
     {
         refCount = dne_it->refCount;
@@ -544,12 +556,11 @@ int Cache::ifExistsThenDelete(const string &key)
     boost::unique_lock<boost::mutex> s(lru_mutex);
 
     bool objectExists = false;
-    bool journalExists = bf::exists(journalPath);
 
     auto it = m_lru.find(key);
     if (it != m_lru.end())
     {
-        // let makeSpace() delete it if it's already being flushed
         if (toBeDeleted.find(it->lit) == toBeDeleted.end())
         {
             doNotEvict.erase(it->lit);
@@ -557,8 +568,11 @@ int Cache::ifExistsThenDelete(const string &key)
             m_lru.erase(it);
             objectExists = true;
         }
+        else   // let makeSpace() delete it if it's already in progress
+            return 0;
     }
 
-    //assert(objectExists == bf::exists(cachedPath));
+    bool journalExists = bf::exists(journalPath);
+    assert(objectExists == bf::exists(cachedPath));
     size_t objectSize = (objectExists ? bf::file_size(cachedPath) : 0);
     //size_t objectSize = (objectExists ? MetadataFile::getLengthFromKey(key) : 0);

View File

@@ -17,20 +17,26 @@
 namespace storagemanager
 {
 
-class Synchronizer;
 class Cache : public boost::noncopyable
 {
     public:
         static Cache *get();
         virtual ~Cache();
 
+        //reading fcns
+        // read() marks objects to be read s.t. they do not get flushed.
+        // after reading them, unlock the 'logical file', and call doneReading().
         void read(const std::vector<std::string> &keys);
         void doneReading(const std::vector<std::string> &keys);
         bool exists(const std::string &key) const;
         void exists(const std::vector<std::string> &keys, std::vector<bool> *out) const;
 
+        // writing fcns
+        // new*() fcns tell the cache data was added. After writing a set of objects,
+        // unlock the 'logical file', and call doneWriting().
         void newObject(const std::string &key, size_t size);
         void newJournalEntry(size_t size);
+        void doneWriting();
         void deletedObject(const std::string &key, size_t size);
         void deletedJournal(size_t size);
@@ -116,7 +122,6 @@ class Cache : public boost::noncopyable
         DNE_t doNotEvict;
         void addToDNE(const LRU_t::iterator &key);
         void removeFromDNE(const LRU_t::iterator &key);
-        boost::condition readyToDelete;
 
         // the to-be-deleted set. Elements removed from the LRU but not yet deleted will be here.
         // Elements are inserted and removed by makeSpace(). If read() references a file that is in this,
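
A short, self-contained sketch of the write-side ordering that the comments added to the Cache class above describe: account for the data while the 'logical file' is locked, release that lock, then call doneWriting() to reconcile the cache size. The types here are stand-ins for illustration only, not the storagemanager classes.

// Assumed caller-side ordering per the comments above; not the real code.
#include <cstddef>
#include <mutex>

struct FileLock { std::mutex m; };   // stands in for the 'logical file' lock

struct CacheStub
{
    size_t currentSize = 0, maxSize = 100;
    void newJournalEntry(size_t n) { currentSize += n; }  // may overshoot maxSize
    void doneWriting()                                    // trim after the op
    {
        if (currentSize > maxSize)
            currentSize = maxSize;   // stand-in for evicting LRU entries
    }
};

void writeOp(CacheStub &cache, FileLock &fl, size_t journalBytes)
{
    std::unique_lock<std::mutex> lock(fl.m);
    cache.newJournalEntry(journalBytes); // account for the data while locked
    lock.unlock();                       // release the file lock first...
    cache.doneWriting();                 // ...then reconcile the cache size
}

int main()
{
    CacheStub cache;
    FileLock fl;
    writeOp(cache, fl, 500);                   // overshoots the limit during the op
    return cache.currentSize == 100 ? 0 : 1;   // reconciled afterwards
}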

View File

@@ -1,7 +1,7 @@
 #include "IOCoordinator.h"
 #include "MetadataFile.h"
-#include "Utilities.h"
+#include "Synchronizer.h"
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <fcntl.h>
@@ -178,6 +178,7 @@ int IOCoordinator::read(const char *_filename, uint8_t *data, off_t offset, size
     }
     else if (errno != ENOENT)
     {
+        fileLock.unlock();
         cache->doneReading(keys);
         int l_errno = errno;
         logger->log(LOG_CRIT, "IOCoordinator::read(): Got an unexpected error opening %s, error was '%s'",
@@ -191,6 +192,7 @@ int IOCoordinator::read(const char *_filename, uint8_t *data, off_t offset, size
     fd = ::open(filename.c_str(), O_RDONLY);
     if (fd < 0)
     {
+        fileLock.unlock();
         cache->doneReading(keys);
         int l_errno = errno;
         logger->log(LOG_CRIT, "IOCoordinator::read(): Got an unexpected error opening %s, error was '%s'",
@@ -221,7 +223,7 @@ int IOCoordinator::read(const char *_filename, uint8_t *data, off_t offset, size
         off_t thisOffset = (count == 0 ? offset - object.offset : 0);
         // This checks and returns if the read is starting past EOF
         if (thisOffset >= (off_t) object.length)
-            return count;
+            goto out;
 
         // if this is the last object, the length of the read is length - count,
         // otherwise it is the length of the object - starting offset
@@ -233,6 +235,7 @@ int IOCoordinator::read(const char *_filename, uint8_t *data, off_t offset, size
             &data[count], thisOffset, thisLength);
         if (err)
         {
+            fileLock.unlock();
             cache->doneReading(keys);
             if (count == 0)
                 return -1;
@@ -243,6 +246,8 @@ int IOCoordinator::read(const char *_filename, uint8_t *data, off_t offset, size
         count += thisLength;
     }
 
+out:
+    fileLock.unlock();
     cache->doneReading(keys);
     // all done
     return count;
@@ -254,7 +259,10 @@ int IOCoordinator::write(const char *_filename, const uint8_t *data, off_t offse
     const char *filename = p.string().c_str();
 
     ScopedWriteLock lock(this, filename);
-    return _write(filename, data, offset, length);
+    int ret = _write(filename, data, offset, length);
+    lock.unlock();
+    cache->doneWriting();
+    return ret;
 }
 
 int IOCoordinator::_write(const char *filename, const uint8_t *data, off_t offset, size_t length)
@@ -298,7 +306,7 @@ int IOCoordinator::_write(const char *filename, const uint8_t *data, off_t offse
             writeLength = min(objectSize,dataRemaining);
             objectOffset = 0;
         }
-        cache->makeSpace(writeLength+JOURNAL_ENTRY_HEADER_SIZE);
+        //cache->makeSpace(writeLength+JOURNAL_ENTRY_HEADER_SIZE);
         err = replicator->addJournalEntry(i->key.c_str(),&data[count],objectOffset,writeLength);
         assert((uint) err == writeLength);
@@ -317,7 +325,6 @@ int IOCoordinator::_write(const char *filename, const uint8_t *data, off_t offse
         if ((writeLength + objectOffset) > i->length)
             metadata.updateEntryLength(i->offset, (writeLength + objectOffset));
         cache->newJournalEntry(writeLength+JOURNAL_ENTRY_HEADER_SIZE);
         synchronizer->newJournalEntry(i->key);
@@ -349,7 +356,7 @@ int IOCoordinator::_write(const char *filename, const uint8_t *data, off_t offse
         // writeLength is the data length passed to write()
         // objectOffset is 0 unless the write starts beyond the end of data
         // in that case need to add the null data to cachespace
-        cache->makeSpace(writeLength + objectOffset);
+        //cache->makeSpace(writeLength + objectOffset);
 
         metadataObject newObject = metadata.addMetadataObject(filename,(writeLength + objectOffset));
@@ -373,7 +380,6 @@ int IOCoordinator::_write(const char *filename, const uint8_t *data, off_t offse
         count += writeLength;
         dataRemaining -= writeLength;
     }
 
     synchronizer->newObjects(newObjectKeys);
     metadata.writeMetadata(filename);
@@ -419,7 +425,7 @@ int IOCoordinator::append(const char *_filename, const uint8_t *data, size_t len
         // figure out how much data to write to this object
         writeLength = min((objectSize - i->length),dataRemaining);
 
-        cache->makeSpace(writeLength+JOURNAL_ENTRY_HEADER_SIZE);
+        //cache->makeSpace(writeLength+JOURNAL_ENTRY_HEADER_SIZE);
         err = replicator->addJournalEntry(i->key.c_str(),&data[count],i->length,writeLength);
         assert((uint) err == writeLength);
@@ -428,14 +434,13 @@ int IOCoordinator::append(const char *_filename, const uint8_t *data, size_t len
             metadata.updateEntryLength(i->offset, (count + i->length));
             metadata.writeMetadata(filename);
             logger->log(LOG_ERR,"IOCoordinator::append(): journal failed to complete write, %u of %u bytes written.",count,length);
-            return count;
+            goto out;
         }
         metadata.updateEntryLength(i->offset, (writeLength + i->length));
         cache->newJournalEntry(writeLength+JOURNAL_ENTRY_HEADER_SIZE);
         synchronizer->newJournalEntry(i->key);
         count += writeLength;
         dataRemaining -= writeLength;
     }
@@ -453,7 +458,7 @@ int IOCoordinator::append(const char *_filename, const uint8_t *data, size_t len
         //add a new metaDataObject
         writeLength = min(objectSize,dataRemaining);
 
-        cache->makeSpace(writeLength);
+        //cache->makeSpace(writeLength);
 
         // add a new metadata object, this will get a new objectKey NOTE: probably needs offset too
         metadataObject newObject = metadata.addMetadataObject(filename,writeLength);
@@ -466,7 +471,7 @@ int IOCoordinator::append(const char *_filename, const uint8_t *data, size_t len
             metadata.updateEntryLength(newObject.offset, (count));
             metadata.writeMetadata(filename);
             logger->log(LOG_ERR,"IOCoordinator::append(): newObject failed to complete write, %u of %u bytes written.",count,length);
-            return count;
+            goto out;
             //log error and abort
         }
         cache->newObject(newObject.key,writeLength);
@@ -475,12 +480,14 @@ int IOCoordinator::append(const char *_filename, const uint8_t *data, size_t len
         count += writeLength;
         dataRemaining -= writeLength;
     }
 
     synchronizer->newObjects(newObjectKeys);
     metadata.writeMetadata(filename);
 
-    // had to add this hack to prevent deadlock
+out:
+    // need to release the file lock before calling the cache fcns.
     lock.unlock();
+    cache->doneWriting();
     return count;
 }
@@ -581,6 +588,8 @@ int IOCoordinator::truncate(const char *_path, size_t newSize)
     {
         uint8_t zero = 0;
         err = _write(path, &zero, newSize - 1, 1);
+        lock.unlock();
+        cache->doneWriting();
         if (err < 0)
             return -1;
         return 0;

View File

@@ -17,6 +17,7 @@
 #include "SMLogging.h"
 #include "RWLock.h"
 #include "Replicator.h"
+#include "Utilities.h"
 
 namespace storagemanager
 {
@@ -85,6 +86,7 @@ class IOCoordinator : public boost::noncopyable
         void remove(const boost::filesystem::path &path);
         void deleteMetaFile(const boost::filesystem::path &file);
         int _write(const char *filename, const uint8_t *data, off_t offset, size_t length);
+        int loadObjectAndJournal(const char *objFilename, const char *journalFilename,

View File

@@ -2,7 +2,6 @@
 #include "Replicator.h"
 #include "IOCoordinator.h"
 #include "SMLogging.h"
-#include "Cache.h"
 #include "Utilities.h"
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -122,7 +121,6 @@ int Replicator::addJournalEntry(const char *filename, const uint8_t *data, off_t
     int version = 1;
     string journalFilename = msJournalPath + "/" + string(filename) + ".journal";
     uint64_t thisEntryMaxOffset = (offset + length - 1);
-    Cache *cache = Cache::get();   // need to init sync here to break circular dependency...
 
     bool exists = boost::filesystem::exists(journalFilename);
     OPEN(journalFilename.c_str(), (exists ? O_RDWR : O_WRONLY | O_CREAT))
@@ -132,7 +130,6 @@ int Replicator::addJournalEntry(const char *filename, const uint8_t *data, off_t
         // create new journal file with header
         //OPEN(journalFilename.c_str(), O_WRONLY | O_CREAT);
         string header = (boost::format("{ \"version\" : \"%03i\", \"max_offset\" : \"%011u\" }") % version % thisEntryMaxOffset).str();
-        cache->makeSpace(header.size());
         err = ::write(fd, header.c_str(), header.length() + 1);
         assert((uint) err == header.length() + 1);
         if (err <= 0)

View File

@@ -46,7 +46,7 @@ void SMLogging::log(int priority,const char *format, ...)
 #ifdef DEBUG
     va_list args2;
     va_copy(args2, args);
-    vprintf(format, args2);
+    vfprintf(stderr, format, args2);
     printf("\n");
 #endif
     vsyslog(priority, format, args);

View File

@@ -82,10 +82,8 @@ enum OpFlags
     NEW_OBJECT = 0x4,
 };
 
-void Synchronizer::newJournalEntry(const string &key)
+void Synchronizer::_newJournalEntry(const string &key)
 {
-    boost::unique_lock<boost::mutex> s(mutex);
     auto it = pendingOps.find(key);
     if (it != pendingOps.end())
     {
@@ -95,6 +93,19 @@ void Synchronizer::newJournalEntry(const string &key)
     //makeJob(key);
     pendingOps[key] = boost::shared_ptr<PendingOps>(new PendingOps(JOURNAL));
 }
 
+void Synchronizer::newJournalEntry(const string &key)
+{
+    boost::unique_lock<boost::mutex> s(mutex);
+    _newJournalEntry(key);
+}
+
+void Synchronizer::newJournalEntries(const vector<string> &keys)
+{
+    boost::unique_lock<boost::mutex> s(mutex);
+    for (const string &key : keys)
+        _newJournalEntry(key);
+}
+
 void Synchronizer::newObjects(const vector<string> &keys)
 {

View File

@@ -30,6 +30,7 @@ class Synchronizer : public boost::noncopyable
         // these take keys as parameters, not full path names, ex, pass in '12345' not
         // 'cache/12345'.
         void newJournalEntry(const std::string &key);
+        void newJournalEntries(const std::vector<std::string> &keys);
         void newObjects(const std::vector<std::string> &keys);
         void deletedObjects(const std::vector<std::string> &keys);
        void flushObject(const std::string &key);
@@ -41,6 +42,7 @@ class Synchronizer : public boost::noncopyable
     private:
         Synchronizer();
 
+        void _newJournalEntry(const std::string &key);
         void process(std::list<std::string>::iterator key);
         void synchronize(const std::string &sourceFile, std::list<std::string>::iterator &it);
         void synchronizeDelete(const std::string &sourceFile, std::list<std::string>::iterator &it);
@@ -86,7 +88,6 @@ class Synchronizer : public boost::noncopyable
         const boost::chrono::seconds syncInterval = boost::chrono::seconds(10);
         void periodicSync();
 
         SMLogging *logger;
         Cache *cache;
         Replicator *replicator;

View File

@@ -15,6 +15,7 @@
 #include "Replicator.h"
 #include "S3Storage.h"
 #include "Utilities.h"
+#include "Synchronizer.h"
 #include <iostream>
 #include <stdlib.h>