mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git synced 2025-12-24 14:20:59 +03:00

Merge branch 'develop' into develop

This commit is contained in:
Jose Rojas
2020-10-23 16:58:19 -05:00
committed by GitHub
15 changed files with 260 additions and 118 deletions

README

@@ -1,12 +1,7 @@
This is MariaDB ColumnStore 1.4
MariaDB ColumnStore 1.4 is the GA version of MariaDB ColumnStore.
This is MariaDB ColumnStore 6.X
It is built by porting InfiniDB 4.6.7 on MariaDB and adding entirely
new features not found anywhere else.
MariaDB ColumnStore 1.4 is an Alpha.
Additional features will be pushed in future releases.
A few things to notice:
- Do not use pre-releases on production systems.
- Building the ColumnStore engine requires a special build environment. We're working on making that environment available so everyone can build it.


@@ -1,14 +1,15 @@
# MariaDB ColumnStore Storage/Execution engine 1.4
# MariaDB ColumnStore Storage/Execution engine 6.X
MariaDB ColumnStore 1.4 is a GA version of MariaDB ColumnStore.
It is built by porting InfiniDB 4.6.7 on MariaDB and adding entirely
new features not found anywhere else.
# Building
This repository is not meant to be built independently outside of the server. It is integrated into https://github.com/mariadb-corporation/mariadb-columnstore-server (i.e., the *server*) as a git submodule, so complete build instructions can be found on *the server* page.
This repository is not meant to be built independently outside of MariaDB server.
https://github.com/mariadb-corporation/mariadb-columnstore-server
https://github.com/MariaDB/server
Building instructions are coming soon.
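Until those instructions land, a minimal sketch of the submodule-based workflow is shown below; it is inferred from the links above, and the exact steps (and any ColumnStore-specific cmake options) are assumptions rather than documented procedure.
# Sketch only: the engine is built as part of the MariaDB server tree.
git clone https://github.com/MariaDB/server.git
cd server
git submodule update --init --recursive   # pulls in the ColumnStore engine submodule
# From here, follow the server's own build documentation; ColumnStore-specific
# cmake flags are intentionally not guessed at here.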
# Issue tracking


@@ -1,4 +1,4 @@
COLUMNSTORE_VERSION_MAJOR=1
COLUMNSTORE_VERSION_MINOR=6
COLUMNSTORE_VERSION_PATCH=0
COLUMNSTORE_VERSION_MAJOR=5
COLUMNSTORE_VERSION_MINOR=4
COLUMNSTORE_VERSION_PATCH=1
COLUMNSTORE_VERSION_RELEASE=1


@@ -254,6 +254,9 @@ if [ -z "$MCS_USE_S3_STORAGE" ]; then
MCS_S3_ACCESS_KEY_ID="$(find_env_var "MCS_S3_ACCESS_KEY_ID")"
MCS_S3_SECRET_ACCESS_KEY="$(find_env_var "MCS_S3_SECRET_ACCESS_KEY")"
MCS_S3_REGION="$(find_env_var "MCS_S3_REGION")"
MCS_S3_ROLE_NAME="$(find_env_var "MCS_S3_ROLE_NAME")"
MCS_S3_STS_REGION="$(find_env_var "MCS_S3_STS_REGION")"
MCS_S3_STS_ENDPOINT="$(find_env_var "MCS_S3_STS_ENDPOINT")"
fi
if [ ! -z "$MCS_USE_S3_STORAGE" ] && [ $MCS_USE_S3_STORAGE -eq 1 ]; then
@@ -272,28 +275,42 @@ if [ ! -z "$MCS_USE_S3_STORAGE" ] && [ $MCS_USE_S3_STORAGE -eq 1 ]; then
@ENGINE_BINDIR@/mcsSetConfig -d Installation DBRootStorageType "storagemanager"
@ENGINE_BINDIR@/mcsSetConfig -d StorageManager Enabled "Y"
@ENGINE_BINDIR@/mcsSetConfig -d SystemConfig DataFilePlugin "libcloudio.so"
sed -i "s|service =.*|service = S3|" /etc/columnstore/storagemanager.cnf
sed -i "s|^service =.*|service = S3|" /etc/columnstore/storagemanager.cnf
if [ ! -z "$MCS_S3_REGION" ]; then
sed -i "s|region =.*|region = $MCS_S3_REGION|" /etc/columnstore/storagemanager.cnf
sed -i "s|^region =.*|region = $MCS_S3_REGION|" /etc/columnstore/storagemanager.cnf
fi
sed -i "s|bucket =.*|bucket = $MCS_S3_BUCKET|" /etc/columnstore/storagemanager.cnf
sed -i "s|# endpoint =.*|endpoint = $MCS_S3_ENDPOINT|" /etc/columnstore/storagemanager.cnf
sed -i "s|# aws_access_key_id =.*|aws_access_key_id = $MCS_S3_ACCESS_KEY_ID|" /etc/columnstore/storagemanager.cnf
sed -i "s|# aws_secret_access_key =.*|aws_secret_access_key = $MCS_S3_SECRET_ACCESS_KEY|" /etc/columnstore/storagemanager.cnf
testS3Connection
if [ ! -z "$MCS_S3_ROLE_NAME" ]; then
sed -i "s|^# iam_role_name =.*|iam_role_name = $MCS_S3_ROLE_NAME|" /etc/columnstore/storagemanager.cnf
fi
if [ ! -z "$MCS_S3_STS_REGION" ]; then
sed -i "s|^# sts_region =.*|sts_region = $MCS_S3_STS_REGION|" /etc/columnstore/storagemanager.cnf
fi
if [ ! -z "$MCS_S3_STS_ENDPOINT" ]; then
sed -i "s|^# sts_endpoint =.*|sts_endpoint = $MCS_S3_STS_ENDPOINT|" /etc/columnstore/storagemanager.cnf
fi
sed -i "s|^bucket =.*|bucket = $MCS_S3_BUCKET|" /etc/columnstore/storagemanager.cnf
sed -i "s|^# endpoint =.*|endpoint = $MCS_S3_ENDPOINT|" /etc/columnstore/storagemanager.cnf
sed -i "s|^# aws_access_key_id =.*|aws_access_key_id = $MCS_S3_ACCESS_KEY_ID|" /etc/columnstore/storagemanager.cnf
sed -i "s|^# aws_secret_access_key =.*|aws_secret_access_key = $MCS_S3_SECRET_ACCESS_KEY|" /etc/columnstore/storagemanager.cnf
@ENGINE_BINDIR@/testS3Connection
if [ $? -ne 0 ]; then
sed -i "s|endpoint =.*|# endpoint = |" /etc/columnstore/storagemanager.cnf
sed -i "s|aws_access_key_id =.*|# aws_access_key_id = |" /etc/columnstore/storagemanager.cnf
sed -i "s|aws_secret_access_key =.*|# aws_secret_access_key = |" /etc/columnstore/storagemanager.cnf
sed -i "s|^iam_role_name =.*|# iam_role_name = |" /etc/columnstore/storagemanager.cnf
sed -i "s|^sts_region =.*|# sts_region = |" /etc/columnstore/storagemanager.cnf
sed -i "s|^sts_endpoint =.*|# sts_endpoint = |" /etc/columnstore/storagemanager.cnf
sed -i "s|^endpoint =.*|# endpoint = |" /etc/columnstore/storagemanager.cnf
sed -i "s|^aws_access_key_id =.*|# aws_access_key_id = |" /etc/columnstore/storagemanager.cnf
sed -i "s|^aws_secret_access_key =.*|# aws_secret_access_key = |" /etc/columnstore/storagemanager.cnf
echo "There was an error validating the settings used to access S3."
echo "The specified user must have GET, PUT, HEAD, and DELETE permissions to the bucket."
echo "The specified user or role must have GET, PUT, HEAD, and DELETE permissions to the bucket."
echo "Verify the following environment variables are correct:"
echo "MCS_S3_BUCKET"
echo "MCS_S3_ENDPOINT"
echo "MCS_S3_ACCESS_KEY_ID"
echo "MCS_S3_SECRET_ACCESS_KEY"
echo "MCS_S3_REGION"
echo "MCS_S3_ROLE_NAME (optional)"
echo "MCS_S3_STS_REGION (optional)"
echo "MCS_S3_STS_ENDPOINT (optional)"
echo "After environment variables are fixed, run command: columnstore-post-install"
exit 1
fi
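As a usage illustration of the new role-related variables (all values below are placeholders, and the variables must be visible wherever find_env_var looks for them in your deployment):
# Placeholder values for illustration only
export MCS_USE_S3_STORAGE=1
export MCS_S3_BUCKET=my-columnstore-bucket
export MCS_S3_REGION=us-east-1
export MCS_S3_ROLE_NAME=my-columnstore-role    # IAM role to assume (optional)
export MCS_S3_STS_REGION=us-east-1             # optional; defaults apply if unset
export MCS_S3_STS_ENDPOINT=sts.amazonaws.com   # optional; defaults apply if unset
# MCS_S3_ACCESS_KEY_ID, MCS_S3_SECRET_ACCESS_KEY and MCS_S3_ENDPOINT may also be
# needed depending on the setup; see the checklist printed above.
columnstore-post-install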


@@ -76,7 +76,8 @@ if __name__ == '__main__':
config_root.find('./SystemConfig/DataFilePlugin').text = "libcloudio.so"
cs_config.write('/etc/columnstore/Columnstore.xml.loadbrm')
os.replace('/etc/columnstore/Columnstore.xml.loadbrm', '/etc/columnstore/Columnstore.xml') # atomic replacement
os.replace('/etc/columnstore/Columnstore.xml.loadbrm',
'/etc/columnstore/Columnstore.xml') # atomic replacement
# Single-node on S3
if storage.lower() == 's3' and not bucket.lower() == 'some_bucket' and pmCount == 1:


@@ -41,7 +41,8 @@ inline bool retryable_error(uint8_t s3err)
s3err == MS3_ERR_REQUEST_ERROR ||
s3err == MS3_ERR_OOM ||
s3err == MS3_ERR_IMPOSSIBLE ||
s3err == MS3_ERR_SERVER
s3err == MS3_ERR_SERVER ||
s3err == MS3_ERR_AUTH_ROLE
);
}
@@ -101,6 +102,9 @@ S3Storage::S3Storage(bool skipRetry) : skipRetryableErrors(skipRetry)
" or setting aws_access_key_id and aws_secret_access_key in storagemanager.cnf";
key = config->getValue("S3", "aws_access_key_id");
secret = config->getValue("S3", "aws_secret_access_key");
IAMrole = config->getValue("S3", "iam_role_name");
STSendpoint = config->getValue("S3", "sts_endpoint");
STSregion = config->getValue("S3", "sts_region");
if (key.empty())
{
char *_key_id = getenv("AWS_ACCESS_KEY_ID");
@@ -121,7 +125,7 @@ S3Storage::S3Storage(bool skipRetry) : skipRetryableErrors(skipRetry)
}
secret = _secret_id;
}
region = config->getValue("S3", "region");
bucket = config->getValue("S3", "bucket");
prefix = config->getValue("S3", "prefix");
@@ -135,7 +139,7 @@ S3Storage::S3Storage(bool skipRetry) : skipRetryableErrors(skipRetry)
endpoint = config->getValue("S3", "endpoint");
ms3_library_init();
//ms3_debug(true);
//ms3_debug();
testConnectivityAndPerms();
}
@@ -227,6 +231,12 @@ int S3Storage::getObject(const string &_sourceKey, boost::shared_array<uint8_t>
string sourceKey = prefix + _sourceKey;
ms3_st *creds = getConnection();
if (!creds)
{
logger->log(LOG_ERR, "S3Storage::getObject(): failed to GET, S3Storage::getConnection() returned NULL on init");
errno = EINVAL;
return -1;
}
ScopedConnection sc(this, creds);
do {
@@ -239,6 +249,10 @@ int S3Storage::getObject(const string &_sourceKey, boost::shared_array<uint8_t>
else
logger->log(LOG_ERR, "S3Storage::getObject(): failed to GET, got '%s'. bucket = %s, key = %s. Retrying...",
s3err_msgs[err], bucket.c_str(), sourceKey.c_str());
if(!IAMrole.empty())
{
ms3_assume_role(creds);
}
sleep(5);
}
} while (err && (!skipRetryableErrors && retryable_error(err)));
@@ -317,6 +331,12 @@ int S3Storage::putObject(const boost::shared_array<uint8_t> data, size_t len, co
string destKey = prefix + _destKey;
uint8_t s3err;
ms3_st *creds = getConnection();
if (!creds)
{
logger->log(LOG_ERR, "S3Storage::putObject(): failed to PUT, S3Storage::getConnection() returned NULL on init");
errno = EINVAL;
return -1;
}
ScopedConnection sc(this, creds);
do {
@@ -329,6 +349,10 @@ int S3Storage::putObject(const boost::shared_array<uint8_t> data, size_t len, co
else
logger->log(LOG_ERR, "S3Storage::putObject(): failed to PUT, got '%s'. bucket = %s, key = %s."
" Retrying...", s3err_msgs[s3err], bucket.c_str(), destKey.c_str());
if(!IAMrole.empty())
{
ms3_assume_role(creds);
}
sleep(5);
}
} while (s3err && (!skipRetryableErrors && retryable_error(s3err)));
@@ -351,8 +375,14 @@ int S3Storage::deleteObject(const string &_key)
uint8_t s3err;
string key = prefix + _key;
ms3_st *creds = getConnection();
if (!creds)
{
logger->log(LOG_ERR, "S3Storage::deleteObject(): failed to DELETE, S3Storage::getConnection() returned NULL on init");
errno = EINVAL;
return -1;
}
ScopedConnection sc(this, creds);
do {
s3err = ms3_delete(creds, bucket.c_str(), key.c_str());
if (s3err && s3err != MS3_ERR_NOT_FOUND && (!skipRetryableErrors && retryable_error(s3err)))
@@ -363,6 +393,10 @@ int S3Storage::deleteObject(const string &_key)
else
logger->log(LOG_ERR, "S3Storage::deleteObject(): failed to DELETE, got '%s'. bucket = %s, key = %s. Retrying...",
s3err_msgs[s3err], bucket.c_str(), key.c_str());
if(!IAMrole.empty())
{
ms3_assume_role(creds);
}
sleep(5);
}
} while (s3err && s3err != MS3_ERR_NOT_FOUND && (!skipRetryableErrors && retryable_error(s3err)));
@@ -385,6 +419,12 @@ int S3Storage::copyObject(const string &_sourceKey, const string &_destKey)
string sourceKey = prefix + _sourceKey, destKey = prefix + _destKey;
uint8_t s3err;
ms3_st *creds = getConnection();
if (!creds)
{
logger->log(LOG_ERR, "S3Storage::copyObject(): failed to copy, S3Storage::getConnection() returned NULL on init");
errno = EINVAL;
return -1;
}
ScopedConnection sc(this, creds);
do
@@ -398,6 +438,10 @@ int S3Storage::copyObject(const string &_sourceKey, const string &_destKey)
else
logger->log(LOG_ERR, "S3Storage::copyObject(): failed to copy, got '%s'. bucket = %s, srckey = %s, "
" destkey = %s. Retrying...", s3err_msgs[s3err], bucket.c_str(), sourceKey.c_str(), destKey.c_str());
if(!IAMrole.empty())
{
ms3_assume_role(creds);
}
sleep(5);
}
} while (s3err && (!skipRetryableErrors && retryable_error(s3err)));
@@ -435,6 +479,12 @@ int S3Storage::exists(const string &_key, bool *out)
uint8_t s3err;
ms3_status_st status;
ms3_st *creds = getConnection();
if (!creds)
{
logger->log(LOG_ERR, "S3Storage::exists(): failed to HEAD, S3Storage::getConnection() returned NULL on init");
errno = EINVAL;
return -1;
}
ScopedConnection sc(this, creds);
do {
@@ -447,6 +497,10 @@ int S3Storage::exists(const string &_key, bool *out)
else
logger->log(LOG_ERR, "S3Storage::exists(): failed to HEAD, got '%s'. bucket = %s, key = %s. Retrying...",
s3err_msgs[s3err], bucket.c_str(), key.c_str());
if(!IAMrole.empty())
{
ms3_assume_role(creds);
}
sleep(5);
}
} while (s3err && s3err != MS3_ERR_NOT_FOUND && (!skipRetryableErrors && retryable_error(s3err)));
@@ -490,11 +544,27 @@ ms3_st * S3Storage::getConnection()
// get a connection
ms3_st *ret = NULL;
uint8_t res = 0;
if (freeConns.empty())
{
ret = ms3_init(key.c_str(), secret.c_str(), region.c_str(), (endpoint.empty() ? NULL : endpoint.c_str()));
if (ret == NULL)
logger->log(LOG_ERR, "S3Storage::getConnection(): ms3_init returned NULL, no specific info to report");
if(!IAMrole.empty())
{
res = ms3_init_assume_role(ret, (IAMrole.empty() ? NULL : IAMrole.c_str()),
(STSendpoint.empty() ? NULL : STSendpoint.c_str()),
(STSregion.empty() ? NULL : STSregion.c_str()));
if (res)
{
// Something is wrong with the assume role so abort as if the ms3_init failed
logger->log(LOG_ERR, "S3Storage::getConnection(): ERROR: ms3_init_assume_role. Verify iam_role_name = %s, aws_access_key_id, aws_secret_access_key values. Also check sts_region and sts_endpoint if configured.",IAMrole.c_str());
if (ms3_server_error(ret))
logger->log(LOG_ERR, "S3Storage::getConnection(): ms3_error: server says '%s' role name = %s", ms3_server_error(ret), IAMrole.c_str());
ms3_deinit(ret);
ret = NULL;
}
}
//assert(connMutexes[ret].try_lock());
s.unlock();
return ret;


@@ -55,6 +55,9 @@ class S3Storage : public CloudStorage
std::string key;
std::string secret;
std::string endpoint;
std::string IAMrole;
std::string STSendpoint;
std::string STSregion;
struct Connection
{


@@ -77,11 +77,25 @@ void coreSM(int sig)
int main(int argc, char** argv)
{
SMLogging* logger = SMLogging::get();
IOCoordinator* ioc = NULL;
Cache* cache = NULL;
Synchronizer* sync = NULL;
Replicator* rep = NULL;
/* Instantiate objects to have them verify config settings before continuing */
IOCoordinator* ioc = IOCoordinator::get();
Cache* cache = Cache::get();
Synchronizer* sync = Synchronizer::get();
Replicator* rep = Replicator::get();
try
{
ioc = IOCoordinator::get();
cache = Cache::get();
sync = Synchronizer::get();
rep = Replicator::get();
}
catch (exception &e)
{
logger->log(LOG_INFO, "StorageManager init FAIL: %s", e.what());
return -1;
}
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
@@ -112,8 +126,6 @@ int main(int argc, char** argv)
int ret = 0;
SMLogging* logger = SMLogging::get();
logger->log(LOG_NOTICE,"StorageManager started.");
SessionManager* sm = SessionManager::get();


@@ -55,30 +55,37 @@ void catFileOffline(const char *filename, int prefixlen)
uint8_t data[8192];
off_t offset = 0;
int read_err, write_err, count;
boost::scoped_ptr<IOCoordinator> ioc(IOCoordinator::get());
do {
count = 0;
read_err = ioc->read(filename, data, offset, 8192);
if (read_err < 0)
{
int l_errno = errno;
cerr << "Error reading " << &filename[prefixlen] << ": " << strerror_r(l_errno, (char *) data, 8192) << endl;
}
try
{
boost::scoped_ptr<IOCoordinator> ioc(IOCoordinator::get());
while (count < read_err)
{
write_err = write(STDOUT_FILENO, &data[count], read_err - count);
if (write_err < 0)
do {
count = 0;
read_err = ioc->read(filename, data, offset, 8192);
if (read_err < 0)
{
int l_errno = errno;
cerr << "Error writing to stdout: " << strerror_r(l_errno, (char *) data, 8192) << endl;
exit(1);
cerr << "Error reading " << &filename[prefixlen] << ": " << strerror_r(l_errno, (char *) data, 8192) << endl;
}
count += write_err;
}
offset += read_err;
} while (read_err > 0);
while (count < read_err)
{
write_err = write(STDOUT_FILENO, &data[count], read_err - count);
if (write_err < 0)
{
int l_errno = errno;
cerr << "Error writing to stdout: " << strerror_r(l_errno, (char *) data, 8192) << endl;
exit(1);
}
count += write_err;
}
offset += read_err;
} while (read_err > 0);
}
catch (exception &e)
{
cerr << "smcat catFileOffline FAIL: " << e.what() << endl;
}
}
void catFileOnline(const char *filename, int prefixlen)


@@ -58,42 +58,49 @@ bool SMOnline()
void lsOffline(const char *path)
{
boost::scoped_ptr<IOCoordinator> ioc(IOCoordinator::get());
vector<string> listing;
int err = ioc->listDirectory(path, &listing);
if (err)
exit(1);
struct stat _stat;
boost::filesystem::path base(path);
boost::filesystem::path p;
cout.fill(' ');
for (auto &entry : listing)
try
{
p = base / entry;
err = ioc->stat(p.string().c_str(), &_stat);
if (!err)
boost::scoped_ptr<IOCoordinator> ioc(IOCoordinator::get());
vector<string> listing;
int err = ioc->listDirectory(path, &listing);
if (err)
exit(1);
struct stat _stat;
boost::filesystem::path base(path);
boost::filesystem::path p;
cout.fill(' ');
for (auto &entry : listing)
{
if (_stat.st_mode & S_IFDIR)
p = base / entry;
err = ioc->stat(p.string().c_str(), &_stat);
if (!err)
{
cout << "d";
cout.width(14);
if (_stat.st_mode & S_IFDIR)
{
cout << "d";
cout.width(14);
}
else
cout.width(15);
struct tm *my_tm = localtime(&_stat.st_mtim.tv_sec);
char date[100];
strftime(date, 100, "%b %e %H:%M", my_tm);
cout << right << _stat.st_size << left << " " << date << left << " " << entry << endl;
}
else
{
cout.width(15);
struct tm *my_tm = localtime(&_stat.st_mtim.tv_sec);
char date[100];
strftime(date, 100, "%b %e %H:%M", my_tm);
cout << right << _stat.st_size << left << " " << date << left << " " << entry << endl;
}
else
{
cout.width(15);
cout << right << "error" << left << " " << entry << endl;
cout << right << "error" << left << " " << entry << endl;
}
}
}
catch (exception &e)
{
cerr << "smls lsOffline FAIL: " << e.what() << endl;
}
}
void lsOnline(const char *path)


@@ -57,42 +57,48 @@ void putOffline(const char *fname, int prefixlen)
uint8_t data[8192];
int read_err, write_err;
ssize_t count, offset = 0;
boost::scoped_ptr<IOCoordinator> ioc(IOCoordinator::get());
struct stat _stat;
read_err = ioc->open(fname, O_CREAT | O_TRUNC | O_WRONLY, &_stat);
if (read_err < 0)
try
{
int l_errno = errno;
cerr << "Failed to open/create " << &fname[prefixlen] << ": " <<
strerror_r(l_errno, (char *) data, 8192) << endl;
exit(1);
}
do
{
read_err = ::read(STDIN_FILENO, data, 8192);
boost::scoped_ptr<IOCoordinator> ioc(IOCoordinator::get());
struct stat _stat;
read_err = ioc->open(fname, O_CREAT | O_TRUNC | O_WRONLY, &_stat);
if (read_err < 0)
{
int l_errno = errno;
cerr << "Error reading stdin: " << strerror_r(l_errno, (char *) data, 8192) << endl;
cerr << "Failed to open/create " << &fname[prefixlen] << ": " <<
strerror_r(l_errno, (char *) data, 8192) << endl;
exit(1);
}
count = 0;
while (count < read_err)
do
{
write_err = ioc->write(fname, &data[count], offset + count, read_err - count);
if (write_err < 0)
read_err = ::read(STDIN_FILENO, data, 8192);
if (read_err < 0)
{
int l_errno = errno;
cerr << "Error writing to " << &fname[prefixlen] << ": " <<
strerror_r(l_errno, (char *) data, 8192) << endl;
cerr << "Error reading stdin: " << strerror_r(l_errno, (char *) data, 8192) << endl;
exit(1);
}
count += write_err;
}
offset += read_err;
} while (read_err > 0);
count = 0;
while (count < read_err)
{
write_err = ioc->write(fname, &data[count], offset + count, read_err - count);
if (write_err < 0)
{
int l_errno = errno;
cerr << "Error writing to " << &fname[prefixlen] << ": " <<
strerror_r(l_errno, (char *) data, 8192) << endl;
exit(1);
}
count += write_err;
}
offset += read_err;
} while (read_err > 0);
}
catch (exception &e)
{
cerr << "smput putOffline FAIL: " << e.what() << endl;
}
}
void putOnline(const char *fname, int prefixlen)


@@ -54,14 +54,21 @@ bool SMOnline()
void rmOffline(int argCount, const char **args, const char *prefix, uint prefixlen)
{
boost::scoped_ptr<IOCoordinator> ioc(IOCoordinator::get());
char buf[16384];
strncpy(buf, prefix, prefixlen);
for (int i = 1; i < argCount; i++)
try
{
memcpy(&buf[prefixlen], args[i], min(16383 - prefixlen, strlen(args[i])) + 1);
ioc->unlink(buf);
boost::scoped_ptr<IOCoordinator> ioc(IOCoordinator::get());
char buf[16384];
strncpy(buf, prefix, prefixlen);
for (int i = 1; i < argCount; i++)
{
memcpy(&buf[prefixlen], args[i], min(16383 - prefixlen, strlen(args[i])) + 1);
ioc->unlink(buf);
}
}
catch (exception &e)
{
cerr << "smrm rmOffline FAIL: " << e.what() << endl;
}
}


@@ -110,6 +110,20 @@ bucket = some_bucket
# aws_access_key_id =
# aws_secret_access_key =
# If you want StorageManager to assume an IAM role to use for its S3
# accesses, specify the name of the role in iam_role_name. The name
# should be only the name, rather than the full path.
#
# The specified role must already exist and have permission to get, put,
# delete, and 'head' on the specified S3 bucket.
# iam_role_name =
# If an IAM role is specified, sts_endpoint and sts_region are used to specify
# which STS server & region to use to assume the role. The default for
# sts_endpoint is 'sts.amazonaws.com', and the default for sts_region is
# 'us-east-1'.
# sts_region =
# sts_endpoint =
# The LocalStorage section configures the 'local storage' module
# if specified by ObjectStorage/service.
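For reference, a hypothetical storagemanager.cnf fragment with role-based access enabled might look like the following; the bucket, credential, and role values are placeholders, and the sts_* keys can stay commented out to pick up the defaults described above.
[ObjectStorage]
service = S3

[S3]
region = us-east-1
bucket = my_columnstore_bucket
# Base credentials may still come from here or from the AWS_* environment
# variables; the IAM role is assumed on top of them.
aws_access_key_id = placeholder_key_id
aws_secret_access_key = placeholder_secret
iam_role_name = my_columnstore_role
# sts_region =
# sts_endpoint =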


@@ -7,12 +7,14 @@ SET(S3_SOURCES ${S3API_DIR}/src/debug.c
${S3API_DIR}/src/response.c
${S3API_DIR}/src/sha256.c
${S3API_DIR}/src/sha256-internal.c
${S3API_DIR}/src/xml.c)
${S3API_DIR}/src/xml.c
${S3API_DIR}/src/assume_role.c)
ADD_LIBRARY(marias3 SHARED ${S3_SOURCES})
TARGET_LINK_LIBRARIES(marias3 curl)
INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR} ${S3API_DIR})
add_definitions(-D_GNU_SOURCE)
set(S3API_DEPS marias3 curl CACHE INTERNAL "S3API_DEPS")