diff --git a/.drone.jsonnet b/.drone.jsonnet
index 0549a2d67..fcfc2fe89 100644
--- a/.drone.jsonnet
+++ b/.drone.jsonnet
@@ -1,15 +1,19 @@
 local platforms = {
   develop: ['opensuse/leap:15', 'centos:7', 'centos:8', 'debian:9', 'debian:10', 'ubuntu:16.04', 'ubuntu:18.04', 'ubuntu:20.04'],
+  'develop-1.5': ['opensuse/leap:15', 'centos:7', 'centos:8', 'debian:9', 'debian:10', 'ubuntu:16.04', 'ubuntu:18.04', 'ubuntu:20.04'],
+  'columnstore-1.5.4-1': ['opensuse/leap:15', 'centos:7', 'centos:8', 'debian:9', 'debian:10', 'ubuntu:16.04', 'ubuntu:18.04', 'ubuntu:20.04'],
   'develop-1.4': ['centos:7', 'centos:8', 'debian:9', 'debian:10', 'ubuntu:16.04', 'ubuntu:18.04', 'ubuntu:20.04'],
 };

 local codebase_map = {
-  develop: 'git clone --recurse-submodules --branch 10.5 --depth 1 https://github.com/MariaDB/server .',
+  develop: 'git clone --recurse-submodules --branch 10.6 --depth 1 https://github.com/MariaDB/server .',
+  'develop-1.5': 'git clone --recurse-submodules --branch 10.5 --depth 1 https://github.com/MariaDB/server .',
+  'columnstore-1.5.4-1': 'git clone --recurse-submodules --branch 10.5 --depth 1 https://github.com/MariaDB/server .',
   'develop-1.4': 'git clone --recurse-submodules --branch 10.4-enterprise --depth 1 https://github.com/mariadb-corporation/MariaDBEnterprise .',
 };

 local builddir = 'verylongdirnameforverystrangecpackbehavior';
-local cmakeflags = '-DCMAKE_BUILD_TYPE=RelWithDebInfo -DPLUGIN_COLUMNSTORE=YES -DPLUGIN_MROONGA=NO -DPLUGIN_ROCKSDB=NO -DPLUGIN_TOKUDB=NO -DPLUGIN_CONNECT=NO -DPLUGIN_SPIDER=NO -DPLUGIN_OQGRAPH=NO -DPLUGIN_PERFSCHEMA=NO -DPLUGIN_SPHINX=NO';
+local cmakeflags = '-DCMAKE_BUILD_TYPE=RelWithDebInfo -DPLUGIN_COLUMNSTORE=YES -DPLUGIN_MROONGA=NO -DPLUGIN_ROCKSDB=NO -DPLUGIN_TOKUDB=NO -DPLUGIN_CONNECT=NO -DPLUGIN_SPIDER=NO -DPLUGIN_OQGRAPH=NO -DPLUGIN_SPHINX=NO';

 local rpm_build_deps = 'install -y systemd-devel git make gcc gcc-c++ libaio-devel openssl-devel boost-devel bison snappy-devel flex libcurl-devel libxml2-devel ncurses-devel automake libtool policycoreutils-devel rpm-build lsof iproute pam-devel perl-DBI cracklib-devel expect readline-devel createrepo';

@@ -18,6 +22,8 @@ local deb_build_deps = 'apt update && apt install --yes --no-install-recommends
 local platformMap(branch, platform) =
   local branch_cmakeflags_map = {
     develop: ' -DBUILD_CONFIG=mysql_release -DWITH_WSREP=OFF',
+    'develop-1.5': ' -DBUILD_CONFIG=mysql_release -DWITH_WSREP=OFF',
+    'columnstore-1.5.4-1': ' -DBUILD_CONFIG=mysql_release -DWITH_WSREP=OFF',
     'develop-1.4': ' -DBUILD_CONFIG=enterprise',
   };

@@ -81,8 +87,6 @@ local Pipeline(branch, platform, event) = {
     commands: [
       // clone mtr repo
       'git clone --depth 1 https://github.com/mariadb-corporation/columnstore-tests',
-      // temporary disable insert_from_another_table test. see https://jira.mariadb.org/browse/MCOL-4247
-      'rm -f columnstore-tests/mysql-test/suite/columnstore/basic/t/mcs24_insert_from_another_table.test',
       'docker run --volume /sys/fs/cgroup:/sys/fs/cgroup:ro --env DEBIAN_FRONTEND=noninteractive --env MCS_USE_S3_STORAGE=0 --name mtr$${DRONE_BUILD_NUMBER} --privileged --detach ' + img + ' ' + init + ' --unit=basic.target',
       'docker cp result mtr$${DRONE_BUILD_NUMBER}:/',
       if (std.split(platform, ':')[0] == 'centos') then 'docker exec -t mtr$${DRONE_BUILD_NUMBER} bash -c "yum install -y epel-release which rsyslog hostname && yum install -y /result/*.' + pkg_format + '"' else '',
@@ -90,7 +94,7 @@ local Pipeline(branch, platform, event) = {
       if (std.split(platform, '/')[0] == 'opensuse') then 'docker exec -t mtr$${DRONE_BUILD_NUMBER} bash -c "zypper install -y which hostname rsyslog && zypper install -y --allow-unsigned-rpm /result/*.' + pkg_format + '"' else '',
       'docker cp columnstore-tests/mysql-test/suite/columnstore mtr$${DRONE_BUILD_NUMBER}:/usr/share/mysql-test/suite/',
       'docker exec -t mtr$${DRONE_BUILD_NUMBER} systemctl start mariadb',
-      'docker exec -t mtr$${DRONE_BUILD_NUMBER} bash -c "cd /usr/share/mysql-test && ./mtr --force --max-test-fail=0 --suite=columnstore/basic --extern socket=/var/lib/mysql/mysql.sock"',
+      'docker exec -t mtr$${DRONE_BUILD_NUMBER} bash -c "cd /usr/share/mysql-test && ./mtr --force --max-test-fail=0 --suite=columnstore/basic --skip-test-list=suite/columnstore/basic/failed.def --extern socket=/var/lib/mysql/mysql.sock"',
     ],
   },
   mtrlog:: {
@@ -116,6 +120,7 @@ local Pipeline(branch, platform, event) = {
   regression:: {
     name: 'regression',
     image: 'docker:git',
+    #failure: (if event == 'cron' then 'ignore' else ''),
    failure: 'ignore',
    volumes: [pipeline._volumes.docker, pipeline._volumes.mdb],
    commands: [
@@ -267,8 +272,9 @@ local Pipeline(branch, platform, event) = {
      "sed -i -e 's/\"galera-enterprise-4\"//' cmake/cpack_rpm.cmake",
      "sed -i '/columnstore/Id' debian/autobake-deb.sh",
      "sed -i 's/.*flex.*/echo/' debian/autobake-deb.sh",
-      "sed -i 's/BETA/GAMMA/' storage/columnstore/CMakeLists.txt",
-      "sed -i -e '/mcs-start-storagemanager.py/d' debian/mariadb-plugin-columnstore.install",
+      "sed -i 's/-DPLUGIN_PERFSCHEMA=NO//' debian/autobake-deb.sh",
+      #"sed -i 's/BETA/GAMMA/' storage/columnstore/CMakeLists.txt",
+      #"sed -i -e '/mcs-start-storagemanager.py/d' debian/mariadb-plugin-columnstore.install",
      platformMap(branch, platform),
      (if pkg_format == 'rpm' then 'createrepo .' else 'dpkg-scanpackages ../ | gzip > ../Packages.gz '),
    ],
@@ -292,9 +298,9 @@ local Pipeline(branch, platform, event) = {
    ],
  },
 ] +
-(if (platform == 'centos:8' && event == 'cron') then [pipeline.dockerfile] else []) +
-(if (platform == 'centos:8' && event == 'cron') then [pipeline.docker] else []) +
-(if (platform == 'centos:8' && event == 'cron') then [pipeline.ecr] else []) +
+#(if (platform == 'centos:8' && event == 'cron') then [pipeline.dockerfile] else []) +
+#(if (platform == 'centos:8' && event == 'cron') then [pipeline.docker] else []) +
+#(if (platform == 'centos:8' && event == 'cron') then [pipeline.ecr] else []) +
 (if (platform == 'centos:7') then [pipeline.mtr] else []) +
 (if (platform == 'centos:7') then [pipeline.mtrlog] else []) +
 (if platform != 'centos:7' then [pipeline.smoke] else []) +
@@ -380,11 +386,11 @@ local FinalPipeline(branch, event) = {
 [
   Pipeline(b, p, e)
-  for b in ['develop', 'develop-1.4']
+  for b in ['develop', 'develop-1.5', 'develop-1.4', 'columnstore-1.5.4-1']
   for p in platforms[b]
   for e in ['pull_request', 'cron', 'custom']
 ] +
 [
   FinalPipeline(b, e)
-  for b in ['develop', 'develop-1.4']
+  for b in ['develop', 'develop-1.5', 'develop-1.4', 'columnstore-1.5.4-1']
   for e in ['pull_request', 'cron', 'custom']
 ]
diff --git a/VERSION b/VERSION
index c65a2d791..e2ed61dbf 100644
--- a/VERSION
+++ b/VERSION
@@ -1,4 +1,4 @@
 COLUMNSTORE_VERSION_MAJOR=1
-COLUMNSTORE_VERSION_MINOR=5
-COLUMNSTORE_VERSION_PATCH=4
+COLUMNSTORE_VERSION_MINOR=6
+COLUMNSTORE_VERSION_PATCH=0
 COLUMNSTORE_VERSION_RELEASE=1
diff --git a/dbcon/joblist/jlf_subquery.cpp b/dbcon/joblist/jlf_subquery.cpp
index ac2f4b950..d7452cb7b 100644
--- a/dbcon/joblist/jlf_subquery.cpp
+++ b/dbcon/joblist/jlf_subquery.cpp
@@ -819,11 +819,17 @@ void addOrderByAndLimit(CalpontSelectExecutionPlan* csep, JobInfo& jobInfo)
     {
         sc = dynamic_cast(jobInfo.deliveredCols[sc->orderPos()].get());

-        // TODO Investigate why this could be NULL
+        // If sc is NULL it's most likely a scalar subquery
        if (sc == NULL)
        {
            const ReturnedColumn* rc = dynamic_cast(orderByCols[i].get());
-            uint64_t eid = rc->expressionId();
+            uint32_t eid = rc->expressionId();
+            // If eid is -1, then there's no corresponding
+            // entry in tupleKeyMap and it will assert down the line.
+            // Don't add the order by. It won't work and ordering on
+            // a singleton is a waste anyway.
+            if ((int32_t)eid == -1)
+                continue;
            CalpontSystemCatalog::ColType ct = rc->resultType();
            tupleKey = getExpTupleKey(jobInfo, eid);
            jobInfo.orderByColVec.push_back(make_pair(tupleKey, orderByCols[i]->asc()));
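
The jlf_subquery.cpp hunk above guards a sentinel expression id before it is used as a key. A minimal standalone sketch of the same pattern, with hypothetical names (`tupleKeyMap`, `getKey`) standing in for the JobInfo plumbing, not the project's actual API:

```cpp
// Sketch (hypothetical names): skip work when an id carries the "not set"
// sentinel instead of letting a later lookup assert.
#include <cstdint>
#include <iostream>
#include <map>

static std::map<uint32_t, int> tupleKeyMap;  // stand-in for the real key map

// Stand-in for getExpTupleKey(): expects a valid id.
static int getKey(uint32_t eid)
{
    return tupleKeyMap.at(eid);  // throws (the real code asserts) on a bogus id
}

int main()
{
    tupleKeyMap[7] = 42;

    for (uint32_t eid : {uint32_t(7), uint32_t(-1)})
    {
        // Same guard as the patch: an id of (uint32_t)-1 means "no expression",
        // so there is nothing meaningful to order by -- skip it.
        if ((int32_t)eid == -1)
        {
            std::cout << "skipping ORDER BY entry for sentinel id\n";
            continue;
        }
        std::cout << "tuple key = " << getKey(eid) << "\n";
    }
    return 0;
}
```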
diff --git a/dbcon/mysql/ha_mcs.cpp b/dbcon/mysql/ha_mcs.cpp
index 5d9c2fbf8..cc8549435 100644
--- a/dbcon/mysql/ha_mcs.cpp
+++ b/dbcon/mysql/ha_mcs.cpp
@@ -1273,10 +1273,12 @@ static my_bool cache_check_status(void *param)

 static ha_mcs_cache_share *cache_share_list= 0;
 static PSI_mutex_key key_LOCK_cache_share;
+#ifdef HAVE_PSI_INTERFACE
 static PSI_mutex_info all_mutexes[]=
 {
   { &key_LOCK_cache_share, "LOCK_cache_share", PSI_FLAG_GLOBAL},
 };
+#endif
 static mysql_mutex_t LOCK_cache_share;

 /*
@@ -1810,8 +1812,10 @@ static int columnstore_init_func(void* p)
     mcs_hton->create_select = create_columnstore_select_handler;
     mcs_hton->db_type = DB_TYPE_AUTOASSIGN;

+#ifdef HAVE_PSI_INTERFACE
     uint count = sizeof(all_mutexes)/sizeof(all_mutexes[0]);
     mysql_mutex_register("ha_mcs_cache", all_mutexes, count);
+#endif
     mysql_mutex_init(key_LOCK_cache_share, &LOCK_cache_share, MY_MUTEX_INIT_FAST);

     DBUG_RETURN(0);
diff --git a/oam/install_scripts/columnstore-post-install.in b/oam/install_scripts/columnstore-post-install.in
index b084e5116..8be66cb8b 100755
--- a/oam/install_scripts/columnstore-post-install.in
+++ b/oam/install_scripts/columnstore-post-install.in
@@ -259,13 +259,13 @@ fi

 if [ ! -z "$MCS_USE_S3_STORAGE" ] && [ $MCS_USE_S3_STORAGE -eq 1 ]; then
     if [ -z "$MCS_S3_BUCKET" ]; then
-        echo "Environmental variable \$MCS_USE_S3_STORAGE is set but there is no \$MCS_S3_BUCKET."
+        echo "Environment variable \$MCS_USE_S3_STORAGE is set but there is no \$MCS_S3_BUCKET."
     fi
     if [ -z "$MCS_S3_ACCESS_KEY_ID" ]; then
-        echo "Environmental variable \$MCS_USE_S3_STORAGE is set but there is no \$MCS_S3_ACCESS_KEY_ID."
+        echo "Environment variable \$MCS_USE_S3_STORAGE is set but there is no \$MCS_S3_ACCESS_KEY_ID."
     fi
     if [ -z "$MCS_S3_SECRET_ACCESS_KEY" ]; then
-        echo "Environmental variable \$MCS_USE_S3_STORAGE is set but there is no \$MCS_S3_SECRET_ACCESS_KEY."
+        echo "Environment variable \$MCS_USE_S3_STORAGE is set but there is no \$MCS_S3_SECRET_ACCESS_KEY."
     fi
     if [ -z "$MCS_S3_BUCKET" ] || [ -z "$MCS_S3_ACCESS_KEY_ID" ] || [ -z "$MCS_S3_SECRET_ACCESS_KEY" ]; then
         echo "Using local storage."
@@ -281,7 +281,24 @@ if [ ! -z "$MCS_USE_S3_STORAGE" ] && [ $MCS_USE_S3_STORAGE -eq 1 ]; then
         sed -i "s|# endpoint =.*|endpoint = $MCS_S3_ENDPOINT|" /etc/columnstore/storagemanager.cnf
         sed -i "s|# aws_access_key_id =.*|aws_access_key_id = $MCS_S3_ACCESS_KEY_ID|" /etc/columnstore/storagemanager.cnf
         sed -i "s|# aws_secret_access_key =.*|aws_secret_access_key = $MCS_S3_SECRET_ACCESS_KEY|" /etc/columnstore/storagemanager.cnf
-    fi
+
+        testS3Connection
+        if [ $? -ne 0 ]; then
+            sed -i "s|endpoint =.*|# endpoint = |" /etc/columnstore/storagemanager.cnf
+            sed -i "s|aws_access_key_id =.*|# aws_access_key_id = |" /etc/columnstore/storagemanager.cnf
+            sed -i "s|aws_secret_access_key =.*|# aws_secret_access_key = |" /etc/columnstore/storagemanager.cnf
+            echo "There was an error validating the settings used to access S3."
+            echo "The specified user must have GET, PUT, HEAD, and DELETE permissions to the bucket."
+            echo "Verify the following environment variables are correct:"
+            echo "MCS_S3_BUCKET"
+            echo "MCS_S3_ENDPOINT"
+            echo "MCS_S3_ACCESS_KEY_ID"
+            echo "MCS_S3_SECRET_ACCESS_KEY"
+            echo "MCS_S3_REGION"
+            echo "After environment variables are fixed, run command: columnstore-post-install"
+            exit 1
+        fi
+    fi
 fi

 #change ownership/permissions to be able to run columnstore as non-root
diff --git a/oam/install_scripts/columnstore-pre-uninstall.in b/oam/install_scripts/columnstore-pre-uninstall.in
index a23fe2138..600c88015 100755
--- a/oam/install_scripts/columnstore-pre-uninstall.in
+++ b/oam/install_scripts/columnstore-pre-uninstall.in
@@ -109,6 +109,11 @@ if [ -n "$systemctl" ] && [ $(running_systemd) -eq 0 ]; then
     rm -f /lib/systemd/system/mcs-storagemanager.service
     systemctl daemon-reload
+    # remove flag to prevent clusters using shared storage from initializing columnstore more than once
+    IFLAG=/var/lib/columnstore/storagemanager/cs-initialized
+    if [ -e $IFLAG ]; then
+        rm $IFLAG
+    fi

 else
     chkconfig=`which chkconfig 2>/dev/null`
     if [ -n "$chkconfig" ]; then
diff --git a/oam/install_scripts/mcs-loadbrm.py.in b/oam/install_scripts/mcs-loadbrm.py.in
index 9a2990078..5cc3a9d43 100755
--- a/oam/install_scripts/mcs-loadbrm.py.in
+++ b/oam/install_scripts/mcs-loadbrm.py.in
@@ -70,7 +70,9 @@ if __name__ == '__main__':
             config_root.find('./SystemConfig').append(ET.Element("DataFilePlugin"))
             config_root.find('./SystemConfig/DataFilePlugin').text = "libcloudio.so"

-        cs_config.write('/etc/columnstore/Columnstore.xml')
+
+        cs_config.write('/etc/columnstore/Columnstore.xml.loadbrm')
+        os.replace('/etc/columnstore/Columnstore.xml.loadbrm', '/etc/columnstore/Columnstore.xml')  # atomic replacement

         # Single-node on S3
         if storage.lower() == 's3' and not bucket.lower() == 'some_bucket' and pmCount == 1:
@@ -124,7 +126,7 @@ node.".format(e))
                 path.write_bytes(r.content)
             except Exception as e:
                 print(str(e))
-                print('Failed to load meta data from the primary \
+                print('Failed to load BRM data from the primary \
 node {}.'.format(primary_address), file=sys.stderr)
                 sys.exit(1)
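
The mcs-loadbrm.py change above writes the new configuration to a sibling file and then swaps it into place with `os.replace`, so readers never observe a half-written Columnstore.xml. A hedged C++ sketch of the same write-then-rename pattern (illustrative names and paths only, not part of the patch):

```cpp
// Sketch: atomic config replacement via write-to-temp + rename.
// On POSIX, std::filesystem::rename maps to rename(2), which atomically
// replaces the destination when both paths are on the same filesystem.
#include <filesystem>
#include <fstream>
#include <iostream>

namespace fs = std::filesystem;

bool replaceConfig(const fs::path& target, const std::string& contents)
{
    fs::path tmp = target;
    tmp += ".tmp";                       // temp file in the same directory

    {
        std::ofstream out(tmp, std::ios::trunc);
        if (!out)
            return false;
        out << contents;
        out.flush();                     // a real implementation might also fsync()
        if (!out)
            return false;
    }

    std::error_code ec;
    fs::rename(tmp, target, ec);         // readers see either the old or the new file
    return !ec;
}

int main()
{
    // Illustrative path; the real script targets /etc/columnstore/Columnstore.xml.
    if (!replaceConfig("Columnstore.xml", "<Columnstore/>\n"))
        std::cerr << "config replacement failed\n";
    return 0;
}
```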
diff --git a/storage-manager/src/S3Storage.cpp b/storage-manager/src/S3Storage.cpp
index f37e5d90a..03964cce1 100644
--- a/storage-manager/src/S3Storage.cpp
+++ b/storage-manager/src/S3Storage.cpp
@@ -89,7 +89,7 @@ S3Storage::ScopedConnection::~ScopedConnection()
     s3->returnConnection(conn);
 }

-S3Storage::S3Storage()
+S3Storage::S3Storage(bool skipRetry) : skipRetryableErrors(skipRetry)
 {
     /* Check creds from envvars
        Get necessary vars from config
@@ -231,7 +231,7 @@ int S3Storage::getObject(const string &_sourceKey, boost::shared_array
     do
     {
         err = ms3_get(creds, bucket.c_str(), sourceKey.c_str(), &_data, &len);
-        if (err && retryable_error(err))
+        if (err && (!skipRetryableErrors && retryable_error(err)))
         {
             if (ms3_server_error(creds))
                 logger->log(LOG_ERR, "S3Storage::getObject(): failed to GET, server says '%s'. bucket = %s, key = %s."
@@ -241,7 +241,7 @@ int S3Storage::getObject(const string &_sourceKey, boost::shared_array
                     s3err_msgs[err], bucket.c_str(), sourceKey.c_str());
             sleep(5);
         }
-    } while (err && retryable_error(err));
+    } while (err && (!skipRetryableErrors && retryable_error(err)));
     if (err)
     {
         if (ms3_server_error(creds))
@@ -321,7 +321,7 @@ int S3Storage::putObject(const boost::shared_array data, size_t len, co
     do
     {
         s3err = ms3_put(creds, bucket.c_str(), destKey.c_str(), data.get(), len);
-        if (s3err && retryable_error(s3err))
+        if (s3err && (!skipRetryableErrors && retryable_error(s3err)))
         {
             if (ms3_server_error(creds))
                 logger->log(LOG_ERR, "S3Storage::putObject(): failed to PUT, server says '%s'. bucket = %s, key = %s."
@@ -331,7 +331,7 @@ int S3Storage::putObject(const boost::shared_array data, size_t len, co
                     " Retrying...", s3err_msgs[s3err], bucket.c_str(), destKey.c_str());
             sleep(5);
         }
-    } while (s3err && retryable_error(s3err));
+    } while (s3err && (!skipRetryableErrors && retryable_error(s3err)));
     if (s3err)
     {
         if (ms3_server_error(creds))
@@ -355,7 +355,7 @@ int S3Storage::deleteObject(const string &_key)
     do
     {
         s3err = ms3_delete(creds, bucket.c_str(), key.c_str());
-        if (s3err && s3err != MS3_ERR_NOT_FOUND && retryable_error(s3err))
+        if (s3err && s3err != MS3_ERR_NOT_FOUND && (!skipRetryableErrors && retryable_error(s3err)))
         {
             if (ms3_server_error(creds))
                 logger->log(LOG_ERR, "S3Storage::deleteObject(): failed to DELETE, server says '%s'. bucket = %s, key = %s."
@@ -365,7 +365,7 @@ int S3Storage::deleteObject(const string &_key)
                     s3err_msgs[s3err], bucket.c_str(), key.c_str());
             sleep(5);
         }
-    } while (s3err && s3err != MS3_ERR_NOT_FOUND && retryable_error(s3err));
+    } while (s3err && s3err != MS3_ERR_NOT_FOUND && (!skipRetryableErrors && retryable_error(s3err)));

     if (s3err != 0 && s3err != MS3_ERR_NOT_FOUND)
     {
@@ -390,7 +390,7 @@ int S3Storage::copyObject(const string &_sourceKey, const string &_destKey)
     do
     {
         s3err = ms3_copy(creds, bucket.c_str(), sourceKey.c_str(), bucket.c_str(), destKey.c_str());
-        if (s3err && retryable_error(s3err))
+        if (s3err && (!skipRetryableErrors && retryable_error(s3err)))
         {
             if (ms3_server_error(creds))
                 logger->log(LOG_ERR, "S3Storage::copyObject(): failed to copy, server says '%s'. bucket = %s, srckey = %s, "
@@ -400,7 +400,7 @@ int S3Storage::copyObject(const string &_sourceKey, const string &_destKey)
                     " destkey = %s. Retrying...", s3err_msgs[s3err], bucket.c_str(), sourceKey.c_str(), destKey.c_str());
             sleep(5);
         }
-    } while (s3err && retryable_error(s3err));
+    } while (s3err && (!skipRetryableErrors && retryable_error(s3err)));

     if (s3err)
     {
@@ -439,7 +439,7 @@ int S3Storage::exists(const string &_key, bool *out)
     do
     {
         s3err = ms3_status(creds, bucket.c_str(), key.c_str(), &status);
-        if (s3err && s3err != MS3_ERR_NOT_FOUND && retryable_error(s3err))
+        if (s3err && s3err != MS3_ERR_NOT_FOUND && (!skipRetryableErrors && retryable_error(s3err)))
         {
             if (ms3_server_error(creds))
                 logger->log(LOG_ERR, "S3Storage::exists(): failed to HEAD, server says '%s'. bucket = %s, key = %s."
@@ -449,7 +449,7 @@ int S3Storage::exists(const string &_key, bool *out)
                     s3err_msgs[s3err], bucket.c_str(), key.c_str());
             sleep(5);
         }
-    } while (s3err && s3err != MS3_ERR_NOT_FOUND && retryable_error(s3err));
+    } while (s3err && s3err != MS3_ERR_NOT_FOUND && (!skipRetryableErrors && retryable_error(s3err)));

     if (s3err != 0 && s3err != MS3_ERR_NOT_FOUND)
     {
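
Every call site above repeats the same condition, `err && (!skipRetryableErrors && retryable_error(err))`: when the object is constructed with skipRetry = true (as testS3Connection does further down), even retryable errors break the loop immediately instead of sleeping and retrying. A condensed sketch of that retry policy, with stand-in `doRequest`/`isRetryable` helpers replacing the libmarias3 calls (names are illustrative, not from the patch):

```cpp
// Sketch of the retry policy used in S3Storage: retry transient errors every
// 5 seconds unless skipRetryableErrors is set, in which case fail fast.
#include <chrono>
#include <functional>
#include <iostream>
#include <thread>

enum class S3Err { None, Timeout, AccessDenied };

static bool isRetryable(S3Err e) { return e == S3Err::Timeout; }

S3Err runWithRetries(const std::function<S3Err()>& doRequest, bool skipRetryableErrors)
{
    S3Err err;
    do
    {
        err = doRequest();
        if (err != S3Err::None && !skipRetryableErrors && isRetryable(err))
        {
            std::cerr << "transient S3 error, retrying...\n";
            std::this_thread::sleep_for(std::chrono::seconds(5));
        }
    } while (err != S3Err::None && !skipRetryableErrors && isRetryable(err));
    return err;
}

int main()
{
    int calls = 0;
    auto flakyOnce = [&] { return ++calls == 1 ? S3Err::Timeout : S3Err::None; };

    // Normal mode: the timeout is retried (one 5 s sleep) and the retry succeeds.
    runWithRetries(flakyOnce, /*skipRetryableErrors=*/false);

    // Fail-fast mode (what a connection test wants): report the error immediately.
    calls = 0;
    S3Err e = runWithRetries(flakyOnce, /*skipRetryableErrors=*/true);
    std::cout << "fail-fast result: " << (e == S3Err::None ? "ok" : "error") << "\n";
    return 0;
}
```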
diff --git a/storage-manager/src/S3Storage.h b/storage-manager/src/S3Storage.h
index 3b9b4ae2a..ba20a6b7c 100644
--- a/storage-manager/src/S3Storage.h
+++ b/storage-manager/src/S3Storage.h
@@ -30,7 +30,8 @@ namespace storagemanager
 class S3Storage : public CloudStorage
 {
     public:
-        S3Storage();
+        S3Storage(bool skipRetry = false);
+
         virtual ~S3Storage();

         int getObject(const std::string &sourceKey, const std::string &destFile, size_t *size = NULL);
@@ -46,6 +47,8 @@ class S3Storage : public CloudStorage
         ms3_st *getConnection();
         void returnConnection(ms3_st *);

+        bool skipRetryableErrors;
+
         std::string bucket;   // might store this as a char *, since it's only used that way
         std::string prefix;
         std::string region;
diff --git a/storage-manager/src/testS3Connection.cpp b/storage-manager/src/testS3Connection.cpp
index aa0071d1b..a20d8d828 100644
--- a/storage-manager/src/testS3Connection.cpp
+++ b/storage-manager/src/testS3Connection.cpp
@@ -35,7 +35,7 @@ int s3TestConnection()
     int ret = 0;
     try
     {
-        S3Storage* s3 = new S3Storage();
+        S3Storage* s3 = new S3Storage(true);
         cout << "S3 Storage Manager Configuration OK" << endl;
         delete s3;
     }
diff --git a/utils/funcexp/func_insert.cpp b/utils/funcexp/func_insert.cpp
index 8e5864433..b8fa0588c 100644
--- a/utils/funcexp/func_insert.cpp
+++ b/utils/funcexp/func_insert.cpp
@@ -68,6 +68,7 @@ std::string Func_insert::getStrVal(rowgroup::Row& row,
     start = fp[1]->data()->getIntVal(row, isNull);
     if (isNull)
         return "";
+    start--;  // Because SQL syntax is 1 based and we want 0 based.

     length = fp[2]->data()->getIntVal(row, isNull);
     if (isNull)
@@ -83,14 +84,12 @@ std::string Func_insert::getStrVal(rowgroup::Row& row,
     int64_t strLen = cs->numchars(pos, end);

     // Return the original string if start isn't within the string.
-    if ((start < 1) || start >= strLen)
+    if ((start < 0) || start >= strLen)
         return src;

     if ((length < 0) || (length > strLen))
         length = strLen;

-    start--;  // Because SQL syntax is 1 based and we want 0 based.
-
     // Convert start and length from characters to bytes.
     start = cs->charpos(pos, end, start);
     length = cs->charpos(pos+start, end, length);
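
The func_insert.cpp hunk moves the 1-based-to-0-based adjustment ahead of the bounds check, which changes behaviour at the upper boundary: a start position equal to the string length (the last character) was previously rejected by `start >= strLen`. A standalone sketch of the 0-based logic with plain std::string, ignoring the charset/byte-position handling of the real code:

```cpp
// Sketch of SQL INSERT(str, pos, len, newstr) positional logic after the fix,
// treating one character as one byte (the real code converts via charpos()).
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>

std::string sqlInsert(const std::string& src, int64_t start, int64_t length,
                      const std::string& newstr)
{
    int64_t strLen = static_cast<int64_t>(src.size());

    start--;  // SQL positions are 1-based; work 0-based from here on.

    // Same guard as the patched code: out-of-range start returns src unchanged.
    if (start < 0 || start >= strLen)
        return src;

    if (length < 0 || length > strLen)
        length = strLen;

    return src.substr(0, start) + newstr + src.substr(std::min(start + length, strLen));
}

int main()
{
    // Classic example: INSERT('Quadratic', 3, 4, 'What') -> 'QuWhattic'
    std::cout << sqlInsert("Quadratic", 3, 4, "What") << "\n";

    // Boundary the patch fixes: start equal to the string length (last character).
    // The old order of operations (bounds check before the decrement) returned
    // "abc" unchanged; now the last character is replaced as expected.
    std::cout << sqlInsert("abc", 3, 1, "X") << "\n";  // prints "abX"
    return 0;
}
```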