1
0
mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git synced 2025-07-30 19:23:07 +03:00

Merge branch 'stable-23.10' into MCOL-4240

This commit is contained in:
Leonid Fedorov
2025-07-11 16:30:30 +04:00
committed by GitHub
110 changed files with 1018 additions and 315 deletions

View File

@ -4,32 +4,21 @@ local events = ["pull_request", "cron"];
local current_branch = "stable-23.10";
local servers = {
"stable-23.10": ["10.6-enterprise"],
[current_branch]: ["10.6-enterprise"],
};
local platforms = {
"stable-23.10": ["rockylinux:8", "rockylinux:9", "debian:12", "ubuntu:22.04", "ubuntu:24.04"],
[current_branch]: ["rockylinux:8", "rockylinux:9", "debian:12", "ubuntu:22.04", "ubuntu:24.04"],
};
local platforms_arm = {
"stable-23.10": ["rockylinux:8", "rockylinux:9", "debian:12", "ubuntu:22.04", "ubuntu:24.04"],
[current_branch]: ["rockylinux:8", "rockylinux:9", "debian:12", "ubuntu:22.04", "ubuntu:24.04"],
};
local rewrite_ubuntu_mirror = @"sed -i 's|//\\(us\\.\\)\\?archive\\.ubuntu\\.com|//us.archive.ubuntu.com|g' /etc/apt/sources.list || true; " +
@"sed -i 's|//\\(us\\.\\)\\?archive\\.ubuntu\\.com|//us.archive.ubuntu.com|g' /etc/apt/sources.list.d/ubuntu.sources || true; " +
"cat /etc/apt/sources.list.d/ubuntu.sources /etc/apt/sources.list | grep archive || true; ";
local builddir = "verylongdirnameforverystrangecpackbehavior";
local customEnvCommandsMap = {
// 'clang-18': ['apt install -y clang-18', 'export CC=/usr/bin/clang-18', 'export CXX=/usr/bin/clang++-18'],
"clang-20": [
rewrite_ubuntu_mirror,
"apt-get clean && apt-get update",
"apt-get install -y wget curl lsb-release software-properties-common gnupg",
"wget https://apt.llvm.org/llvm.sh",
"bash llvm.sh 20",
"export CC=/usr/bin/clang",
"export CXX=/usr/bin/clang++",
],
// 'clang-18': ["bash /mdb/" + builddir + "/storage/columnstore/columnstore/build/install_clang_deb.sh 18"],
"clang-20": ["bash /mdb/" + builddir + "/storage/columnstore/columnstore/build/install_clang_deb.sh 20"],
};
local customEnvCommands(envkey, builddir) =
@ -56,12 +45,7 @@ local customBootstrapParamsForAdditionalPipelinesMap = {
local any_branch = "**";
local platforms_custom = platforms[current_branch];
local platforms_arm_custom = platforms_arm[current_branch];
local platforms_mtr = platforms[current_branch];
local builddir = "verylongdirnameforverystrangecpackbehavior";
local mtr_suite_list = "basic,bugfixes";
local mtr_full_set = "basic,bugfixes,devregression,autopilot,extended,multinode,oracle,1pmonly";
@ -95,59 +79,14 @@ local upgrade_test_lists = {
},
};
local testRun(platform) =
local platform_map = {
"rockylinux:8": "ctest3 -R columnstore: -j $(nproc) --output-on-failure",
"rockylinux:9": "ctest3 -R columnstore: -j $(nproc) --output-on-failure",
"debian:12": "cd builddir; ctest -R columnstore: -j $(nproc) --output-on-failure",
"ubuntu:20.04": "cd builddir; ctest -R columnstore: -j $(nproc) --output-on-failure",
"ubuntu:22.04": "cd builddir; ctest -R columnstore: -j $(nproc) --output-on-failure",
"ubuntu:24.04": "cd builddir; ctest -R columnstore: -j $(nproc) --output-on-failure",
};
platform_map[platform];
local gcc_version = "11";
local rockylinux8_deps = "dnf install -y 'dnf-command(config-manager)' " +
"&& dnf config-manager --set-enabled powertools " +
"&& dnf install -y gcc-toolset-" + gcc_version + " libarchive cmake " +
"&& . /opt/rh/gcc-toolset-" + gcc_version + "/enable ";
local rockylinux9_deps = "dnf install -y 'dnf-command(config-manager)' " +
"&& dnf config-manager --set-enabled crb " +
"&& dnf install -y gcc gcc-c++";
local rockylinux_common_deps = " && dnf install -y git lz4 lz4-devel cppunit-devel cmake3 boost-devel snappy-devel pcre2-devel";
local deb_deps = rewrite_ubuntu_mirror + "apt-get clean && apt-get update && apt-get install --yes git libboost-all-dev libcppunit-dev libsnappy-dev cmake libpcre2-dev";
local testPreparation(platform) =
local platform_map = {
"rockylinux:8": rockylinux8_deps + rockylinux_common_deps,
"rockylinux:9": rockylinux9_deps + rockylinux_common_deps,
"debian:12": deb_deps,
"ubuntu:20.04": deb_deps,
"ubuntu:22.04": deb_deps,
"ubuntu:24.04": deb_deps,
};
platform_map[platform];
local make_clickable_link(link) = "echo -e '\\e]8;;" + link + "\\e\\\\" + link + "\\e]8;;\\e\\\\'";
local echo_running_on = ["echo running on ${DRONE_STAGE_MACHINE}",
make_clickable_link("https://us-east-1.console.aws.amazon.com/ec2/home?region=us-east-1#Instances:search=:${DRONE_STAGE_MACHINE};v=3;$case=tags:true%5C,client:false;$regex=tags:false%5C,client:false;sort=desc:launchTime")];
local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise", customBootstrapParams="", customBuildEnvCommandsMapKey="") = {
local pkg_format = if (std.split(platform, ":")[0] == "rockylinux") then "rpm" else "deb",
local init = if (pkg_format == "rpm") then "/usr/lib/systemd/systemd" else "systemd",
local mtr_path = if (pkg_format == "rpm") then "/usr/share/mysql-test" else "/usr/share/mysql/mysql-test",
local cmapi_path = "/usr/share/columnstore/cmapi",
local etc_path = "/etc/columnstore",
local socket_path = if (pkg_format == "rpm") then "/var/lib/mysql/mysql.sock" else "/run/mysqld/mysqld.sock",
local config_path_prefix = if (pkg_format == "rpm") then "/etc/my.cnf.d/" else "/etc/mysql/mariadb.conf.d/50-",
local img = if (platform == "rockylinux:8") then platform else "detravi/" + std.strReplace(platform, "/", "-"),
local branch_ref = if (branch == any_branch) then current_branch else branch,
// local regression_tests = if (std.startsWith(platform, 'debian') || std.startsWith(platform, 'ubuntu:20')) then 'test000.sh' else 'test000.sh,test001.sh',
local branchp = if (branch == "**") then "" else branch + "/",
local brancht = if (branch == "**") then "" else branch + "-",
@ -163,20 +102,13 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
local server_remote = if (std.endsWith(server, "enterprise")) then "https://github.com/mariadb-corporation/MariaDBEnterprise" else "https://github.com/MariaDB/server",
local sccache_arch = if (arch == "amd64") then "x86_64" else "aarch64",
local get_sccache = ["echo getting sccache...",
rewrite_ubuntu_mirror,
"(apt-get clean && apt-get update -y && apt-get install -y curl || yum install -y curl || true)",
"curl -L -o sccache.tar.gz https://github.com/mozilla/sccache/releases/download/v0.10.0/sccache-v0.10.0-" + sccache_arch + "-unknown-linux-musl.tar.gz &&",
"tar xzf sccache.tar.gz",
"install sccache*/sccache /usr/local/bin/ && echo sccache installed"],
local pipeline = self,
publish(step_prefix="pkg", eventp=event + "/${DRONE_BUILD_NUMBER}"):: {
name: "publish " + step_prefix,
depends_on: [std.strReplace(step_prefix, " latest", ""), "createrepo"],
image: "amazon/aws-cli:2.22.30",
volumes: [pipeline._volumes.mdb],
when: {
status: ["success", "failure"],
},
@ -191,13 +123,26 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
AWS_DEFAULT_REGION: "us-east-1",
},
commands: [
"ls " + result,
'[ -z "$(ls -A "' + result + '")" ] && echo Nothing to publish! && exit 1',
"sleep 10",
"ls -lR " + result,
"aws s3 sync " + result + " s3://cspkg/" + branchp + eventp + "/" + server + "/" + arch + "/" + result + " --only-show-errors",
//clean old versions of .deb/.rpm files
"source /mdb/" + builddir + "/storage/columnstore/columnstore/VERSION && " +
"CURRENT_VERSION=${COLUMNSTORE_VERSION_MAJOR}.${COLUMNSTORE_VERSION_MINOR}.${COLUMNSTORE_VERSION_PATCH} && " +
"aws s3 rm s3://cspkg/" + branchp + eventp + "/" + server + "/" + arch + "/" + result + "/ " +
"--recursive " +
"--exclude \"*\" " +
// include only debs/rpms with columnstore in names
"--include \"*columnstore*.deb\" " +
"--include \"*columnstore*.rpm\" " +
// but do not delete the ones matching CURRENT_VERSION
"--exclude \"*${CURRENT_VERSION}*.deb\" " +
"--exclude \"*${CURRENT_VERSION}*.rpm\" " +
"--only-show-errors",
"aws s3 sync " + result + "/" + " s3://cspkg/" + branchp + eventp + "/" + server + "/" + arch + "/" + result + " --only-show-errors",
'echo "Data uploaded to: ' + publish_pkg_url + '"',
make_clickable_link(publish_pkg_url),
"rm -rf " + result + "/*",
],
},
@ -239,15 +184,8 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
local execInnerDocker(command, containerName, flags="") =
"docker exec " + flags + " -t " + containerName + " " + command,
local execInnerDockerNoTTY(command, containerName, flags="") =
"docker exec " + flags + " " + containerName + " " + command,
local getContainerName(stepname) = stepname + "$${DRONE_BUILD_NUMBER}",
local installCmapi(containerName, pkg_format) =
if (pkg_format == "deb") then execInnerDocker('bash -c "apt-get clean && apt-get update -y && apt-get install -y mariadb-columnstore-cmapi"', containerName)
else execInnerDocker('bash -c "yum update -y && yum install -y MariaDB-columnstore-cmapi"', containerName),
local prepareTestContainer(containerName, result, do_setup) =
'sh -c "apk add bash && bash /mdb/' + builddir + "/storage/columnstore/columnstore/build/prepare_test_container.sh" +
" --container-name " + containerName +
@ -256,7 +194,12 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
" --packages-url " + packages_url +
" --do-setup " + std.toString(do_setup) + '"',
local reportTestStage(containerName, result, stage) = 'sh -c "apk add bash && bash /mdb/' + builddir + "/storage/columnstore/columnstore/build/report_test_stage.sh " + containerName + " " + result + " " + stage + '"',
local reportTestStage(containerName, result, stage) =
'sh -c "apk add bash && bash /mdb/' + builddir + '/storage/columnstore/columnstore/build/report_test_stage.sh' +
' --container-name ' + containerName +
' --result-path ' + result +
' --stage ' + stage + '"',
_volumes:: {
mdb: {
@ -275,7 +218,8 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
volumes: [pipeline._volumes.mdb, pipeline._volumes.docker],
commands: [
prepareTestContainer(getContainerName("smoke"), result, true),
"bash /mdb/" + builddir + "/storage/columnstore/columnstore/build/run_smoke.sh " + getContainerName("smoke"),
"bash /mdb/" + builddir + "/storage/columnstore/columnstore/build/run_smoke.sh" +
' --container-name ' + getContainerName("smoke"),
],
},
smokelog:: {
@ -301,14 +245,16 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
},
},
commands: [
// why do we mount cgroups here, but miss it on other steps?
prepareTestContainer(getContainerName("upgrade") + version, result, false),
if (pkg_format == "deb")
then execInnerDocker('bash -c "./upgrade_setup_deb.sh ' + version + " " + result + " " + arch + " " + repo_pkg_url_no_res + ' $${UPGRADE_TOKEN}"',
getContainerName("upgrade") + version),
if (pkg_format == "rpm")
then execInnerDocker('bash -c "./upgrade_setup_rpm.sh ' + version + " " + result + " " + arch + " " + repo_pkg_url_no_res + ' $${UPGRADE_TOKEN}"',
getContainerName("upgrade") + version),
execInnerDocker('bash -c "./upgrade_setup_' + pkg_format + '.sh '
+ version + ' '
+ result + ' '
+ arch + ' '
+ repo_pkg_url_no_res
+ ' $${UPGRADE_TOKEN}"',
getContainerName("upgrade") + version
)
],
},
upgradelog:: {
@ -411,7 +357,6 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
dockerfile:: {
name: "dockerfile",
depends_on: ["publish pkg", "publish cmapi build"],
//failure: 'ignore',
image: "alpine/git:2.49.0",
environment: {
DOCKER_BRANCH_REF: "${DRONE_SOURCE_BRANCH}",
@ -470,17 +415,10 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
},
commands: [
prepareTestContainer(getContainerName("cmapi"), result, true),
installCmapi(getContainerName("cmapi"), pkg_format),
"cd cmapi",
"for i in mcs_node_control cmapi_server failover; do docker cp $${i}/test cmapi$${DRONE_BUILD_NUMBER}:" + cmapi_path + "/$${i}/; done",
"docker cp run_tests.py cmapi$${DRONE_BUILD_NUMBER}:" + cmapi_path + "/",
execInnerDocker("systemctl start mariadb-columnstore-cmapi", getContainerName("cmapi")),
// set API key to /etc/columnstore/cmapi_server.conf
execInnerDocker('bash -c "mcs cluster set api-key --key somekey123"', getContainerName("cmapi")),
// copy cmapi conf file for test purposes (there are api key already set inside)
execInnerDocker('bash -c "cp %s/cmapi_server.conf %s/cmapi_server/"' % [etc_path, cmapi_path], getContainerName("cmapi")),
execInnerDocker("systemctl stop mariadb-columnstore-cmapi", getContainerName("cmapi")),
execInnerDocker('bash -c "cd ' + cmapi_path + ' && python/bin/python3 run_tests.py"', getContainerName("cmapi")),
"apk add bash && bash /mdb/" + builddir + "/storage/columnstore/columnstore/build/run_cmapi_test.sh" +
" --container-name " + getContainerName("cmapi") +
" --pkg-format " + pkg_format,
],
},
cmapilog:: {
@ -498,9 +436,8 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
multi_node_mtr:: {
name: "mtr",
depends_on: ["dockerhub"],
//failure: 'ignore',
image: "docker:28.2.2",
volumes: [pipeline._volumes.docker],
volumes: [pipeline._volumes.docker, pipeline._volumes.mdb],
environment: {
DOCKER_LOGIN: {
from_secret: "dockerhub_user",
@ -512,18 +449,10 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
},
commands: [
"echo $$DOCKER_PASSWORD | docker login --username $$DOCKER_LOGIN --password-stdin",
"cd docker",
"cp .env_example .env",
'sed -i "/^MCS_IMAGE_NAME=/s/=.*/=${MCS_IMAGE_NAME}/" .env',
'sed -i "/^MAXSCALE=/s/=.*/=false/" .env',
"docker-compose up -d",
"docker exec mcs1 provision mcs1 mcs2 mcs3",
"docker cp ../mysql-test/columnstore mcs1:" + mtr_path + "/suite/",
"docker exec -t mcs1 chown mysql:mysql -R " + mtr_path,
'docker exec -t mcs1 mariadb -e "create database if not exists test;"',
// delay for manual debugging on live instance
"sleep $${COMPOSE_DELAY_SECONDS:-1s}",
'docker exec -t mcs1 bash -c "cd ' + mtr_path + " && ./mtr --extern socket=" + socket_path + ' --force --print-core=detailed --print-method=gdb --max-test-fail=0 --suite=columnstore/basic,columnstore/bugfixes"',
"apk add bash && bash /mdb/" + builddir + "/storage/columnstore/columnstore/build/run_multi_node_mtr.sh " +
"--columnstore-image-name $${MCS_IMAGE_NAME} " +
"--distro " + platform,
],
},
@ -531,7 +460,6 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
type: "docker",
name: std.join(" ", [branch, platform, event, arch, server, customBootstrapParams, customBuildEnvCommandsMapKey]),
platform: { arch: arch },
// [if arch == 'arm64' then 'node']: { arch: 'arm64' },
clone: { depth: 10 },
steps: [
{
@ -584,25 +512,22 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
SCCACHE_BUCKET: "cs-sccache",
SCCACHE_REGION: "us-east-1",
SCCACHE_S3_USE_SSL: "true",
SCCACHE_S3_KEY_PREFIX: result + branch + server + arch + "${DRONE_PULL_REQUEST}",
//SCCACHE_ERROR_LOG: '/tmp/sccache_log.txt',
//SCCACHE_LOG: 'debug',
SCCACHE_S3_KEY_PREFIX: result + branch + server + arch,
},
commands: [
"mkdir /mdb/" + builddir + "/" + result,
]
+ get_sccache
+ customEnvCommands(customBuildEnvCommandsMapKey, builddir) +
[
'bash -c "set -o pipefail && bash /mdb/' + builddir + "/storage/columnstore/columnstore/build/bootstrap_mcs.sh " +
"--build-type RelWithDebInfo " +
"--distro " + platform + " " +
"--build-packages --install-deps --sccache " +
"--build-path " + "/mdb/" + builddir + "/builddir " +
"--build-packages --install-deps --sccache" +
" " + customBootstrapParams +
" " + customBootstrapParamsForExisitingPipelines(platform) + " | " +
"/mdb/" + builddir + "/storage/columnstore/columnstore/build/ansi2txt.sh " +
"/mdb/" + builddir + "/" + result + '/build.log"',
"sccache --show-stats",
"/mdb/" + builddir + "/" + result + '/build.log "',
],
},
{
@ -614,7 +539,7 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
DEBIAN_FRONTEND: "noninteractive",
},
commands: [
"bash /mdb/" + builddir + "/storage/columnstore/columnstore/build/build_cmapi.sh --distro " + platform + " --arch " + arch,
"bash /mdb/" + builddir + "/storage/columnstore/columnstore/build/build_cmapi.sh --distro " + platform,
],
},
{
@ -629,23 +554,9 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
"bash /mdb/" + builddir + "/storage/columnstore/columnstore/build/createrepo.sh --result " + result,
],
},
{
name: "unittests",
depends_on: ["createrepo"],
image: img,
volumes: [pipeline._volumes.mdb],
environment: {
DEBIAN_FRONTEND: "noninteractive",
},
commands: [
"cd /mdb/" + builddir,
testPreparation(platform),
testRun(platform),
],
},
{
name: "pkg",
depends_on: ["unittests"],
depends_on: ["createrepo"],
image: "alpine/git:2.49.0",
when: {
status: ["success", "failure"],
@ -684,19 +595,15 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
(if (platform == "rockylinux:8" && arch == "amd64") then [pipeline.dockerfile] + [pipeline.dockerhub] + [pipeline.multi_node_mtr] else [pipeline.mtr] + [pipeline.mtrlog] + [pipeline.publish("mtrlog")]) +
[pipeline.regression(regression_tests[i], if (i == 0) then ["mtr", "publish pkg", "publish cmapi build"] else [regression_tests[i - 1]]) for i in indexes(regression_tests)] +
[pipeline.regressionlog] +
[pipeline.publish("regressionlog")] +
// [pipeline.upgrade(mdb_server_versions[i]) for i in indexes(mdb_server_versions)] +
// (if (std.length(mdb_server_versions) == 0) then [] else [pipeline.upgradelog] + [pipeline.publish("upgradelog")]) +
(if (event == "cron") then [pipeline.publish("regressionlog latest", "latest")] else []),
(if (event == "cron") then [pipeline.publish("regressionlog latest", "latest")] else [pipeline.publish("regressionlog")]),
volumes: [pipeline._volumes.mdb { temp: {} }, pipeline._volumes.docker { host: { path: "/var/run/docker.sock" } }],
trigger: {
event: [event],
branch: [branch],
},
// + (if event == 'cron' then {
// cron: ['nightly-' + std.strReplace(branch, '.', '-')],
// } else {}),
};
local FinalPipeline(branch, event) = {
@ -749,11 +656,11 @@ local FinalPipeline(branch, event) = {
[
Pipeline(any_branch, p, "custom", "amd64", "10.6-enterprise")
for p in platforms_custom
for p in platforms[current_branch]
] +
// [
// Pipeline(any_branch, p, "custom", "arm64", "10.6-enterprise")
// for p in platforms_arm_custom
// for p in platforms_arm[current_branch];
// ]
// +
[

View File

@ -49,8 +49,12 @@ include(compiler_flags)
include(misc)
include(cpack_manage)
add_subdirectory(dbcon/mysql)
if(NOT TARGET columnstore)
return()
endif()
set(COMPONENTS
dbcon/mysql
utils
oam/oamcpp
dbcon/execplan

View File

@ -1,4 +1,4 @@
COLUMNSTORE_VERSION_MAJOR=23
COLUMNSTORE_VERSION_MINOR=10
COLUMNSTORE_VERSION_PATCH=4
COLUMNSTORE_VERSION_PATCH=5
COLUMNSTORE_VERSION_RELEASE=1

View File

@ -11,7 +11,6 @@ export CLICOLOR_FORCE=1 #cmake output
INSTALL_PREFIX="/usr/"
DATA_DIR="/var/lib/mysql/data"
CMAKE_BIN_NAME=cmake
CTEST_BIN_NAME=ctest
RPM_CONFIG_DIR="/etc/my.cnf.d"
DEB_CONFIG_DIR="/etc/mysql/mariadb.conf.d"
@ -53,7 +52,6 @@ optparse.define short=O long=static desc="Build all with static libraries" varia
optparse.define short=p long=build-packages desc="Build packages" variable=BUILD_PACKAGES default=false value=true
optparse.define short=P long=report-path desc="Path for storing reports and profiles" variable=REPORT_PATH default="/core"
optparse.define short=r long=restart-services variable=RESTART_SERVICES default=true value=false
optparse.define short=s long=sccache desc="Build with sccache" variable=SCCACHE default=false value=true
optparse.define short=S long=skip-columnstore-submodules desc="Skip columnstore submodules initialization" variable=SKIP_SUBMODULES default=false value=true
optparse.define short=t long=build-type desc="Build Type: ${BUILD_TYPE_OPTIONS[*]}" variable=MCS_BUILD_TYPE
optparse.define short=T long=tsan desc="Build with TSAN" variable=TSAN default=false value=true
@ -62,6 +60,7 @@ optparse.define short=U long=ubsan desc="Build with UBSAN" variable=UBSAN defaul
optparse.define short=v long=verbose desc="Verbose makefile commands" variable=MAKEFILE_VERBOSE default=false value=true
optparse.define short=V long=add-branch-name-to-outdir desc="Add branch name to build output directory" variable=BRANCH_NAME_TO_OUTDIR default=false value=true
optparse.define short=W long=without-core-dumps desc="Do not produce core dumps" variable=WITHOUT_COREDUMPS default=false value=true
optparse.define short=s long=sccache desc="Build with sccache" variable=SCCACHE default=false value=true
source $(optparse.build)
@ -77,11 +76,33 @@ if [[ ! " ${DISTRO_OPTIONS[*]} " =~ " ${OS} " ]]; then
detect_distro
fi
pkg_format="deb"
if [[ "$OS" == *"rocky"* ]]; then
pkg_format="rpm"
select_pkg_format ${OS}
if [[ "$PKG_FORMAT" == "rpm" ]]; then
CTEST_BIN_NAME="ctest3"
else
CTEST_BIN_NAME="ctest"
fi
# Download and install the sccache binary (v0.10.0, static musl build)
# from GitHub releases into /usr/local/bin.
# No-op unless the build was started with the -s/--sccache flag
# (i.e. SCCACHE=true).
install_sccache() {
# Skip entirely when sccache support was not requested.
if [[ "$SCCACHE" == false ]]; then
return
fi
# Map `arch` output to the release artifact's architecture suffix.
# NOTE(review): any non-x86_64 host is assumed to be aarch64 — confirm
# this script never runs on other architectures.
if [[ "$(arch)" == "x86_64" ]]; then
sccache_arch="x86_64"
else
sccache_arch="aarch64"
fi
message "getting sccache..."
# Fetch the prebuilt tarball for this architecture (-L follows redirects).
curl -L -o sccache.tar.gz \
"https://github.com/mozilla/sccache/releases/download/v0.10.0/sccache-v0.10.0-${sccache_arch}-unknown-linux-musl.tar.gz"
tar xzf sccache.tar.gz
# `install` copies the binary into PATH with executable permissions.
install sccache*/sccache /usr/local/bin/ && message "sccache installed"
}
install_deps() {
if [[ $INSTALL_DEPS = false ]]; then
return
@ -129,6 +150,7 @@ install_deps() {
}
install_deps
install_sccache
cd $COLUMSNTORE_SOURCE_PATH
COLUMNSTORE_BRANCH=$(git rev-parse --abbrev-ref HEAD)
@ -245,7 +267,7 @@ modify_packaging() {
echo "Modifying_packaging..."
cd $MDB_SOURCE_PATH
if [[ $pkg_format == "deb" ]]; then
if [[ $PKG_FORMAT == "deb" ]]; then
sed -i 's|.*-d storage/columnstore.*|elif [[ -d storage/columnstore/columnstore/debian ]]|' debian/autobake-deb.sh
fi
@ -257,7 +279,7 @@ modify_packaging() {
grep mariadb /usr/share/lto-disabled-list/lto-disabled-list
fi
if [[ $pkg_format == "deb" ]]; then
if [[ $PKG_FORMAT == "deb" ]]; then
apt-cache madison liburing-dev | grep liburing-dev || {
sed 's/liburing-dev/libaio-dev/g' -i debian/control &&
sed '/-DIGNORE_AIO_CHECK=YES/d' -i debian/rules &&
@ -466,7 +488,7 @@ generate_svgs() {
build_package() {
cd $MDB_SOURCE_PATH
if [[ $pkg_format == "rpm" ]]; then
if [[ $PKG_FORMAT == "rpm" ]]; then
command="cmake ${MDB_CMAKE_FLAGS[@]} && make -j\$(nproc) package"
else
export DEBIAN_FRONTEND="noninteractive"
@ -555,7 +577,7 @@ run_unit_tests() {
message "Running unittests"
cd $MARIA_BUILD_PATH
${CTEST_BIN_NAME} . -R columnstore: -j $(nproc) --progress --output-on-failure
${CTEST_BIN_NAME} . -R columnstore: -j $(nproc) --output-on-failure
cd - >/dev/null
}
@ -745,9 +767,15 @@ init_submodules
if [[ $BUILD_PACKAGES = true ]]; then
modify_packaging
build_package
message_splitted "PACKAGES BUILD FINISHED"
exit 0
( build_package && run_unit_tests )
exit_code=$?
if [[ $SCCACHE = true ]]; then
sccache --show-stats
fi
exit $exit_code
fi
stop_service

View File

@ -13,7 +13,6 @@ MDB_SOURCE_PATH=$(realpath "$SCRIPT_LOCATION"/../../../..)
source "$SCRIPT_LOCATION"/utils.sh
optparse.define short=d long=distro desc="distro" variable=OS
optparse.define short=a long=arch desc="architecture" variable=ARCH
source $(optparse.build)
echo "Arguments received: $@"
@ -22,18 +21,15 @@ if [ "$EUID" -ne 0 ]; then
exit 1
fi
if [[ -z "${OS:-}" || -z "${ARCH:-}" ]]; then
echo "Please provide provide --distro and --arch parameters, e.g. ./build_cmapi.sh --distro ubuntu:22.04 --arch amd64"
if [[ -z "${OS:-}" ]]; then
echo "Please provide provide --distro parameter, e.g. ./build_cmapi.sh --distro ubuntu:22.04"
exit 1
fi
pkg_format="deb"
if [[ "$OS" == *"rocky"* ]]; then
pkg_format="rpm"
fi
select_pkg_format ${OS}
if [[ "$ARCH" == "arm64" ]]; then
export CC=gcc #TODO: what it is for?
if [[ "$(arch)" == "arm64" ]]; then
export CC=gcc
fi
on_exit() {
@ -55,18 +51,18 @@ install_deps() {
retry_eval 5 "dnf config-manager --set-enabled devel && dnf update -q -y" #to make redhat-lsb-core available for rocky 9
fi
if [[ "$pkg_format" == "rpm" ]]; then
if [[ "$PKG_FORMAT" == "rpm" ]]; then
retry_eval 5 "dnf update -q -y && dnf install -q -y epel-release wget zstd findutils gcc cmake make rpm-build redhat-lsb-core libarchive"
else
retry_eval 5 "apt-get update -qq -o Dpkg::Use-Pty=0 && apt-get install -qq -o Dpkg::Use-Pty=0 wget zstd findutils gcc cmake make dpkg-dev lsb-release"
fi
if [ "$ARCH" == "amd64" ]; then
if [ "$(arch)" == "x86_64" ]; then
PYTHON_URL="https://github.com/indygreg/python-build-standalone/releases/download/20220802/cpython-3.9.13+20220802-x86_64_v2-unknown-linux-gnu-pgo+lto-full.tar.zst"
elif [ "$ARCH" == "arm64" ]; then
elif [ "$(arch)" == "arm64" ]; then
PYTHON_URL="https://github.com/indygreg/python-build-standalone/releases/download/20220802/cpython-3.9.13+20220802-aarch64-unknown-linux-gnu-noopt-full.tar.zst"
else
echo "Unsupported architecture: $ARCH"
echo "Unsupported architecture: $(arch)"
exit 1
fi
@ -84,7 +80,7 @@ install_deps() {
build_cmapi() {
cd "$COLUMNSTORE_SOURCE_PATH"/cmapi
./cleanup.sh
cmake -D"${pkg_format^^}"=1 -DSERVER_DIR="$MDB_SOURCE_PATH" . && make package
cmake -D"${PKG_FORMAT^^}"=1 -DSERVER_DIR="$MDB_SOURCE_PATH" . && make package
}
install_deps
build_cmapi

28
build/install_clang_deb.sh Executable file
View File

@ -0,0 +1,28 @@
#!/bin/bash

# Install a specific clang major version on a Debian/Ubuntu system using
# the official llvm.sh bootstrap script from apt.llvm.org.
#
# Usage: install_clang_deb.sh <clang-version>   e.g. install_clang_deb.sh 20

set -eo pipefail

SCRIPT_LOCATION=$(dirname "$0")
# Provides message(), retry_eval() and change_ubuntu_mirror() helpers.
source "$SCRIPT_LOCATION"/utils.sh

CLANG_VERSION="$1"

# Exactly one positional argument (the clang major version) is required.
if [[ $# -ne 1 ]]; then
echo "Please pass clang-version as a first parameter"
exit 1
fi

# Switch apt to the US Ubuntu mirror to avoid flaky regional mirrors.
change_ubuntu_mirror us

message "Installing clang-${CLANG_VERSION}"

# Quote the compound command so retry_eval retries the WHOLE chain.
# Unquoted, the shell splits the line at `&&` and only `apt-get clean`
# is passed to (and retried by) retry_eval, while the update/install
# steps run outside the retry loop. This matches the quoting used by
# every other retry_eval call site in the build scripts.
retry_eval 5 "apt-get clean && apt-get update && apt-get install -y wget curl lsb-release software-properties-common gnupg"

wget https://apt.llvm.org/llvm.sh
bash llvm.sh $CLANG_VERSION

# NOTE(review): these exports only affect this script's own process;
# callers that need CC/CXX set must `source` this script, not execute it.
export CC=/usr/bin/clang
export CXX=/usr/bin/clang++

View File

@ -27,8 +27,10 @@ if [[ -z "${CONTAINER_NAME:-}" || -z "${DOCKER_IMAGE:-}" || -z "${RESULT:-}" ||
exit 1
fi
select_pkg_format ${RESULT}
start_container() {
if [[ "$RESULT" == *rocky* ]]; then
if [[ $PKG_FORMAT == "rpm" ]]; then
SYSTEMD_PATH="/usr/lib/systemd/systemd"
MTR_PATH="/usr/share/mysql-test"
else
@ -103,7 +105,7 @@ fi
# install deps
if [[ "$RESULT" == *rocky* ]]; then
execInnerDockerWithRetry "$CONTAINER_NAME" 'yum update -y && yum install -y cracklib-dicts diffutils elfutils epel-release expect findutils iproute gawk gcc-c++ gdb hostname lz4 patch perl procps-ng rsyslog sudo tar wget which'
execInnerDockerWithRetry "$CONTAINER_NAME" 'yum --nobest update -y && yum --nobest install -y cracklib-dicts diffutils elfutils epel-release expect findutils iproute gawk gcc-c++ gdb hostname lz4 patch perl procps-ng rsyslog sudo tar wget which'
else
change_ubuntu_mirror_in_docker "$CONTAINER_NAME" "us"
execInnerDockerWithRetry "$CONTAINER_NAME" 'apt update -y && apt install -y elfutils expect findutils iproute2 g++ gawk gdb hostname liblz4-tool patch procps rsyslog sudo tar wget'
@ -128,5 +130,5 @@ echo "PrepareTestStage completed in $CONTAINER_NAME"
if [[ -z $(docker ps -q --filter "name=${CONTAINER_NAME}") ]]; then
start_container
prepare_container
else warn "Container ${CONTAINER_NAME} is already running!"
else message "Container ${CONTAINER_NAME} is already running, skipping prepare step"
fi

View File

@ -2,15 +2,33 @@
set -eo pipefail
CONTAINER_NAME=$1
RESULT=$2
STAGE=$3
SCRIPT_LOCATION=$(dirname "$0")
source "$SCRIPT_LOCATION"/utils.sh
optparse.define short=c long=container-name desc="Name of the Docker container where mtr tests will run" variable=CONTAINER_NAME
optparse.define short=r long=result-path desc="Path for logs and results" variable=RESULT
optparse.define short=s long=stage desc="Test stage name" variable=STAGE
source $(optparse.build)
echo "Arguments received: $@"
cleanup() {
if [[ -n $(docker ps -q --filter "name=${CONTAINER_NAME}") ]]; then
echo "Cleaning up container ${CONTAINER_NAME}..."
docker rm -f "${CONTAINER_NAME}" || echo "Can't remove container ${CONTAINER_NAME}!"
fi
}
#Remove the container on exit
trap cleanup EXIT
for flag in CONTAINER_NAME RESULT STAGE; do
if [[ -z "${!flag}" ]]; then
error "Missing required flag: -${flag:0:1} / --${flag,,}"
exit 1
fi
done
if [[ "$EUID" -ne 0 ]]; then
error "Please run script as root"
exit 1
@ -21,7 +39,9 @@ if [[ -z $(docker ps -q --filter "name=${CONTAINER_NAME}") ]]; then
exit 1
fi
if [[ "$RESULT" == *rocky* ]]; then
select_pkg_format ${RESULT}
if [[ "$PKG_FORMAT" == "rpm" ]]; then
SYSTEMD_PATH="/usr/lib/systemd/systemd"
MTR_PATH="/usr/share/mysql-test"
else
@ -59,14 +79,14 @@ elif [[ "${CONTAINER_NAME}" == *upgrade* ]]; then
elif [[ "${CONTAINER_NAME}" == *regression* ]]; then
echo "---------- start columnstore regression short report ----------"
execInnerDocker "$CONTAINER_NAME" 'cd /mariadb-columnstore-regression-test/mysql/queries/nightly/alltest; cat go.log || echo "missing go.log"'
execInnerDocker "$CONTAINER_NAME" 'cd /mariadb-columnstore-regression-test/mysql/queries/nightly/alltest; cat go.log' || echo "missing go.log"
echo "---------- end columnstore regression short report ----------"
echo
docker cp "${CONTAINER_NAME}:/mariadb-columnstore-regression-test/mysql/queries/nightly/alltest/reg-logs/" "/drone/src/${RESULT}/" || echo "missing regression logs"
docker cp "${CONTAINER_NAME}:/mariadb-columnstore-regression-test/mysql/queries/nightly/alltest/testErrorLogs.tgz" "/drone/src/${RESULT}/" || echo "missing testErrorLogs.tgz"
execInnerDocker "$CONTAINER_NAME" 'tar czf regressionQueries.tgz /mariadb-columnstore-regression-test/mysql/queries/'
execInnerDocker "$CONTAINER_NAME" 'cd /mariadb-columnstore-regression-test/mysql/queries/nightly/alltest; tar czf testErrorLogs2.tgz *.log /var/log/mariadb/columnstore || echo "failed to grab regression results"'
execInnerDocker "$CONTAINER_NAME" 'cd /mariadb-columnstore-regression-test/mysql/queries/nightly/alltest && tar czf testErrorLogs2.tgz *.log /var/log/mariadb/columnstore' || echo "failed to grab regression results"
docker cp "${CONTAINER_NAME}:/mariadb-columnstore-regression-test/mysql/queries/nightly/alltest/testErrorLogs2.tgz" "/drone/src/${RESULT}/" || echo "missing testErrorLogs2.tgz"
docker cp "${CONTAINER_NAME}:regressionQueries.tgz" "/drone/src/${RESULT}/" || echo "missing regressionQueries.tgz"
@ -86,11 +106,3 @@ echo "Saved artifacts:"
ls -R "/drone/src/${RESULT}/"
echo "Done reporting ${STAGE}"
cleanup() {
if [[ -n $(docker ps -q --filter "name=${CONTAINER_NAME}") ]]; then
echo "Cleaning up container ${CONTAINER_NAME}..."
docker rm -f "${CONTAINER_NAME}" || echo "Can't remove container ${CONTAINER_NAME}!"
fi
}
#Remove the container on exit
trap cleanup EXIT

55
build/run_cmapi_test.sh Executable file
View File

@ -0,0 +1,55 @@
#!/bin/bash

# Run the cmapi (Cluster Management API) test suite inside an already
# running Docker test container: installs the cmapi package, copies the
# test files in, seeds the server config with an API key, then executes
# run_tests.py using cmapi's bundled python.
#
# Required flags:
#   -c/--container-name  name of the running test container
#   -f/--pkg-format      "deb" or "rpm" (selects apt vs yum install)

set -eo pipefail
SCRIPT_LOCATION=$(dirname "$0")
COLUMNSTORE_SOURCE_PATH=$(realpath "$SCRIPT_LOCATION"/../)
MDB_SOURCE_PATH=$(realpath "$SCRIPT_LOCATION"/../../../..)

# Install locations used by the cmapi packages inside the container.
CMAPI_PATH="/usr/share/columnstore/cmapi"
ETC_PATH="/etc/columnstore"

# Provides error(), execInnerDocker() and the optparse helpers.
source "$SCRIPT_LOCATION"/utils.sh

optparse.define short=c long=container-name desc="Name of the Docker container where cmapi tests will run" variable=CONTAINER_NAME
optparse.define short=f long=pkg-format desc="Package format" variable=PKG_FORMAT
source $(optparse.build)
echo "Arguments received: $@"

# Fail fast if a required flag is missing (${!flag} is indirect expansion:
# it reads the variable whose name is stored in `flag`).
for flag in CONTAINER_NAME PKG_FORMAT; do
if [[ -z "${!flag}" ]]; then
error "Missing required flag: -${flag:0:1} / --${flag,,}"
exit 1
fi
done

# Install the cmapi package in the container and stage the test files
# plus a pre-configured cmapi_server.conf.
prepare_environment() {
echo "Preparing for cmapi test run..."
if [[ "$PKG_FORMAT" == "deb" ]]; then
execInnerDocker $CONTAINER_NAME "apt-get clean && apt-get update -y && apt-get install -y mariadb-columnstore-cmapi"
else
execInnerDocker $CONTAINER_NAME "yum update -y && yum install -y MariaDB-columnstore-cmapi"
fi
# Copy each module's test directory from the source tree into the
# installed cmapi layout inside the container.
cd cmapi
for i in mcs_node_control cmapi_server failover; do
docker cp "${i}/test" "$CONTAINER_NAME:${CMAPI_PATH}/${i}/"
done
docker cp run_tests.py "$CONTAINER_NAME:${CMAPI_PATH}/"
execInnerDocker $CONTAINER_NAME "systemctl start mariadb-columnstore-cmapi"
# set API key to /etc/columnstore/cmapi_server.conf
execInnerDocker $CONTAINER_NAME "mcs cluster set api-key --key somekey123"
# copy cmapi conf file for test purposes (there are api key already set inside)
execInnerDocker $CONTAINER_NAME "cp ${ETC_PATH}/cmapi_server.conf ${CMAPI_PATH}/cmapi_server/"
# Stop the service again before the test run — presumably the tests
# manage the service lifecycle themselves; confirm with run_tests.py.
execInnerDocker $CONTAINER_NAME "systemctl stop mariadb-columnstore-cmapi"
}

# Execute the test suite with cmapi's bundled python interpreter.
run_cmapi_test() {
execInnerDocker $CONTAINER_NAME "cd ${CMAPI_PATH} && python/bin/python3 run_tests.py"
}

prepare_environment
run_cmapi_test

View File

@ -20,17 +20,21 @@ if [[ "$EUID" -ne 0 ]]; then
exit 1
fi
if [[ -z "${CONTAINER_NAME}" ]]; then
echo "Please provide mtr container name as a parameter, e.g. ./run_mtr.sh -c mtr183"
for flag in CONTAINER_NAME DISTRO EVENT MTR_SUITE_LIST; do
if [[ -z "${!flag}" ]]; then
error "Missing required flag: -${flag:0:1} / --${flag,,}"
exit 1
fi
fi
done
if [[ -z $(docker ps -q --filter "name=${CONTAINER_NAME}") ]]; then
error "Container '${CONTAINER_NAME}' is not running."
exit 1
fi
if [[ "$DISTRO" == *rocky* ]]; then
select_pkg_format ${DISTRO}
if [[ "$PKG_FORMAT" == "rpm" ]]; then
SOCKET_PATH="/var/lib/mysql/mysql.sock"
MTR_PATH="/usr/share/mysql-test"
else

58
build/run_multi_node_mtr.sh Executable file
View File

@ -0,0 +1,58 @@
#!/bin/bash
# Spin up a 3-node ColumnStore cluster with docker-compose and run the
# columnstore MTR suites (basic + bugfixes) against node mcs1.
#
# Required flags:
#   -i / --columnstore-image-name  docker image to deploy on every node
#   -d / --distro                  distro string, used to pick rpm vs deb paths
set -eo pipefail
SCRIPT_LOCATION=$(dirname "$0")
source "$SCRIPT_LOCATION"/utils.sh
optparse.define short=i long=columnstore-image-name desc="Name of columnstore docker image" variable=MCS_IMAGE_NAME
optparse.define short=d long=distro desc="Linux distro for which multinode mtr is executed" variable=DISTRO
source $(optparse.build)
echo "Arguments received: $@"
# Docker and compose operations below require root privileges.
if [[ "$EUID" -ne 0 ]]; then
error "Please run script as root"
exit 1
fi
# Fail early when a mandatory flag is missing (indirect expansion on flag name).
for flag in MCS_IMAGE_NAME DISTRO; do
if [[ -z "${!flag}" ]]; then
error "Missing required flag: -${flag:0:1} / --${flag,,}"
exit 1
fi
done
# select_pkg_format (utils.sh) exports PKG_FORMAT=rpm|deb based on the distro.
select_pkg_format ${DISTRO}
# rpm- and deb-based distros install the server socket and MTR tree in
# different locations.
if [[ "$PKG_FORMAT" == "rpm" ]]; then
SOCKET_PATH="/var/lib/mysql/mysql.sock"
MTR_PATH="/usr/share/mysql-test"
else
SOCKET_PATH="/run/mysqld/mysqld.sock"
MTR_PATH="/usr/share/mysql/mysql-test"
fi
message "Running multinode mtr tests..."
# Prepare the compose environment: point it at the image under test and
# disable MaxScale (not needed for MTR).
cd docker
cp .env_example .env
sed -i "/^MCS_IMAGE_NAME=/s|=.*|=${MCS_IMAGE_NAME}|" .env
sed -i "/^MAXSCALE=/s|=.*|=false|" .env
docker-compose up -d
# Provision the cluster: mcs1 becomes the primary of a 3-node setup.
docker exec mcs1 provision mcs1 mcs2 mcs3
# Stage the columnstore MTR suite inside the primary node and run it
# against the externally started server (--extern socket=...).
docker cp ../mysql-test/columnstore mcs1:"${MTR_PATH}/suite/"
docker exec -t mcs1 chown -R mysql:mysql "${MTR_PATH}"
docker exec -t mcs1 mariadb -e "CREATE DATABASE IF NOT EXISTS test;"
docker exec -t mcs1 bash -c "\
cd '${MTR_PATH}' && \
./mtr \
--extern socket='${SOCKET_PATH}' \
--force \
--print-core=detailed \
--print-method=gdb \
--max-test-fail=0 \
--suite=columnstore/basic,columnstore/bugfixes \
"

View File

@ -12,7 +12,7 @@ optparse.define short=t long=regression-timeout desc="Timeout for the regression
optparse.define short=n long=test-name desc="Name of regression test to execute" variable=TEST_NAME
source "$(optparse.build)"
for flag in CONTAINER_NAME REGRESSION_BRANCH DISTRO; do
for flag in CONTAINER_NAME REGRESSION_BRANCH DISTRO REGRESSION_TIMEOUT TEST_NAME; do
if [[ -z "${!flag}" ]]; then
error "Missing required flag: -${flag:0:1} / --${flag,,}"
exit 1

View File

@ -5,7 +5,8 @@ set -eo pipefail
SCRIPT_LOCATION=$(dirname "$0")
source "$SCRIPT_LOCATION"/utils.sh
CONTAINER_NAME="$1"
optparse.define short=c long=container-name desc="Name of the Docker container where mtr tests will run" variable=CONTAINER_NAME
source $(optparse.build)
echo "Arguments received: $@"
@ -14,6 +15,11 @@ if [[ "$EUID" -ne 0 ]]; then
exit 1
fi
if [[ -z "${CONTAINER_NAME:-}" ]]; then
echo "Please provide provide --container-name parameter, e.g. ./run_smoke.sh --container-name smoke185"
exit 1
fi
if [[ -z $(docker ps -q --filter "name=${CONTAINER_NAME}") ]]; then
error "Container '${CONTAINER_NAME}' is not running."
exit 1

View File

@ -171,6 +171,16 @@ detect_distro() {
message "Detected $color_yellow$OS $OS_VERSION$color_normal"
}
select_pkg_format() {
local distro="$1"
if [[ "$distro" == *rocky* ]]; then
export PKG_FORMAT="rpm"
else
export PKG_FORMAT="deb"
fi
}
menuStr=""
function hideCursor() {
@ -548,15 +558,6 @@ function execInnerDockerNoTTY() {
fi
}
function change_ubuntu_mirror() {
local region="$1"
message "Changing Ubuntu mirror to $region"
sed -i "s|//\(${region}\.\)\?archive\.ubuntu\.com|//${region}.archive.ubuntu.com|g" /etc/apt/sources.list 2>/dev/null || true
sed -i "s|//\(${region}\.\)\?archive\.ubuntu\.com|//${region}.archive.ubuntu.com|g" /etc/apt/sources.list.d/ubuntu.sources 2>/dev/null || true
cat /etc/apt/sources.list.d/ubuntu.sources /etc/apt/sources.list 2>/dev/null | grep archive || true
message_split
}
function execInnerDockerWithRetry() {
local max_retries=5
local container_name=$1
@ -583,6 +584,15 @@ function execInnerDockerWithRetry() {
return 0
}
function change_ubuntu_mirror() {
local region="$1"
message "Changing Ubuntu mirror to $region"
sed -i "s|//\(${region}\.\)\?archive\.ubuntu\.com|//${region}.archive.ubuntu.com|g" /etc/apt/sources.list 2>/dev/null || true
sed -i "s|//\(${region}\.\)\?archive\.ubuntu\.com|//${region}.archive.ubuntu.com|g" /etc/apt/sources.list.d/ubuntu.sources 2>/dev/null || true
cat /etc/apt/sources.list.d/ubuntu.sources /etc/apt/sources.list 2>/dev/null | grep archive || true
message_split
}
change_ubuntu_mirror_in_docker() {
local container_name=$1
local region=$2

View File

@ -25,8 +25,8 @@ set(THRIFT_LIBRARY ${THRIFT_LIBRARY_DIRS}/${CMAKE_STATIC_LIBRARY_PREFIX}thrift${
ExternalProject_Add(
external_thrift
URL https://github.com/apache/thrift/archive/refs/tags/v0.17.0.tar.gz
URL_HASH SHA256=f5888bcd3b8de40c2c2ab86896867ad9b18510deb412cba3e5da76fb4c604c29
URL https://github.com/apache/thrift/archive/refs/tags/v0.22.0.tar.gz
URL_HASH SHA256=c4649c5879dd56c88f1e7a1c03e0fbfcc3b2a2872fb81616bffba5aa8a225a37
PREFIX ${INSTALL_LOCATION}
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${INSTALL_LOCATION}
-DBUILD_COMPILER=YES

View File

@ -43,7 +43,14 @@ SECRET_KEY = 'MCSIsTheBestEver' # not just a random string! (base32)
# network constants
LOCALHOSTS = ('localhost', '127.0.0.1', '::1')
# according to https://www.ibm.com/docs/en/storage-sentinel/1.1.2?topic=installation-map-your-local-host-loopback-address
LOCALHOSTS = (
'127.0.0.1',
'localhost', 'localhost.localdomain',
'localhost4', 'localhost4.localdomain4',
'::1',
'localhost6', 'localhost6.localdomain6',
)
CMAPI_INSTALL_PATH = '/usr/share/columnstore/cmapi/'
CMAPI_PYTHON_BIN = os.path.join(CMAPI_INSTALL_PATH, "python/bin/python3")

View File

@ -0,0 +1,258 @@
import errno
import fcntl
import logging
import socket
import struct
from ipaddress import ip_address
from typing import Optional
try:
import psutil
_PSUTIL_AVAILABLE = True
except ImportError:
psutil = None
_PSUTIL_AVAILABLE = False
from cmapi_server.exceptions import CMAPIBasicError
SIOCGIFADDR = 0x8915 # SIOCGIFADDR "socket ioctl get interface address"
class NetworkManager:
    """Helpers for resolving and inspecting node IP addresses and hostnames.

    All methods are classmethods; the class carries no state. Relies on
    module-level names: ``psutil``/``_PSUTIL_AVAILABLE`` (optional dependency
    guard), ``SIOCGIFADDR`` (ioctl number) and ``CMAPIBasicError``.
    """

    @classmethod
    def get_ip_version(cls, ip_addr: str) -> int:
        """Get version of a given IP address.

        :param ip_addr: IP to get version
        :type ip_addr: str
        :return: version of a given IP (4 or 6)
        :rtype: int
        """
        return ip_address(ip_addr).version

    @classmethod
    def is_ip(cls, input_str: str) -> bool:
        """Check is input a valid IP or not.

        :param input_str: input string
        :type input_str: str
        :return: True if input is a valid IP
        :rtype: bool
        """
        try:
            ip_address(input_str)
            return True
        except ValueError:
            return False

    @classmethod
    def resolve_hostname_to_ip(
        cls,
        hostname: str,
        only_ipv4: bool = True,
        exclude_loopback: bool = False
    ) -> list[str]:
        """Resolve a hostname to one or more IP addresses.

        :param hostname: Hostname to resolve.
        :type hostname: str
        :param only_ipv4: Return only IPv4 addresses (default: True).
        :type only_ipv4: bool
        :param exclude_loopback: Exclude loopback addresses like 127.x.x.x
                                 (default: False).
        :type exclude_loopback: bool
        :return: List of resolved IP addresses, sorted deterministically
                 (loopback first, then IPv4 before IPv6, then lexically).
                 Empty list when resolution fails.
        :rtype: list[str]
        """
        sorted_ips: list[str] = []
        try:
            addr_info = socket.getaddrinfo(
                hostname,
                None,
                socket.AF_INET if only_ipv4 else socket.AF_UNSPEC,
                socket.SOCK_STREAM
            )
            # Deduplicate addresses, optionally dropping loopbacks.
            ip_set = {
                info[4][0] for info in addr_info
                if not (exclude_loopback and ip_address(info[4][0]).is_loopback)
            }
            sorted_ips = sorted(
                list(ip_set),
                key=lambda ip: (
                    not ip_address(ip).is_loopback,  # loopback first (False < True)
                    ip_address(ip).version != 4,     # IPv4 before IPv6 (False < True)
                    ip_address(ip)                   # lexical order
                )
            )
        except socket.gaierror:
            logging.error(
                f'Standard name resolution failed for hostname: {hostname!r}',
                exc_info=True
            )
        return sorted_ips

    @classmethod
    def get_ip_address_by_nic(cls, ifname: str) -> str:
        """Get IP address of a network interface.

        :param ifname: network interface name
        :type ifname: str
        :return: ip address, or '' when the interface has none / doesn't exist
        :rtype: str
        """
        # doesn't work on Windows,
        # OpenBSD and probably doesn't on FreeBSD/pfSense either
        ip_addr: str = ''
        try:
            with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
                # SIOCGIFADDR wants a 256-byte buffer starting with the
                # interface name (max 15 chars); the IPv4 address lives at
                # bytes 20..24 of the returned struct.
                ip_addr = socket.inet_ntoa(
                    fcntl.ioctl(
                        s.fileno(),
                        SIOCGIFADDR,
                        struct.pack('256s', bytes(ifname[:15], 'utf-8'))
                    )[20:24]
                )
        except OSError as exc:
            if exc.errno == errno.ENODEV:
                logging.error(
                    f'Interface {ifname!r} doesn\'t exist.'
                )
            else:
                logging.error(
                    f'Unknown OSError code while getting IP for an {ifname!r}',
                    exc_info=True
                )
        except Exception:
            logging.error(
                (
                    'Unknown exception while getting IP address of an '
                    f'{ifname!r} interface',
                ),
                exc_info=True
            )
        return ip_addr

    @classmethod
    def get_current_node_ips(
        cls, ignore_loopback: bool = False, only_ipv4: bool = True,
    ) -> list[str]:
        """Get all IP addresses for all existing network interfaces.

        Prefers psutil when available; falls back to a fcntl/ioctl scan of
        all interfaces otherwise.

        :param ignore_loopback: Ignore loopback addresses, defaults to False
        :type ignore_loopback: bool, optional
        :param only_ipv4: Return only IPv4 addresses, defaults to True
        :type only_ipv4: bool, optional
        :return: IP addresses for all node interfaces
        :rtype: list[str]
        :raises CMAPIBasicError: If no IPs are found
        """
        ext_ips: list[str] = []
        loopback_ips: list[str] = []

        if _PSUTIL_AVAILABLE:
            try:
                for _, addrs in psutil.net_if_addrs().items():
                    for addr in addrs:
                        if only_ipv4 and addr.family != socket.AF_INET:
                            continue
                        try:
                            ip = ip_address(addr.address)
                            if ip.is_loopback:
                                loopback_ips.append(str(ip))
                            else:
                                ext_ips.append(str(ip))
                        except ValueError:
                            continue  # Not a valid IP (e.g., MAC addresses)
            except Exception:
                logging.warning(
                    'Failed to get IPs via psutil, falling back to ioctl',
                    exc_info=True
                )
            result = ext_ips if ignore_loopback else [*ext_ips, *loopback_ips]
            if result:
                return result
            logging.warning(
                'psutil returned no valid IPs, trying fallback method'
            )

        ext_ips = []
        loopback_ips = []
        # Fallback to stdlib method using fcntl/ioctl
        for _, nic_name in socket.if_nameindex():
            ip_addr = cls.get_ip_address_by_nic(nic_name)
            if not ip_addr:
                continue
            if only_ipv4 and cls.get_ip_version(ip_addr) != 4:
                continue
            if ip_address(ip_addr).is_loopback:
                loopback_ips.append(ip_addr)
            else:
                ext_ips.append(ip_addr)

        result = ext_ips if ignore_loopback else [*ext_ips, *loopback_ips]
        if not result:
            raise CMAPIBasicError('No IP addresses found on this node.')
        return result

    @classmethod
    def get_hostname(cls, ip_addr: str) -> Optional[str]:
        """Get hostname for a given IP address.

        :param ip_addr: IP address to get hostname
        :type ip_addr: str
        :return: Hostname if it exists, otherwise None
        :rtype: Optional[str]
        """
        try:
            hostnames = socket.gethostbyaddr(ip_addr)
            return hostnames[0]
        # gethostbyaddr may raise gaierror (not only herror) for
        # unresolvable or malformed input; treat both as "no hostname".
        except (socket.herror, socket.gaierror):
            logging.error(f'No hostname found for address: {ip_addr!r}')
            return None

    @classmethod
    def is_only_loopback_hostname(cls, hostname: str) -> bool:
        """Check if all IPs resolved from the hostname are loopback.

        :param hostname: Hostname to check
        :type hostname: str
        :return: True if all resolved IPs are loopback, False otherwise
                 (also False when the hostname does not resolve at all)
        :rtype: bool
        """
        ips = cls.resolve_hostname_to_ip(hostname)
        if not ips:
            return False
        for ip in ips:
            if not ip_address(ip).is_loopback:
                return False
        return True

    @classmethod
    def resolve_ip_and_hostname(cls, input_str: str) -> tuple[str, str]:
        """Resolve input string to an (IP, hostname) pair.

        :param input_str: Input which may be an IP address or a hostname
        :type input_str: str
        :return: A tuple containing (ip, hostname); hostname may be None
                 when reverse lookup of an IP fails
        :rtype: tuple[str, str]
        :raises CMAPIBasicError: if hostname resolution yields no IPs
        """
        ip: str = ''
        hostname: Optional[str] = None
        if cls.is_ip(input_str):
            ip = input_str
            hostname = cls.get_hostname(input_str)
        else:
            hostname = input_str
            # For loopback-only hostnames keep loopback addresses,
            # otherwise prefer external ones.
            ip_list = cls.resolve_hostname_to_ip(
                input_str,
                exclude_loopback=not cls.is_only_loopback_hostname(input_str)
            )
            if not ip_list:
                raise CMAPIBasicError(f'No IPs found for {hostname!r}')
            ip = ip_list[0]
        return ip, hostname

View File

@ -20,6 +20,7 @@ from cmapi_server.constants import (
CMAPI_CONF_PATH, CMAPI_SINGLE_NODE_XML, DEFAULT_MCS_CONF_PATH, LOCALHOSTS,
MCS_DATA_PATH,
)
from cmapi_server.managers.network import NetworkManager
from mcs_node_control.models.node_config import NodeConfig
@ -928,7 +929,7 @@ def _remove_node_from_PMS(root, node):
return pm_num
def _add_Module_entries(root, node):
def _add_Module_entries(root, node: str) -> None:
'''
get new node id
add ModuleIPAddr, ModuleHostName, ModuleDBRootCount (don't set ModuleDBRootID* here)
@ -937,47 +938,52 @@ def _add_Module_entries(root, node):
'''
# XXXPAT: No guarantee these are the values used in the rest of the system.
# This will work best with a simple network configuration where there is 1 IP addr
# and 1 host name for a node.
ip4 = socket.gethostbyname(node)
if ip4 == node: # node is an IP addr
node_name = socket.gethostbyaddr(node)[0]
else:
node_name = node # node is a hostname
# TODO: what should we do with complicated network configs where node has
# several IPs and/or several hostnames
ip4, hostname = NetworkManager.resolve_ip_and_hostname(node)
logging.info(f'Using ip address {ip4} and hostname {hostname}')
logging.info(f"_add_Module_entries(): using ip address {ip4} and hostname {node_name}")
smc_node = root.find("./SystemModuleConfig")
mod_count_node = smc_node.find("./ModuleCount3")
nnid_node = root.find("./NextNodeId")
smc_node = root.find('./SystemModuleConfig')
mod_count_node = smc_node.find('./ModuleCount3')
nnid_node = root.find('./NextNodeId')
nnid = int(nnid_node.text)
current_module_count = int(mod_count_node.text)
# look for existing entries and fix if they exist
for i in range(1, nnid):
ip_node = smc_node.find(f"./ModuleIPAddr{i}-1-3")
name_node = smc_node.find(f"./ModuleHostName{i}-1-3")
# if we find a matching IP address, but it has a different hostname, update the addr
if ip_node is not None and ip_node.text == ip4:
logging.info(f"_add_Module_entries(): found ip address already at ModuleIPAddr{i}-1-3")
hostname = smc_node.find(f"./ModuleHostName{i}-1-3").text
if hostname != node_name:
new_ip_addr = socket.gethostbyname(hostname)
logging.info(f"_add_Module_entries(): hostname doesn't match, updating address to {new_ip_addr}")
smc_node.find(f"ModuleHostName{i}-1-3").text = new_ip_addr
curr_ip_node = smc_node.find(f'./ModuleIPAddr{i}-1-3')
curr_name_node = smc_node.find(f'./ModuleHostName{i}-1-3')
# TODO: NETWORK: seems it's useless even in very rare cases.
# Even simpler to rewrite the resolved IP and hostname
# if we find a matching IP address, but it has a different hostname,
# update the addr
if curr_ip_node is not None and curr_ip_node.text == ip4:
logging.info(f'Found ip address already at ModuleIPAddr{i}-1-3')
if curr_name_node != hostname:
new_ip_addr = NetworkManager.resolve_hostname_to_ip(
curr_name_node
)
logging.info(
'Hostname doesn\'t match, updating address to '
f'{new_ip_addr!r}'
)
smc_node.find(f'ModuleHostName{i}-1-3').text = new_ip_addr
else:
logging.info(f"_add_Module_entries(): no update is necessary")
logging.info('No update for ModuleIPAddr{i}-1-3 is necessary')
return
# if we find a matching hostname, update the ip addr
if name_node is not None and name_node.text == node_name:
logging.info(f"_add_Module_entries(): found existing entry for {node_name}, updating its address to {ip4}")
ip_node.text = ip4
if curr_name_node is not None and curr_name_node.text == hostname:
logging.info(
f'Found existing entry for {hostname!r}, updating its '
f'address to {ip4!r}'
)
curr_ip_node.text = ip4
return
etree.SubElement(smc_node, f"ModuleIPAddr{nnid}-1-3").text = ip4
etree.SubElement(smc_node, f"ModuleHostName{nnid}-1-3").text = node_name
etree.SubElement(smc_node, f"ModuleDBRootCount{nnid}-3").text = "0"
etree.SubElement(smc_node, f'ModuleIPAddr{nnid}-1-3').text = ip4
etree.SubElement(smc_node, f'ModuleHostName{nnid}-1-3').text = hostname
etree.SubElement(smc_node, f'ModuleDBRootCount{nnid}-3').text = '0'
mod_count_node.text = str(current_module_count + 1)
nnid_node.text = str(nnid + 1)

View File

@ -425,7 +425,8 @@ DropTableProcessor::DDLResult DropTableProcessor::processPackageInternal(ddlpack
<< endl;
Message::Args args;
Message message(9);
args.add("Error in dropping table from systables.");
args.add(fTxnid.id);
args.add(" Error in dropping table from systables.");
args.add(errorMsg);
message.format(args);
result.result = (ResultCode)rc;

View File

@ -843,9 +843,12 @@ void CalpontSystemCatalog::getSysData(CalpontSelectExecutionPlan& csep, NJLSysDa
}
}
if (tryCnt >= 5)
if (tryCnt >= 5){
// throw runtime_error("Error occurred when calling system catalog. ExeMgr is not functioning.");
throw IDBExcept(ERR_SYSTEM_CATALOG);
Message::Args args;
args.add("Cannot connect to ExeMgr re-connections tries exceeded");
throw IDBExcept(ERR_SYSTEM_CATALOG, args);
}
}
csep.sessionID(fSessionID);
@ -911,8 +914,11 @@ void CalpontSystemCatalog::getSysData_EC(CalpontSelectExecutionPlan& csep, NJLSy
{
if (status >= 1000) // new error system
throw IDBExcept(status);
else
throw IDBExcept(ERR_SYSTEM_CATALOG);
else{
Message::Args args;
args.add("rowGroup status: " + std::to_string(status));
throw IDBExcept(ERR_SYSTEM_CATALOG, args);
}
}
if (rowCount > 0)
@ -961,7 +967,9 @@ void CalpontSystemCatalog::getSysData_FE(const CalpontSelectExecutionPlan& csep,
if (bs.length() == 0)
{
throw IDBExcept(ERR_LOST_CONN_EXEMGR);
Message::Args args;
args.add("look in error.log on node, having dbroot1");
throw IDBExcept(ERR_LOST_CONN_EXEMGR, args);
}
string emsgStr;
@ -992,8 +1000,11 @@ void CalpontSystemCatalog::getSysData_FE(const CalpontSelectExecutionPlan& csep,
// @bug 1782. check ExeMgr connection lost
if (bs.length() == 0)
throw IDBExcept(ERR_LOST_CONN_EXEMGR);
{
Message::Args args;
args.add("look in error.log on node, having dbroot1");
throw IDBExcept(ERR_LOST_CONN_EXEMGR, args);
}
if (!rowGroup)
{
rowGroup.reset(new RowGroup());

View File

@ -2205,22 +2205,32 @@ int ProcessDDLStatement(string& ddlStatement, string& schema, const string& /*ta
if (ddlStatement.find("AUTO_INCREMENT") != string::npos)
{
thd->raise_error_printf(ER_CHECK_NOT_IMPLEMENTED,
"Use of the MySQL auto_increment syntax is not supported in Columnstore. If "
"you wish to create an auto increment column in Columnstore, please consult "
"the Columnstore SQL Syntax Guide for the correct usage.");
ci->alterTableState = cal_connection_info::NOT_ALTER;
ci->isAlter = false;
thd->raise_error_printf(ER_CHECK_NOT_IMPLEMENTED, "The syntax auto_increment is not supported in Columnstore. Please check the Columnstore syntax guide for supported syntax or data types.");
}
else if(ddlStatement.find("RENAME COLUMN") != string::npos)
{
thd->raise_error_printf(ER_CHECK_NOT_IMPLEMENTED, "The syntax rename column is not supported by Columnstore. Please check the Columnstore syntax guide for supported syntax or data types.");
}
else if(ddlStatement.find("MAX_ROWS") != string::npos || ddlStatement.find("MIN_ROWS") != string::npos)
{
thd->raise_error_printf(ER_CHECK_NOT_IMPLEMENTED, "The syntax min_rows/max_rows is not supported by Columnstore. Please check the Columnstore syntax guide for supported syntax or data types.");
}
else if(ddlStatement.find("REPLACE TABLE") != string::npos)
{
thd->raise_error_printf(ER_CHECK_NOT_IMPLEMENTED, "The syntax replace table is not supported by Columnstore. Please check the Columnstore syntax guide for supported syntax or data types.");
}
else if(ddlStatement.find("DROP COLUMN IF EXISTS") != string::npos)
{
thd->raise_error_printf(ER_CHECK_NOT_IMPLEMENTED, "The syntax drop column if exists is not supported by Columnstore. Please check the Columnstore syntax guide for supported syntax or data types.");
}
else
{
//@Bug 1888,1885. update error message
thd->raise_error_printf(ER_CHECK_NOT_IMPLEMENTED,
"The syntax or the data type(s) is not supported by Columnstore. Please check "
"the Columnstore syntax guide for supported syntax or data types.");
ci->alterTableState = cal_connection_info::NOT_ALTER;
ci->isAlter = false;
thd->raise_error_printf(ER_CHECK_NOT_IMPLEMENTED, "The syntax or the data type(s) is not supported by Columnstore. Please check the Columnstore syntax guide for supported syntax or data types.");
}
ci->alterTableState = cal_connection_info::NOT_ALTER;
ci->isAlter = false;
}
return rc;

View File

@ -202,7 +202,7 @@ static int is_columnstore_extents_fill(THD* thd, TABLE_LIST* tables, COND* cond)
TABLE* table = tables->table;
BRM::DBRM::refreshShmWithLock();
BRM::DBRM* emp = new BRM::DBRM();
std::unique_ptr<BRM::DBRM> emp(new BRM::DBRM());
if (!emp || !emp->isDBRMReady())
{
@ -224,7 +224,7 @@ static int is_columnstore_extents_fill(THD* thd, TABLE_LIST* tables, COND* cond)
if (strcasecmp(item_field->field_name.str, "object_id") == 0)
{
cond_oid = fitem->arguments()[1]->val_int();
return generate_result(cond_oid, emp, table, thd);
return generate_result(cond_oid, emp.get(), table, thd);
}
}
else if (fitem->arguments()[1]->real_item()->type() == Item::FIELD_ITEM &&
@ -236,7 +236,7 @@ static int is_columnstore_extents_fill(THD* thd, TABLE_LIST* tables, COND* cond)
if (strcasecmp(item_field->field_name.str, "object_id") == 0)
{
cond_oid = fitem->arguments()[0]->val_int();
return generate_result(cond_oid, emp, table, thd);
return generate_result(cond_oid, emp.get(), table, thd);
}
}
}
@ -250,7 +250,7 @@ static int is_columnstore_extents_fill(THD* thd, TABLE_LIST* tables, COND* cond)
for (unsigned int i = 1; i < fitem->argument_count(); i++)
{
cond_oid = fitem->arguments()[i]->val_int();
int result = generate_result(cond_oid, emp, table, thd);
int result = generate_result(cond_oid, emp.get(), table, thd);
if (result)
return 1;
@ -266,7 +266,7 @@ static int is_columnstore_extents_fill(THD* thd, TABLE_LIST* tables, COND* cond)
while (ss >> cond_oid)
{
int ret = generate_result(cond_oid, emp, table, thd);
int ret = generate_result(cond_oid, emp.get(), table, thd);
if (ret)
return 1;
@ -282,7 +282,7 @@ static int is_columnstore_extents_fill(THD* thd, TABLE_LIST* tables, COND* cond)
for (BRM::OID_t oid = 3000; oid <= MaxOID; oid++)
{
int result = generate_result(oid, emp, table, thd);
int result = generate_result(oid, emp.get(), table, thd);
if (result)
return 1;

View File

@ -216,7 +216,7 @@ static int generate_result(BRM::OID_t oid, BRM::DBRM* emp, TABLE* table, THD* th
static int is_columnstore_files_fill(THD* thd, TABLE_LIST* tables, COND* cond)
{
BRM::DBRM::refreshShmWithLock();
BRM::DBRM* emp = new BRM::DBRM();
std::unique_ptr<BRM::DBRM> emp(new BRM::DBRM());
BRM::OID_t cond_oid = 0;
TABLE* table = tables->table;
@ -240,7 +240,7 @@ static int is_columnstore_files_fill(THD* thd, TABLE_LIST* tables, COND* cond)
if (strcasecmp(item_field->field_name.str, "object_id") == 0)
{
cond_oid = fitem->arguments()[1]->val_int();
return generate_result(cond_oid, emp, table, thd);
return generate_result(cond_oid, emp.get(), table, thd);
}
}
else if (fitem->arguments()[1]->real_item()->type() == Item::FIELD_ITEM &&
@ -252,7 +252,7 @@ static int is_columnstore_files_fill(THD* thd, TABLE_LIST* tables, COND* cond)
if (strcasecmp(item_field->field_name.str, "object_id") == 0)
{
cond_oid = fitem->arguments()[0]->val_int();
return generate_result(cond_oid, emp, table, thd);
return generate_result(cond_oid, emp.get(), table, thd);
}
}
}
@ -266,7 +266,7 @@ static int is_columnstore_files_fill(THD* thd, TABLE_LIST* tables, COND* cond)
for (unsigned int i = 1; i < fitem->argument_count(); i++)
{
cond_oid = fitem->arguments()[i]->val_int();
int result = generate_result(cond_oid, emp, table, thd);
int result = generate_result(cond_oid, emp.get(), table, thd);
if (result)
return 1;
@ -282,7 +282,7 @@ static int is_columnstore_files_fill(THD* thd, TABLE_LIST* tables, COND* cond)
while (ss >> cond_oid)
{
int ret = generate_result(cond_oid, emp, table, thd);
int ret = generate_result(cond_oid, emp.get(), table, thd);
if (ret)
return 1;
@ -300,14 +300,13 @@ static int is_columnstore_files_fill(THD* thd, TABLE_LIST* tables, COND* cond)
{
for (BRM::OID_t oid = 3000; oid <= MaxOID; oid++)
{
int result = generate_result(oid, emp, table, thd);
int result = generate_result(oid, emp.get(), table, thd);
if (result)
return 1;
}
}
delete emp;
return 0;
}

View File

@ -1155,6 +1155,8 @@ void PackageHandler::run()
logging::Message message(1);
args.add("dmlprocessor.cpp PackageHandler::run() package type");
args.add((uint64_t)fPackageType);
args.add(" ,transaction ID: ");
args.add(fTxnid);
args.add(e.what());
message.format(args);
ml.logErrorMessage(message);

View File

@ -5,6 +5,7 @@
# -------------------------------------------------------------- #
--source ../include/have_columnstore.inc
--source ../include/detect_maxscale.inc
--source ../include/functions.inc
#
--disable_warnings
DROP DATABASE IF EXISTS mcs4009_db;
@ -60,3 +61,4 @@ select calshowpartitions('lineitem','l_orderkey');
# -------------------------------------------------------------- #
#
DROP DATABASE mcs4009_db;
--source ../include/drop_functions.inc

View File

@ -5,6 +5,7 @@
# -------------------------------------------------------------- #
#
--source ../include/have_columnstore.inc
--source ../include/functions.inc
#
use tpch1m;
#
@ -256,3 +257,4 @@ select o_custkey, REGR_AVGX(o_custkey,o_orderkey) OVER (PARTITION BY abs(o_custk
select o_custkey, REGR_AVGX(o_custkey,o_orderkey) OVER (PARTITION BY abs(o_custkey)+2 ORDER BY o_custkey ,o_orderkey ,o_orderdate ROWS BETWEEN 15 PRECEDING AND 15 PRECEDING) from (select * from orders where o_custkey <= 20000) s order by 1, 2;
select o_custkey, REGR_AVGX(o_custkey,o_orderkey) OVER (PARTITION BY abs(o_custkey)+2 ORDER BY o_custkey ,o_orderkey ,o_orderdate ROWS BETWEEN 15 PRECEDING AND 15 FOLLOWING) from (select * from orders where o_custkey <= 20000) s order by 1, 2;
select o_custkey, REGR_AVGX(o_custkey,o_orderkey) OVER (PARTITION BY abs(o_custkey)+2 ORDER BY o_custkey ,o_orderkey ,o_orderdate ROWS BETWEEN 15 FOLLOWING AND 15 FOLLOWING) from (select * from orders where o_custkey <= 20000) s order by 1, 2;
--source ../include/drop_functions.inc

View File

@ -5,6 +5,7 @@
# -------------------------------------------------------------- #
#
--source ../include/have_columnstore.inc
--source ../include/functions.inc
#
use tpch1m;
#
@ -256,3 +257,4 @@ select o_custkey, REGR_AVGY(o_custkey,o_orderkey) OVER (PARTITION BY abs(o_custk
select o_custkey, REGR_AVGY(o_custkey,o_orderkey) OVER (PARTITION BY abs(o_custkey)+2 ORDER BY o_custkey ,o_orderkey ,o_orderdate ROWS BETWEEN 15 PRECEDING AND 15 PRECEDING) from (select * from orders where o_custkey <= 20000) s order by 1, 2;
select o_custkey, REGR_AVGY(o_custkey,o_orderkey) OVER (PARTITION BY abs(o_custkey)+2 ORDER BY o_custkey ,o_orderkey ,o_orderdate ROWS BETWEEN 15 PRECEDING AND 15 FOLLOWING) from (select * from orders where o_custkey <= 20000) s order by 1, 2;
select o_custkey, REGR_AVGY(o_custkey,o_orderkey) OVER (PARTITION BY abs(o_custkey)+2 ORDER BY o_custkey ,o_orderkey ,o_orderdate ROWS BETWEEN 15 FOLLOWING AND 15 FOLLOWING) from (select * from orders where o_custkey <= 20000) s order by 1, 2;
--source ../include/drop_functions.inc

View File

@ -5,6 +5,7 @@
# -------------------------------------------------------------- #
#
--source ../include/have_columnstore.inc
--source ../include/functions.inc
#
use tpch1m;
#
@ -256,3 +257,4 @@ select o_custkey, REGR_COUNT(o_custkey,o_orderkey) OVER (PARTITION BY abs(o_cust
select o_custkey, REGR_COUNT(o_custkey,o_orderkey) OVER (PARTITION BY abs(o_custkey)+2 ORDER BY o_custkey ,o_orderkey ,o_orderdate ROWS BETWEEN 15 PRECEDING AND 15 PRECEDING) from (select * from orders where o_custkey <= 20000) s order by 1, 2;
select o_custkey, REGR_COUNT(o_custkey,o_orderkey) OVER (PARTITION BY abs(o_custkey)+2 ORDER BY o_custkey ,o_orderkey ,o_orderdate ROWS BETWEEN 15 PRECEDING AND 15 FOLLOWING) from (select * from orders where o_custkey <= 20000) s order by 1, 2;
select o_custkey, REGR_COUNT(o_custkey,o_orderkey) OVER (PARTITION BY abs(o_custkey)+2 ORDER BY o_custkey ,o_orderkey ,o_orderdate ROWS BETWEEN 15 FOLLOWING AND 15 FOLLOWING) from (select * from orders where o_custkey <= 20000) s order by 1, 2;
--source ../include/drop_functions.inc

View File

@ -1,5 +1,6 @@
DROP DATABASE IF EXISTS MCOL5744;
CREATE DATABASE MCOL5744;
USE MCOL5744;
SET old_mode='';
CREATE TABLE t(x text CHARACTER SET utf8 COLLATE utf8_general_ci) ENGINE=COLUMNSTORE;
SHOW CREATE TABLE t;

View File

@ -13,3 +13,6 @@ COUNT(*)
SELECT COUNT(*) FROM (SELECT * FROM t2 UNION ALL SELECT * FROM t1 PARTITION (p0)) tt;
COUNT(*)
20
REVOKE ALL PRIVILEGES ON *.* FROM 'cejuser'@'localhost';
DROP USER 'cejuser'@'localhost';
DROP DATABASE MCOL5886;

View File

@ -3,23 +3,29 @@ CREATE DATABASE analyze_table_db;
USE analyze_table_db;
create table t1 (a int, b int, c int) engine=columnstore;
insert into t1 values (1, 2, 3), (2, 2, 2), (2, 3, 4);
analyze table t1;
analyze table t1 PERSISTENT FOR ALL;
Table Op Msg_type Msg_text
analyze_table_db.t1 analyze status Engine-independent statistics collected
analyze_table_db.t1 analyze status OK
create table t2 (a int, b double) engine=columnstore;
insert into t2 values (2, 3), (3, 4);
analyze table t2;
analyze table t2 PERSISTENT FOR ALL;
Table Op Msg_type Msg_text
analyze_table_db.t2 analyze status Engine-independent statistics collected
analyze_table_db.t2 analyze status OK
create table t3 (a varchar(25)) engine=columnstore;
insert into t3 values ("a"), ("b");
analyze table t3;
analyze table t3 PERSISTENT FOR ALL;
Table Op Msg_type Msg_text
analyze_table_db.t3 analyze status Engine-independent statistics collected
analyze_table_db.t3 analyze status OK
analyze table t1, t2, t3;
analyze table t1, t2, t3 PERSISTENT FOR ALL;
Table Op Msg_type Msg_text
analyze_table_db.t1 analyze status Engine-independent statistics collected
analyze_table_db.t1 analyze status OK
analyze_table_db.t2 analyze status Engine-independent statistics collected
analyze_table_db.t2 analyze status OK
analyze_table_db.t3 analyze status Engine-independent statistics collected
analyze_table_db.t3 analyze status OK
DROP TABLE t1;
DROP TABLE t2;

View File

@ -497,3 +497,4 @@ DROP TABLE cs2;
DROP TABLE cs3;
DROP TABLE cs4;
DROP TABLE cs5;
DROP DATABASE test_mcol641_aggregate;

View File

@ -45,7 +45,7 @@ ALTER TABLE cs3 ADD COLUMN d5 DECIMAL(20,18) UNSIGNED;
Warnings:
Warning 1618 ZEROFILL is ignored in ColumnStore
ALTER TABLE cs1 RENAME COLUMN d7 TO d17;
ERROR 42000: The storage engine for the table doesn't support The syntax or the data type(s) is not supported by Columnstore. Please check the Columnstore syntax guide for supported syntax or data types.
ERROR 42000: The storage engine for the table doesn't support The syntax rename column is not supported by Columnstore. Please check the Columnstore syntax guide for supported syntax or data types.
ALTER TABLE cs2 ADD COLUMN (d5 DECIMAL(38,5), d6 DECIMAL(35,15));
ERROR 42000: The storage engine for the table doesn't support Multiple actions in alter table statement is currently not supported by Columnstore.
ALTER TABLE cs3 MODIFY d1 DECIMAL(38) SIGNED;

View File

@ -4,5 +4,5 @@ USE mcs116_db;
CREATE OR REPLACE TABLE IF NOT EXISTS t1 (a INT);
ERROR HY000: Incorrect usage of OR REPLACE and IF NOT EXISTS
CREATE OR REPLACE TABLE t1 (a INT)ENGINE=Columnstore;
ERROR 42000: The storage engine for the table doesn't support The syntax or the data type(s) is not supported by Columnstore. Please check the Columnstore syntax guide for supported syntax or data types.
ERROR 42000: The storage engine for the table doesn't support The syntax replace table is not supported by Columnstore. Please check the Columnstore syntax guide for supported syntax or data types.
DROP DATABASE mcs116_db;

View File

@ -58,3 +58,4 @@ a c1
disconnect addconroot1;
disconnect addconroot2;
DROP DATABASE mcs13_db1;
DROP DATABASE mcs13_db2;

View File

@ -41,7 +41,7 @@ SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`col` int(11) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci
) ENGINE=InnoDB
INSERT INTO t1 VALUES(1);
SELECT * FROM t1;
col

View File

@ -20,7 +20,7 @@ ERROR HY000: Internal error: CAL0001: Alter table Failed: Changing the datatype
ALTER TABLE t1 CHANGE COLUMN c2 c2new CHAR(1);
ALTER TABLE t1 CHANGE COLUMN c2new c2 CHAR(1);
ALTER TABLE t1 DROP COLUMN IF EXISTS c11;
ERROR 42000: The storage engine for the table doesn't support The syntax or the data type(s) is not supported by Columnstore. Please check the Columnstore syntax guide for supported syntax or data types.
ERROR 42000: The storage engine for the table doesn't support The syntax drop column if exists is not supported by Columnstore. Please check the Columnstore syntax guide for supported syntax or data types.
ALTER TABLE t1 DROP COLUMN c11;
ALTER TABLE t1 DROP COLUMN IF EXISTS c11;
Warnings:

View File

@ -25,7 +25,7 @@ t3 CREATE TABLE `t3` (
CREATE TABLE t4(col1 INT PRIMARY KEY AUTO_INCREMENT)ENGINE=Columnstore;
ERROR 42000: Too many keys specified; max 0 keys allowed
CREATE TABLE t4(col1 INT )MAX_ROWS=10, ENGINE=Columnstore;
ERROR 42000: The storage engine for the table doesn't support The syntax or the data type(s) is not supported by Columnstore. Please check the Columnstore syntax guide for supported syntax or data types.
ERROR 42000: The storage engine for the table doesn't support The syntax min_rows/max_rows is not supported by Columnstore. Please check the Columnstore syntax guide for supported syntax or data types.
CREATE TABLE t5(col1 INT )MIN_ROWS=10, ENGINE=Columnstore;
ERROR 42000: The storage engine for the table doesn't support The syntax or the data type(s) is not supported by Columnstore. Please check the Columnstore syntax guide for supported syntax or data types.
ERROR 42000: The storage engine for the table doesn't support The syntax min_rows/max_rows is not supported by Columnstore. Please check the Columnstore syntax guide for supported syntax or data types.
DROP DATABASE mcs8_db;

View File

@ -20,7 +20,7 @@ ERROR 42000: Too many keys specified; max 0 keys allowed
CREATE TABLE t7(col1 INT UNIQUE)ENGINE=Columnstore;
ERROR 42000: Too many keys specified; max 0 keys allowed
CREATE TABLE t8(col1 INT)MIN_ROWS=10, ENGINE=Columnstore;
ERROR 42000: The storage engine for the table doesn't support The syntax or the data type(s) is not supported by Columnstore. Please check the Columnstore syntax guide for supported syntax or data types.
ERROR 42000: The storage engine for the table doesn't support The syntax min_rows/max_rows is not supported by Columnstore. Please check the Columnstore syntax guide for supported syntax or data types.
CREATE TABLE t9(col1 INT)MAX_ROWS=1000, ENGINE=Columnstore;
ERROR 42000: The storage engine for the table doesn't support The syntax or the data type(s) is not supported by Columnstore. Please check the Columnstore syntax guide for supported syntax or data types.
ERROR 42000: The storage engine for the table doesn't support The syntax min_rows/max_rows is not supported by Columnstore. Please check the Columnstore syntax guide for supported syntax or data types.
DROP DATABASE mcs9_db;

View File

@ -1,4 +1,12 @@
if ($MASTER_MYPORT)
{
# Running without --extern
--skip Only works with --extern (will work without extern when cleanup issues are fixed)
}
--source ../include/have_columnstore.inc
--source ../include/functions.inc
--source ../include/syscatalog_mysql.sql
SET default_storage_engine=columnstore;

View File

@ -4,8 +4,10 @@
--disable_warnings
DROP DATABASE IF EXISTS MCOL5744;
--enable_warnings
--source ../include/charset.inc
CREATE DATABASE MCOL5744;
USE MCOL5744;
SET old_mode='';

View File

@ -0,0 +1,2 @@
--skip-partition=0
--skip-sequence=0

View File

@ -1,11 +1,20 @@
-- source ../include/have_columnstore.inc
--source include/have_innodb.inc
--disable_warnings
DROP DATABASE IF EXISTS MCOL5886;
--enable_warnings
CREATE DATABASE MCOL5886;
USE MCOL5886;
if (!$MASTER_MYPORT)
{
# Running with --extern
let $MASTER_MYPORT=`SELECT @@port`;
}
--exec $MCS_MCSSETCONFIG CrossEngineSupport User 'cejuser'
--exec $MCS_MCSSETCONFIG CrossEngineSupport Password 'Vagrant1|0000001'
--exec $MCS_MCSSETCONFIG CrossEngineSupport Port $MASTER_MYPORT
--disable_warnings
CREATE USER IF NOT EXISTS'cejuser'@'localhost' IDENTIFIED BY 'Vagrant1|0000001';
@ -21,4 +30,6 @@ CREATE TABLE IF NOT EXISTS t2 ( a DECIMAL(12, 2), b int ) ENGINE=COLUMNSTORE;
SELECT COUNT(*) FROM (SELECT * FROM t1 PARTITION (p0)) tt;
SELECT COUNT(*) FROM (SELECT * FROM t2 UNION ALL SELECT * FROM t1 PARTITION (p0)) tt;
REVOKE ALL PRIVILEGES ON *.* FROM 'cejuser'@'localhost';
DROP USER 'cejuser'@'localhost';
DROP DATABASE MCOL5886;

View File

@ -9,18 +9,18 @@ USE analyze_table_db;
create table t1 (a int, b int, c int) engine=columnstore;
insert into t1 values (1, 2, 3), (2, 2, 2), (2, 3, 4);
analyze table t1;
analyze table t1 PERSISTENT FOR ALL;
create table t2 (a int, b double) engine=columnstore;
insert into t2 values (2, 3), (3, 4);
analyze table t2;
analyze table t2 PERSISTENT FOR ALL;
create table t3 (a varchar(25)) engine=columnstore;
insert into t3 values ("a"), ("b");
analyze table t3;
analyze table t3 PERSISTENT FOR ALL;
analyze table t1, t2, t3;
analyze table t1, t2, t3 PERSISTENT FOR ALL;
DROP TABLE t1;
DROP TABLE t2;

View File

@ -1,4 +1,5 @@
-- source ../include/have_columnstore.inc
--source ../include/functions.inc
--disable_warnings
DROP DATABASE IF EXISTS cal_test_db;
@ -23,3 +24,4 @@ select calgetsqlcount();
DROP TABLE t1;
DROP DATABASE cal_test_db;
--source ../include/drop_functions.inc

View File

@ -1,4 +1,6 @@
--source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS json_quote_db;

View File

@ -4,6 +4,7 @@
#
--source ../include/have_columnstore.inc
--source include/have_innodb.inc
--source ../include/charset.inc
#
# If the calpontsys database does not exist, let's create it.

View File

@ -221,3 +221,4 @@ DROP TABLE cs2;
DROP TABLE cs3;
DROP TABLE cs4;
DROP TABLE cs5;
DROP DATABASE test_mcol641_aggregate;

View File

@ -0,0 +1 @@
--secure-file-priv=''

View File

@ -4,6 +4,7 @@
-- source ../include/have_columnstore.inc
-- source include/have_innodb.inc
--source ../include/charset.inc
-- disable_warnings
drop database if exists test_mcol2000;

View File

@ -2,6 +2,7 @@
# Test comments
# Author: Bharath, bharath.bokka@mariadb.com
#
--source ../include/only_10.6.inc
-- source ../include/have_columnstore.inc
--disable_warnings

View File

@ -4,6 +4,7 @@
#
-- source ../include/have_columnstore.inc
-- source ../include/detect_maxscale.inc
--source ../include/charset.inc
set names utf8;

View File

@ -1,6 +1,7 @@
#
#This test verifies show databases and tables with engine=columnstore.
#
--source ../include/disable_11.4.inc
-- source ../include/have_columnstore.inc
--source ../include/detect_maxscale.inc
-- source include/have_innodb.inc

View File

@ -1,10 +1,11 @@
#
# Test ALTER TABLE schemas in various possible ways.
#
--source ../include/disable_11.4.inc
-- source ../include/have_columnstore.inc
-- source include/have_innodb.inc
-- source ../include/check_maxscale.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs12_db1;

View File

@ -3,7 +3,7 @@
#
-- source ../include/have_columnstore.inc
-- source ../include/check_maxscale.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs13_db1;
@ -83,6 +83,7 @@ disconnect addconroot2;
--disable_warnings
DROP DATABASE mcs13_db1;
DROP DATABASE mcs13_db2;
--enable_warnings
#

View File

@ -3,6 +3,7 @@
# Author: Bharath, bharath.bokka@mariadb.com
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs169_db;

View File

@ -3,6 +3,7 @@
# Author: Bharath, bharath.bokka@mariadb.com
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs171_db;

View File

@ -3,6 +3,7 @@
# Author: Bharath, bharath.bokka@mariadb.com
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs173_db;

View File

@ -3,6 +3,7 @@
# Author: Bharath, bharath.bokka@mariadb.com
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs174_db;

View File

@ -3,6 +3,7 @@
# Author: Bharath, bharath.bokka@mariadb.com
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs175_db;

View File

@ -3,6 +3,7 @@
# Author: Bharath, bharath.bokka@mariadb.com
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs176_db;

View File

@ -3,6 +3,7 @@
# Author: Bharath, bharath.bokka@mariadb.com
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs178_db;

View File

@ -3,6 +3,7 @@
# Author: Bharath, bharath.bokka@mariadb.com
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs179_db;

View File

@ -3,6 +3,7 @@
# Author: Bharath, bharath.bokka@mariadb.com
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs180_db;

View File

@ -3,6 +3,7 @@
# Author: Bharath, bharath.bokka@mariadb.com
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs181_db;

View File

@ -3,6 +3,7 @@
# Author: Bharath, bharath.bokka@mariadb.com
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs182_db;

View File

@ -3,6 +3,7 @@
# Author: Bharath, bharath.bokka@mariadb.com
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs183_db;

View File

@ -3,6 +3,7 @@
# Author: Bharath, bharath.bokka@mariadb.com
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs184_db;

View File

@ -3,6 +3,7 @@
# Author: Bharath, bharath.bokka@mariadb.com
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs185_db;

View File

@ -3,6 +3,7 @@
# Author: Bharath, bharath.bokka@mariadb.com
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs186_db;

View File

@ -3,6 +3,7 @@
# Author: Bharath, bharath.bokka@mariadb.com
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs187_db;

View File

@ -5,7 +5,7 @@
-- source ../include/have_columnstore.inc
-- source include/have_innodb.inc
-- source ../include/check_maxscale.inc
--source ../include/charset.inc
SET default_storage_engine=InnoDB;
@ -50,7 +50,7 @@ USE mcs19_db;
--error ER_DBACCESS_DENIED_ERROR
CREATE DATABASE mcs19_db1;
CREATE TABLE t1(col INT) ENGINE=InnoDB;
--replace_regex /( COLLATE=latin1_swedish_ci)//
--replace_regex /(\ DEFAULT CHARSET(.*))//
SHOW CREATE TABLE t1;
INSERT INTO t1 VALUES(1);
SELECT * FROM t1;

View File

@ -2,7 +2,9 @@
# Test CREATE TABLE with all data types supported in Columnstore
# Author: Bharath, bharath.bokka@mariadb.com
#
--source ../include/disable_11.4.inc
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs1_db;

View File

@ -3,6 +3,7 @@
# Author: Bharath, bharath.bokka@mariadb.com
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
#
# If the calpontsys database does not exist, let's create it.

View File

@ -3,6 +3,7 @@
# Author: Susil, susil.behera@mariadb.com
#
-- source ../include/have_columnstore.inc
--source include/have_innodb.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs22_db;

View File

@ -3,6 +3,7 @@
# Author: Bharath, bharath.bokka@mariadb.com
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs271_db;

View File

@ -4,6 +4,7 @@
#
-- source ../include/have_columnstore.inc
--source ../include/detect_maxscale.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs28_db1;

View File

@ -2,6 +2,7 @@
# Update and Delete using Cross engine join
# Author: Bharath, bharath.bokka@mariadb.com
#
--source ../include/disable_11.4.inc
-- source include/have_innodb.inc
-- source ../include/have_columnstore.inc

View File

@ -3,6 +3,7 @@
#
-- source ../include/have_columnstore.inc
-- source include/have_innodb.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs5_db;

View File

@ -3,6 +3,7 @@
# Author: Bharath, bharath.bokka@mariadb.com
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs74_db;

View File

@ -2,6 +2,7 @@
#Test CREATE TABLE with metadata
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs7_db;

View File

@ -2,6 +2,7 @@
#Test CREATE TABLE with all supported constraints in Columnstore
#
-- source ../include/have_columnstore.inc
--source ../include/charset.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs8_db;

View File

@ -4,6 +4,7 @@
#
# Test MODA with various numeric types
-- source ../include/have_columnstore.inc
--source ../include/functions.inc
--disable_warnings
DROP DATABASE IF EXISTS mcs98_db;
--enable_warnings
@ -100,4 +101,4 @@ SELECT moda(floor(rl)) FROM t2;
SELECT t, moda(tu) 'q1' FROM t2 GROUP BY t HAVING moda(tu) > 5;
# Clean UP
DROP DATABASE mcs98_db;
--source ../include/drop_functions.inc

View File

@ -1,4 +1,6 @@
-- source ../include/have_columnstore.inc
--source ../include/functions.inc
--disable_warnings
DROP DATABASE IF EXISTS cal_test_db;
@ -23,3 +25,4 @@ select mcslastinsertid("t1");
DROP TABLE t1;
DROP DATABASE cal_test_db;
--source ../include/drop_functions.inc

View File

@ -0,0 +1,3 @@
--skip-partition=0
--skip-sequence=0

View File

@ -1,3 +1,7 @@
--source ../include/have_columnstore.inc
--source include/have_innodb.inc
--source ../include/functions.inc
--disable_warnings
DROP DATABASE IF EXISTS MCOL5776;
--enable_warnings
@ -99,3 +103,4 @@ INSERT INTO t(k) VALUES (1), (2), (2), (3), (3), (4), (4),(4),(4),(4),(995), (NU
SELECT k + k a FROM t GROUP BY a HAVING a >= 8;
DROP DATABASE MCOL5776;
--source ../include/drop_functions.inc

View File

@ -1,7 +1,7 @@
#
# MCOL-4740 UPDATE involving multi-tables returns wrong "Rows matched"
#
--source ../include/disable_11.4.inc
--source ../include/have_columnstore.inc
--disable_warnings

View File

@ -10,3 +10,4 @@ LOAD DATA LOCAL infile 'MTR_SUITE_DIR/../std_data/mcol-4741-part-2-empty-strings
SELECT COUNT(*) FROM t WHERE x='val';
COUNT(*)
2
DROP DATABASE IF EXISTS db4741;

View File

@ -16,4 +16,4 @@ SELECT COUNT(*) FROM t WHERE x='val';
--eval LOAD DATA LOCAL infile '$MTR_SUITE_DIR/../std_data/mcol-4741-part-2-empty-strings.txt' INTO TABLE t
SELECT COUNT(*) FROM t WHERE x='val';
DROP DATABASE IF EXISTS db4741;

View File

@ -0,0 +1 @@
--skip-sequence=0

View File

@ -32,3 +32,4 @@ Partner 5 2021-12-11 2 NULL NULL NULL
Person Focal Group 4 2021-12-11 NULL NULL NULL 4
PFG 4 2021-12-11 NULL NULL NULL NULL
Retail 4 2021-12-12 NULL 1 NULL NULL
DROP DATABASE mcol_5074_db;

View File

@ -41,3 +41,4 @@ select
case when da.mtn = 'Person Focal Group' then count( acct_id) end as PFG_active_accounts
from accnt da group by mtn, accts, act_created_dt order by mtn;
DROP DATABASE mcol_5074_db;

View File

@ -2,7 +2,7 @@
# MCOL-5480 LDI loads values incorrectly for MEDIUMINT, TIME and TIMESTAMP
# when cpimport is used for batch insert
#
--source ../include/disable_11.4.inc
--source ../include/have_columnstore.inc
--source ../include/detect_maxscale.inc

View File

@ -111,3 +111,5 @@ di_source_id brand_sku_id adjusted_dtm is_sit sit_uom sales_uom_daily_average
5304 1004 2024-01-01 1 0.2434 0.00000000
5389 1004 2024-01-01 1 0.2074 0.00000000
DROP DATABASE mcol_5669;
REVOKE ALL PRIVILEGES ON *.* FROM 'cejuser'@'localhost';
DROP USER 'cejuser'@'localhost';

View File

@ -1,3 +1,4 @@
--source ../include/disable_11.4.inc
--source include/have_innodb.inc
--source ../include/have_columnstore.inc
--disable_warnings
@ -110,3 +111,5 @@ INSERT INTO `zzz_999999` VALUES ('2023-12-18',5298,684963,1004,1,1,2166,1.000),(
) as z;
DROP DATABASE mcol_5669;
REVOKE ALL PRIVILEGES ON *.* FROM 'cejuser'@'localhost';
DROP USER 'cejuser'@'localhost';

View File

@ -1,4 +1,5 @@
--source ../include/have_columnstore.inc
--source ../include/functions.inc
--disable_warnings
DROP DATABASE IF EXISTS mcol_4465;
--enable_warnings
@ -9,3 +10,4 @@ INSERT INTO cs1 VALUES('i',0),('i',0),('ii',0);
SELECT c,regr_count(d,length(c)) f FROM cs1 GROUP BY 1 ORDER BY 1;
SELECT * FROM (SELECT c,regr_count(d,length(c)) f FROM cs1 GROUP BY 1 ORDER BY 1)a;
DROP DATABASE mcol_4465;
--source ../include/drop_functions.inc

View File

@ -1,4 +1,5 @@
-- source ../include/have_columnstore.inc
--source include/have_innodb.inc
--disable_warnings
DROP DATABASE IF EXISTS mcol_4622;
--enable_warnings

View File

@ -5,12 +5,13 @@
# -------------------------------------------------------------- #
#
--source ../include/have_columnstore.inc
--source ../include/functions.inc
#
USE tpch1;
#
# Negative test case for attempting to drop a non-existent partition.
select caldroppartitions('lineitem', '4.1.1');
select caldisablepartitions('lineitem', '4.1.1');
--source ../include/drop_functions.inc
#

View File

@ -6,6 +6,7 @@
#
--source ../include/have_columnstore.inc
--source ../include/detect_maxscale.inc
--source ../include/functions.inc
#
USE tpch1;
#
@ -36,4 +37,5 @@ select 'q6', birthdate, age, id, col8 from bug3657 where col8=2;
--disable_warnings
drop table if exists bug3657;
--enable_warnings
--source ../include/drop_functions.inc
#

View File

@ -5,10 +5,11 @@
# -------------------------------------------------------------- #
#
--source ../include/have_columnstore.inc
--source ../include/functions.inc
#
USE tpch1;
#
select depname, empno, moda(salary) over(partition by depname order by enroll_date) from empsalary order by depname, empno, enroll_date;
select avg(salary),depname, moda(salary) over(partition by depname order by enroll_date) from empsalary group by depname order by depname, avg(salary);
#
--source ../include/drop_functions.inc

Some files were not shown because too many files have changed in this diff Show More