1
0
mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git synced 2025-08-07 03:22:57 +03:00

Merge branch 'stable-23.10' into MCOL-4240

This commit is contained in:
Leonid Fedorov
2025-07-30 19:05:41 +04:00
committed by GitHub
176 changed files with 3877 additions and 7205 deletions

View File

@@ -16,21 +16,27 @@ local platforms_arm = {
};
local builddir = "verylongdirnameforverystrangecpackbehavior";
local get_build_command(command) = "bash /mdb/" + builddir + "/storage/columnstore/columnstore/build/" + command + " ";
local clang(version) = [get_build_command("install_clang_deb.sh " + version),
get_build_command("update-clang-version.sh " + version + " 100"),
get_build_command("install_libc++.sh " + version),
"export CC=/usr/bin/clang",
"export CXX=/usr/bin/clang++"
];
local customEnvCommandsMap = {
// 'clang-18': ["bash /mdb/" + builddir + "/storage/columnstore/columnstore/build/install_clang_deb.sh 18"],
"clang-20": ["bash /mdb/" + builddir + "/storage/columnstore/columnstore/build/install_clang_deb.sh 20"],
"clang-20": clang("20"),
};
local customEnvCommands(envkey, builddir) =
local updateAlternatives = {
"clang-20": ["bash /mdb/" + builddir +
"/storage/columnstore/columnstore/build/update-clang-version.sh 20 100"],
};
(if (std.objectHas(customEnvCommandsMap, envkey))
then customEnvCommandsMap[envkey] + updateAlternatives[envkey] else []);
then customEnvCommandsMap[envkey] else []);
local customBootstrapParamsForExisitingPipelines(envkey) =
# error-prone: if we pass --custom-cmake-flags twice, the last occurrence wins
local customBootstrapMap = {
"ubuntu:24.04": "--custom-cmake-flags '-DCOLUMNSTORE_ASAN_FOR_UNITTESTS=YES'",
};
@@ -38,11 +44,17 @@ local customBootstrapParamsForExisitingPipelines(envkey) =
then customBootstrapMap[envkey] else "");
local customBootstrapParamsForAdditionalPipelinesMap = {
ASAN: "--asan",
ASan: "--asan",
TSAN: "--tsan",
UBSAN: "--ubsan",
UBSan: "--ubsan",
MSan: "--msan",
"libcpp": "--libcpp",
"gcc-toolset": "--gcc-toolset-for-rocky-8"
};
local customBuildFlags(buildKey) =
(if (std.objectHas(customBootstrapParamsForAdditionalPipelinesMap, buildKey))
then customBootstrapParamsForAdditionalPipelinesMap[buildKey] else "");
local any_branch = "**";
@@ -83,7 +95,7 @@ local make_clickable_link(link) = "echo -e '\\e]8;;" + link + "\\e\\\\" + link
local echo_running_on = ["echo running on ${DRONE_STAGE_MACHINE}",
make_clickable_link("https://us-east-1.console.aws.amazon.com/ec2/home?region=us-east-1#Instances:search=:${DRONE_STAGE_MACHINE};v=3;$case=tags:true%5C,client:false;$regex=tags:false%5C,client:false;sort=desc:launchTime")];
local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise", customBootstrapParams="", customBuildEnvCommandsMapKey="") = {
local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise", customBootstrapParamsKey="", customBuildEnvCommandsMapKey="") = {
local pkg_format = if (std.split(platform, ":")[0] == "rockylinux") then "rpm" else "deb",
local img = if (platform == "rockylinux:8") then platform else "detravi/" + std.strReplace(platform, "/", "-"),
local branch_ref = if (branch == any_branch) then current_branch else branch,
@@ -91,7 +103,9 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
local branchp = if (branch == "**") then "" else branch + "/",
local brancht = if (branch == "**") then "" else branch + "-",
local platformKey = std.strReplace(std.strReplace(platform, ":", ""), "/", "-"),
local result = platformKey + if customBuildEnvCommandsMapKey != "" then "_" + customBuildEnvCommandsMapKey else "",
local result = platformKey +
(if customBuildEnvCommandsMapKey != "" then "_" + customBuildEnvCommandsMapKey else "") +
(if customBootstrapParamsKey != "" then "_" + customBootstrapParamsKey else ""),
local packages_url = "https://cspkg.s3.amazonaws.com/" + branchp + event + "/${DRONE_BUILD_NUMBER}/" + server,
local publish_pkg_url = "https://cspkg.s3.amazonaws.com/index.html?prefix=" + branchp + event + "/${DRONE_BUILD_NUMBER}/" + server + "/" + arch + "/" + result + "/",
@@ -187,7 +201,7 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
local getContainerName(stepname) = stepname + "$${DRONE_BUILD_NUMBER}",
local prepareTestContainer(containerName, result, do_setup) =
'sh -c "apk add bash && bash /mdb/' + builddir + "/storage/columnstore/columnstore/build/prepare_test_container.sh" +
'sh -c "apk add bash && ' + get_build_command("prepare_test_container.sh") +
" --container-name " + containerName +
" --docker-image " + img +
" --result-path " + result +
@@ -195,7 +209,7 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
" --do-setup " + std.toString(do_setup) + '"',
local reportTestStage(containerName, result, stage) =
'sh -c "apk add bash && bash /mdb/' + builddir + '/storage/columnstore/columnstore/build/report_test_stage.sh' +
'sh -c "apk add bash && ' + get_build_command("report_test_stage.sh") +
' --container-name ' + containerName +
' --result-path ' + result +
' --stage ' + stage + '"',
@@ -218,7 +232,7 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
volumes: [pipeline._volumes.mdb, pipeline._volumes.docker],
commands: [
prepareTestContainer(getContainerName("smoke"), result, true),
"bash /mdb/" + builddir + "/storage/columnstore/columnstore/build/run_smoke.sh" +
get_build_command("run_smoke.sh") +
' --container-name ' + getContainerName("smoke"),
],
},
@@ -290,7 +304,8 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
prepareTestContainer(getContainerName("mtr"), result, true),
'MTR_SUITE_LIST=$([ "$MTR_FULL_SUITE" == true ] && echo "' + mtr_full_set + '" || echo "$MTR_SUITE_LIST")',
'apk add bash && bash /mdb/' + builddir + '/storage/columnstore/columnstore/build/run_mtr.sh' +
'apk add bash &&' +
get_build_command("run_mtr.sh") +
' --container-name ' + getContainerName("mtr") +
' --distro ' + platform +
' --suite-list $${MTR_SUITE_LIST}' +
@@ -334,7 +349,8 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
"export REGRESSION_REF=$${REGRESSION_REF:-$$REGRESSION_REF_AUX}",
'echo "$$REGRESSION_REF"',
"apk add bash && bash /mdb/" + builddir + "/storage/columnstore/columnstore/build/run_regression.sh" +
"apk add bash && " +
get_build_command("run_regression.sh") +
" --container-name " + getContainerName("regression") +
" --test-name " + name +
" --distro " + platform +
@@ -415,8 +431,8 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
},
commands: [
prepareTestContainer(getContainerName("cmapi"), result, true),
"apk add bash && bash /mdb/" + builddir + "/storage/columnstore/columnstore/build/run_cmapi_test.sh" +
"apk add bash && " +
get_build_command("run_cmapi_test.sh") +
" --container-name " + getContainerName("cmapi") +
" --pkg-format " + pkg_format,
],
@@ -449,16 +465,16 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
},
commands: [
"echo $$DOCKER_PASSWORD | docker login --username $$DOCKER_LOGIN --password-stdin",
"apk add bash && bash /mdb/" + builddir + "/storage/columnstore/columnstore/build/run_multi_node_mtr.sh " +
"--columnstore-image-name $${MCS_IMAGE_NAME} " +
"--distro " + platform,
"apk add bash && " +
get_build_command("run_multi_node_mtr.sh") +
" --columnstore-image-name $${MCS_IMAGE_NAME} " +
" --distro " + platform,
],
},
kind: "pipeline",
type: "docker",
name: std.join(" ", [branch, platform, event, arch, server, customBootstrapParams, customBuildEnvCommandsMapKey]),
name: std.join(" ", [branch, platform, event, arch, server, customBootstrapParamsKey, customBuildEnvCommandsMapKey]),
platform: { arch: arch },
clone: { depth: 10 },
steps: [
@@ -514,19 +530,22 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
SCCACHE_S3_USE_SSL: "true",
SCCACHE_S3_KEY_PREFIX: result + branch + server + arch,
},
# errorprone if we pass --custom-cmake-flags twice, the last one will win
commands: [
"mkdir /mdb/" + builddir + "/" + result,
]
+ customEnvCommands(customBuildEnvCommandsMapKey, builddir) +
[
'bash -c "set -o pipefail && bash /mdb/' + builddir + "/storage/columnstore/columnstore/build/bootstrap_mcs.sh " +
'bash -c "set -o pipefail && ' +
get_build_command("bootstrap_mcs.sh") +
"--build-type RelWithDebInfo " +
"--distro " + platform + " " +
"--build-packages --install-deps --sccache " +
"--build-path " + "/mdb/" + builddir + "/builddir " +
"--build-packages --install-deps --sccache" +
" " + customBootstrapParams +
" " + customBootstrapParamsForExisitingPipelines(platform) + " | " +
"/mdb/" + builddir + "/storage/columnstore/columnstore/build/ansi2txt.sh " +
" " + customBootstrapParamsForExisitingPipelines(platform) +
" " + customBuildFlags(customBootstrapParamsKey) +
" | " + get_build_command("ansi2txt.sh") +
"/mdb/" + builddir + "/" + result + '/build.log "',
],
},
@@ -539,7 +558,7 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
DEBIAN_FRONTEND: "noninteractive",
},
commands: [
"bash /mdb/" + builddir + "/storage/columnstore/columnstore/build/build_cmapi.sh --distro " + platform,
get_build_command("build_cmapi.sh") + " --distro " + platform,
],
},
{
@@ -551,7 +570,7 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
},
volumes: [pipeline._volumes.mdb],
commands: [
"bash /mdb/" + builddir + "/storage/columnstore/columnstore/build/createrepo.sh --result " + result,
get_build_command("createrepo.sh") + " --result " + result,
],
},
{
@@ -592,12 +611,13 @@ local Pipeline(branch, platform, event, arch="amd64", server="10.6-enterprise",
[pipeline.cmapitest] +
[pipeline.cmapilog] +
[pipeline.publish("cmapilog")] +
(if (platform == "rockylinux:8" && arch == "amd64") then [pipeline.dockerfile] + [pipeline.dockerhub] + [pipeline.multi_node_mtr] else [pipeline.mtr] + [pipeline.mtrlog] + [pipeline.publish("mtrlog")]) +
(if (platform == "rockylinux:8" && arch == "amd64" && customBootstrapParamsKey == "gcc-toolset") then [pipeline.dockerfile] + [pipeline.dockerhub] + [pipeline.multi_node_mtr] else [pipeline.mtr] + [pipeline.mtrlog] + [pipeline.publish("mtrlog")]) +
[pipeline.regression(regression_tests[i], if (i == 0) then ["mtr", "publish pkg", "publish cmapi build"] else [regression_tests[i - 1]]) for i in indexes(regression_tests)] +
[pipeline.regressionlog] +
[pipeline.publish("regressionlog")] +
// [pipeline.upgrade(mdb_server_versions[i]) for i in indexes(mdb_server_versions)] +
// (if (std.length(mdb_server_versions) == 0) then [] else [pipeline.upgradelog] + [pipeline.publish("upgradelog")]) +
(if (event == "cron") then [pipeline.publish("regressionlog latest", "latest")] else [pipeline.publish("regressionlog")]),
(if (event == "cron") then [pipeline.publish("regressionlog latest", "latest")] else []),
volumes: [pipeline._volumes.mdb { temp: {} }, pipeline._volumes.docker { host: { path: "/var/run/docker.sock" } }],
trigger: {
@@ -672,3 +692,13 @@ local FinalPipeline(branch, event) = {
for triggeringEvent in events
for server in servers[current_branch]
]
+
[
Pipeline(b, platform, triggeringEvent, a, server, flag, "")
for a in ["amd64"]
for b in std.objectFields(platforms)
for platform in ["rockylinux:8"]
for flag in ["gcc-toolset"]
for triggeringEvent in events
for server in servers[current_branch]
]

View File

@@ -21,7 +21,7 @@ git clone https://github.com/MariaDB/server.git
MariaDB server contains many git submodules that need to be checked out with:
```bash
git submodule update --init --recursive --depth=1
git submodule update --init --recursive
```
@@ -46,12 +46,27 @@ git config --global --add safe.directory `pwd`
## Build
Regarding dependencies: If this is the first time building MCS on your system you should either use the `./install-deps.sh` script or pass `--install-deps` to the `bootstrap_mcs.sh` script.
The `bootstrap_mcs.sh` script can now do **two** main things:
For development convenience, building the MariaDB server with MCS can be done with:
1. **Build & install** ColumnStore into your system
```bash
cd server/storage/columnstore/columnstore
```
sudo -E build/bootstrap_mcs.sh
sudo build/bootstrap_mcs.sh --install-deps
```
Tested for: Ubuntu:20.04/22.04, CentOS:7, Debian:10/11, RockyLinux:8
2. **Build native OS packages** (RPM or DEB)
```bash
cd server/storage/columnstore/columnstore
sudo build/bootstrap_mcs.sh --install-deps --build-packages
# → find your .rpm/.deb files in the build directory
```
Note: Packages can be built only for the OS you're on — so, for example, if you run --build-packages on Rocky Linux it will produce RPMs for Rocky.
You can see the full options list in the script itself
> **Supported distros:**
> Ubuntu:20.04/22.04/24.04, Debian:11/12, Rocky Linux:8/9

View File

@@ -49,10 +49,15 @@ include(compiler_flags)
include(misc)
include(cpack_manage)
add_subdirectory(dbcon/mysql)
if(NOT __msg1_CS_NO_CXX20)
add_subdirectory(dbcon/mysql)
endif()
if(NOT TARGET columnstore)
return()
endif()
# releasenum is used by external scripts for various tasks. Leave it alone.
columnstore_install_file(${CMAKE_CURRENT_BINARY_DIR}/build/releasenum ${ENGINE_SUPPORTDIR})
columnstore_install_file(${CMAKE_CURRENT_BINARY_DIR}/gitversionEngine ${ENGINE_SUPPORTDIR})
set(COMPONENTS
utils
@@ -85,8 +90,3 @@ set(COMPONENTS
foreach(component ${COMPONENTS})
add_subdirectory(${component})
endforeach()
add_dependencies(udf_mysql GenError)
add_dependencies(funcexp GenError)
add_dependencies(oamcpp GenError)
add_dependencies(regr_mysql GenError)

View File

@@ -0,0 +1,4 @@
extern "C" const char* __asan_default_options()
{
return "@COLUMNSTORE_STANDALONE_BINARIES_ASAN_COPTIONS@";
}

View File

@@ -52,6 +52,7 @@ optparse.define short=O long=static desc="Build all with static libraries" varia
optparse.define short=p long=build-packages desc="Build packages" variable=BUILD_PACKAGES default=false value=true
optparse.define short=P long=report-path desc="Path for storing reports and profiles" variable=REPORT_PATH default="/core"
optparse.define short=r long=restart-services variable=RESTART_SERVICES default=true value=false
optparse.define short=R long=gcc-toolset-for-rocky-8 variable=GCC_TOOLSET default=false value=true
optparse.define short=S long=skip-columnstore-submodules desc="Skip columnstore submodules initialization" variable=SKIP_SUBMODULES default=false value=true
optparse.define short=t long=build-type desc="Build Type: ${BUILD_TYPE_OPTIONS[*]}" variable=MCS_BUILD_TYPE
optparse.define short=T long=tsan desc="Build with TSAN" variable=TSAN default=false value=true
@@ -109,12 +110,12 @@ install_deps() {
fi
message_split
prereq=""
RPM_BUILD_DEPS="dnf install -y lz4 lz4-devel systemd-devel git make libaio-devel openssl-devel boost-devel bison \
RPM_BUILD_DEPS="lz4 lz4-devel systemd-devel git make libaio-devel openssl-devel boost-devel bison \
snappy-devel flex libcurl-devel libxml2-devel ncurses-devel automake libtool policycoreutils-devel \
rpm-build lsof iproute pam-devel perl-DBI cracklib-devel expect createrepo python3 checkpolicy \
cppunit-devel cmake3 libxcrypt-devel xz-devel zlib-devel libzstd-devel glibc-devel"
DEB_BUILD_DEPS="apt-get -y update && apt-get -y install build-essential automake libboost-all-dev \
DEB_BUILD_DEPS="build-essential automake libboost-all-dev \
bison cmake libncurses5-dev python3 libaio-dev libsystemd-dev libpcre2-dev libperl-dev libssl-dev libxml2-dev \
libkrb5-dev flex libpam-dev git libsnappy-dev libcurl4-openssl-dev libgtest-dev libcppunit-dev googletest \
libjemalloc-dev liblz-dev liblzo2-dev liblzma-dev liblz4-dev libbz2-dev libbenchmark-dev libdistro-info-perl \
@@ -122,16 +123,20 @@ install_deps() {
if [[ "$OS" == *"rockylinux:8"* || "$OS" == *"rocky:8"* ]]; then
command="dnf install -y curl 'dnf-command(config-manager)' && dnf config-manager --set-enabled powertools && \
dnf install -y gcc-toolset-${GCC_VERSION} libarchive cmake && . /opt/rh/gcc-toolset-${GCC_VERSION}/enable && \
${RPM_BUILD_DEPS}"
dnf install -y libarchive cmake ${RPM_BUILD_DEPS}"
if [[ $GCC_TOOLSET = false ]]; then
command="$command && dnf group install -y \"Development Tools\""
else
command="$command && dnf install -y gcc-toolset-${GCC_VERSION} && . /opt/rh/gcc-toolset-${GCC_VERSION}/enable"
fi
elif
[[ "$OS" == "rockylinux:9"* || "$OS" == "rocky:9"* ]]
then
command="dnf install -y 'dnf-command(config-manager)' && dnf config-manager --set-enabled crb && \
dnf install -y pcre2-devel gcc gcc-c++ curl-minimal && ${RPM_BUILD_DEPS}"
dnf install -y pcre2-devel gcc gcc-c++ curl-minimal ${RPM_BUILD_DEPS}"
elif [[ "$OS" == "debian:11"* ]] || [[ "$OS" == "debian:12"* ]] || [[ "$OS" == "ubuntu:20.04"* ]] || [[ "$OS" == "ubuntu:22.04"* ]] || [[ "$OS" == "ubuntu:24.04"* ]]; then
command="${DEB_BUILD_DEPS}"
command="apt-get -y update && apt-get -y install ${DEB_BUILD_DEPS}"
else
echo "Unsupported OS: $OS"
exit 17
@@ -267,10 +272,19 @@ modify_packaging() {
echo "Modifying_packaging..."
cd $MDB_SOURCE_PATH
# Bypass of debian version list check in autobake
if [[ $PKG_FORMAT == "deb" ]]; then
sed -i 's|.*-d storage/columnstore.*|elif [[ -d storage/columnstore/columnstore/debian ]]|' debian/autobake-deb.sh
fi
# Patch to avoid fakeroot, which uses LD_PRELOAD for libfakeroot.so,
# and eatmydata, which uses LD_PRELOAD for libeatmydata.so; both make
# intermediate build binaries fail with "ASan runtime does not come first in initial library list"
if [[ $PKG_FORMAT == "deb" && $ASAN = true ]]; then
sed -i 's|BUILDPACKAGE_DPKGCMD+=( "fakeroot" "--" )|echo "fakeroot was disabled for ASAN build"|' debian/autobake-deb.sh
sed -i 's|BUILDPACKAGE_DPKGCMD+=("eatmydata")|echo "eatmydata was disabled for ASAN build"|' debian/autobake-deb.sh
fi
#disable LTO for 22.04 for now
if [[ $OS == 'ubuntu:22.04' || $OS == 'ubuntu:24.04' ]]; then
for i in mariadb-plugin-columnstore mariadb-server mariadb-server-core mariadb mariadb-10.6; do
@@ -311,7 +325,6 @@ modify_packaging() {
}
construct_cmake_flags() {
MDB_CMAKE_FLAGS=(
-DBUILD_CONFIG=mysql_release
-DCMAKE_BUILD_TYPE=$MCS_BUILD_TYPE
@@ -333,6 +346,11 @@ construct_cmake_flags() {
-DWITH_WSREP=NO
)
if [[ $BUILD_PACKAGES = true ]]; then
MDB_CMAKE_FLAGS+=(-DCOLUMNSTORE_PACKAGES_BUILD=YES)
message "Building packages for Columnstore"
fi
if [[ $MAINTAINER_MODE = true ]]; then
MDB_CMAKE_FLAGS+=(-DCOLUMNSTORE_MAINTAINER=YES)
message "Columnstore maintainer mode on"
@@ -609,12 +627,20 @@ enable_columnstore_back() {
fix_config_files() {
message Fixing config files
THREAD_STACK_SIZE="20M"
# While packaging, we have to patch the configs in the sources so they end up in the packages;
# for local builds, we patch the configs after installation in the system directories.
if [[ $BUILD_PACKAGES = true ]]; then
MDB_SERVICE_FILE=$MDB_SOURCE_PATH/support-files/mariadb.service.in
COLUMNSTORE_CONFIG=$COLUMSNTORE_SOURCE_PATH/dbcon/mysql/columnstore.cnf
SANITIZERS_ABORT_ON_ERROR='0'
else
SYSTEMD_SERVICE_DIR="/usr/lib/systemd/system"
MDB_SERVICE_FILE=$SYSTEMD_SERVICE_DIR/mariadb.service
COLUMNSTORE_CONFIG=$CONFIG_DIR/columnstore.cnf
SANITIZERS_ABORT_ON_ERROR='1'
fi
if [[ $ASAN = true ]]; then
if grep -q thread_stack $COLUMNSTORE_CONFIG; then
@@ -628,7 +654,7 @@ fix_config_files() {
if grep -q ASAN $MDB_SERVICE_FILE; then
warn "MDB Server has ASAN options in $MDB_SERVICE_FILE, check it's compatibility"
else
echo Environment="'ASAN_OPTIONS=abort_on_error=1:disable_coredump=0,print_stats=false,detect_odr_violation=0,check_initialization_order=1,detect_stack_use_after_return=1,atexit=false,log_path=${REPORT_PATH}/asan.mariadb'" >>$MDB_SERVICE_FILE
echo Environment="'ASAN_OPTIONS=abort_on_error=$SANITIZERS_ABORT_ON_ERROR:disable_coredump=0,print_stats=false,detect_odr_violation=0,check_initialization_order=0,detect_stack_use_after_return=1,atexit=false,log_path=${REPORT_PATH}/asan.mariadb'" >>$MDB_SERVICE_FILE
message "ASAN options were added to $MDB_SERVICE_FILE"
fi
fi
@@ -637,7 +663,7 @@ fix_config_files() {
if grep -q TSAN $MDB_SERVICE_FILE; then
warn "MDB Server has TSAN options in $MDB_SERVICE_FILE, check it's compatibility"
else
echo Environment="'TSAN_OPTIONS=abort_on_error=0,log_path=${REPORT_PATH}/tsan.mariadb'" >>$MDB_SERVICE_FILE
echo Environment="'TSAN_OPTIONS=abort_on_error=$SANITIZERS_ABORT_ON_ERROR,log_path=${REPORT_PATH}/tsan.mariadb'" >>$MDB_SERVICE_FILE
message "TSAN options were added to $MDB_SERVICE_FILE"
fi
fi
@@ -646,7 +672,7 @@ fix_config_files() {
if grep -q UBSAN $MDB_SERVICE_FILE; then
warn "MDB Server has UBSAN options in $MDB_SERVICE_FILE, check it's compatibility"
else
echo Environment="'UBSAN_OPTIONS=abort_on_error=0,print_stacktrace=true,log_path=${REPORT_PATH}/ubsan.mariadb'" >>$MDB_SERVICE_FILE
echo Environment="'UBSAN_OPTIONS=abort_on_error=$SANITIZERS_ABORT_ON_ERROR,print_stacktrace=true,log_path=${REPORT_PATH}/ubsan.mariadb'" >>$MDB_SERVICE_FILE
message "UBSAN options were added to $MDB_SERVICE_FILE"
fi
fi
@@ -767,12 +793,12 @@ init_submodules
if [[ $BUILD_PACKAGES = true ]]; then
modify_packaging
( build_package && run_unit_tests )
fix_config_files
(build_package && run_unit_tests)
exit_code=$?
if [[ $SCCACHE = true ]]; then
sccache --show-stats
sccache --show-adv-stats
fi
exit $exit_code

View File

@@ -28,7 +28,7 @@ fi
select_pkg_format ${OS}
if [[ "$(arch)" == "arm64" ]]; then
if [[ "$(arch)" == "arm64" || "$(arch)" == "aarch64" ]]; then
export CC=gcc
fi
@@ -59,7 +59,7 @@ install_deps() {
if [ "$(arch)" == "x86_64" ]; then
PYTHON_URL="https://github.com/indygreg/python-build-standalone/releases/download/20220802/cpython-3.9.13+20220802-x86_64_v2-unknown-linux-gnu-pgo+lto-full.tar.zst"
elif [ "$(arch)" == "arm64" ]; then
elif [[ "$(arch)" == "arm64" || "$(arch)" == "aarch64" ]]; then
PYTHON_URL="https://github.com/indygreg/python-build-standalone/releases/download/20220802/cpython-3.9.13+20220802-aarch64-unknown-linux-gnu-noopt-full.tar.zst"
else
echo "Unsupported architecture: $(arch)"

View File

@@ -17,6 +17,8 @@ COLUMNSTORE_RPM_PACKAGES_PATH="/mdb/${BUILDDIR}/*.rpm"
CMAPI_RPM_PACKAGES_PATH="/mdb/${BUILDDIR}/storage/columnstore/columnstore/cmapi/*.rpm"
COLUMNSTORE_DEB_PACKAGES_PATH="/mdb/*.deb"
COLUMNSTORE_DEBUG_DEB_PACKAGES_PATH="/mdb/*.ddeb"
CMAPI_DEB_PACKAGES_PATH="/mdb/${BUILDDIR}/storage/columnstore/columnstore/cmapi/*.deb"
if [ "$EUID" -ne 0 ]; then
@@ -44,6 +46,7 @@ if [[ $(compgen -G "$COLUMNSTORE_RPM_PACKAGES_PATH") ]]; then
mv -v $COLUMNSTORE_RPM_PACKAGES_PATH "./${RESULT}/"
elif [[ $(compgen -G "$COLUMNSTORE_DEB_PACKAGES_PATH") ]]; then
mv -v $COLUMNSTORE_DEB_PACKAGES_PATH "./${RESULT}/"
mv -v $COLUMNSTORE_DEBUG_DEB_PACKAGES_PATH "./${RESULT}/" || true
else
echo "Columnstore packages are not found!"
fi
@@ -62,7 +65,10 @@ if [[ $(compgen -G "./${RESULT}/*.rpm") ]]; then
createrepo "./${RESULT}"
else
retry_eval 5 "apt update && apt install -y dpkg-dev"
dpkg-scanpackages "${RESULT}" | gzip >"./${RESULT}/Packages.gz"
dpkg-scanpackages "${RESULT}" >Packages
dpkg-scanpackages --type ddeb "${RESULT}" >>Packages
gzip -c Packages >"./${RESULT}/Packages.gz"
fi
mkdir -p "/drone/src/${RESULT}"

19
build/install_libc++.sh Executable file
View File

@@ -0,0 +1,19 @@
#!/bin/bash

# Install the libc++ / libc++abi development packages for a given clang
# major version (e.g. "18" -> libc++-18-dev and libc++abi-18-dev).
#
# Usage: install_libc++.sh <clang-major-version>
# Exits 1 if no version argument is given.

set -eo pipefail

SCRIPT_LOCATION=$(dirname "$0")
# Provides message(), change_ubuntu_mirror() and retry_eval().
source "$SCRIPT_LOCATION"/utils.sh

# Validate arguments before using them.
if [[ $# -ne 1 ]]; then
  echo "Please pass clang-version as a first parameter"
  exit 1
fi

VERSION="$1"

change_ubuntu_mirror us

message "Installing libc++-${VERSION}"

# Quote the whole compound command: unquoted, the calling shell would split at
# '&&' and retry_eval would wrap only 'apt-get clean', leaving update/install
# to run once without retries (cf. the quoted retry_eval usage elsewhere in build/).
retry_eval 5 "apt-get clean && apt-get update && apt-get install -y libc++-${VERSION}-dev libc++abi-${VERSION}-dev"

View File

@@ -78,57 +78,57 @@ start_container() {
}
prepare_container() {
if [[ "$RESULT" != *rocky* ]]; then
if [[ "$RESULT" != *rocky* ]]; then
execInnerDocker "$CONTAINER_NAME" 'sed -i "s/exit 101/exit 0/g" /usr/sbin/policy-rc.d'
fi
fi
#list_cgroups
echo "Docker CGroups opts here"
ls -al /sys/fs/cgroup/cgroup.controllers || true
ls -al /sys/fs/cgroup/ || true
ls -al /sys/fs/cgroup/memory || true
#list_cgroups
echo "Docker CGroups opts here"
ls -al /sys/fs/cgroup/cgroup.controllers || true
ls -al /sys/fs/cgroup/ || true
ls -al /sys/fs/cgroup/memory || true
execInnerDocker "$CONTAINER_NAME" 'echo Inner Docker CGroups opts here'
execInnerDocker "$CONTAINER_NAME" 'ls -al /sys/fs/cgroup/cgroup.controllers || true'
execInnerDocker "$CONTAINER_NAME" 'ls -al /sys/fs/cgroup/ || true'
execInnerDocker "$CONTAINER_NAME" 'ls -al /sys/fs/cgroup/memory || true'
execInnerDocker "$CONTAINER_NAME" 'echo Inner Docker CGroups opts here'
execInnerDocker "$CONTAINER_NAME" 'ls -al /sys/fs/cgroup/cgroup.controllers || true'
execInnerDocker "$CONTAINER_NAME" 'ls -al /sys/fs/cgroup/ || true'
execInnerDocker "$CONTAINER_NAME" 'ls -al /sys/fs/cgroup/memory || true'
# Prepare core dump directory inside container
execInnerDocker "$CONTAINER_NAME" 'mkdir -p core && chmod 777 core'
docker cp "$COLUMNSTORE_SOURCE_PATH"/core_dumps/. "$CONTAINER_NAME":/
docker cp "$COLUMNSTORE_SOURCE_PATH"/build/utils.sh "$CONTAINER_NAME":/
docker cp "$COLUMNSTORE_SOURCE_PATH"/setup-repo.sh "$CONTAINER_NAME":/
# Prepare core dump directory inside container
execInnerDocker "$CONTAINER_NAME" 'mkdir -p core && chmod 777 core'
docker cp "$COLUMNSTORE_SOURCE_PATH"/core_dumps/. "$CONTAINER_NAME":/
docker cp "$COLUMNSTORE_SOURCE_PATH"/build/utils.sh "$CONTAINER_NAME":/
docker cp "$COLUMNSTORE_SOURCE_PATH"/setup-repo.sh "$CONTAINER_NAME":/
if [[ "$DO_SETUP" == "true" ]]; then
if [[ "$DO_SETUP" == "true" ]]; then
execInnerDocker "$CONTAINER_NAME" '/setup-repo.sh'
fi
fi
# install deps
if [[ "$RESULT" == *rocky* ]]; then
# install deps
if [[ "$RESULT" == *rocky* ]]; then
execInnerDockerWithRetry "$CONTAINER_NAME" 'yum --nobest update -y && yum --nobest install -y cracklib-dicts diffutils elfutils epel-release expect findutils iproute gawk gcc-c++ gdb hostname lz4 patch perl procps-ng rsyslog sudo tar wget which'
else
else
change_ubuntu_mirror_in_docker "$CONTAINER_NAME" "us"
execInnerDockerWithRetry "$CONTAINER_NAME" 'apt update -y && apt install -y elfutils expect findutils iproute2 g++ gawk gdb hostname liblz4-tool patch procps rsyslog sudo tar wget'
fi
fi
# Configure core dump naming pattern
execInnerDocker "$CONTAINER_NAME" 'sysctl -w kernel.core_pattern="/core/%E_${RESULT}_core_dump.%p"'
# Configure core dump naming pattern
execInnerDocker "$CONTAINER_NAME" 'sysctl -w kernel.core_pattern="/core/%E_${RESULT}_core_dump.%p"'
#Install columnstore in container
echo "Installing columnstore..."
if [[ "$RESULT" == *rocky* ]]; then
#Install columnstore in container
echo "Installing columnstore..."
if [[ "$RESULT" == *rocky* ]]; then
execInnerDockerWithRetry "$CONTAINER_NAME" 'yum install -y MariaDB-columnstore-engine MariaDB-test'
else
execInnerDockerWithRetry "$CONTAINER_NAME" 'apt update -y && apt install -y mariadb-plugin-columnstore mariadb-test mariadb-test-data'
fi
else
execInnerDockerWithRetry "$CONTAINER_NAME" 'apt update -y && apt install -y mariadb-plugin-columnstore mariadb-test mariadb-test-data mariadb-plugin-columnstore-dbgsym'
fi
sleep 5
echo "PrepareTestStage completed in $CONTAINER_NAME"
sleep 5
echo "PrepareTestStage completed in $CONTAINER_NAME"
}
if [[ -z $(docker ps -q --filter "name=${CONTAINER_NAME}") ]]; then
start_container
prepare_container
else message "Container ${CONTAINER_NAME} is already running, skipping prepare step"
else
message "Container ${CONTAINER_NAME} is already running, skipping prepare step"
fi

View File

@@ -28,6 +28,7 @@ BUILD_DIR="verylongdirnameforverystrangecpackbehavior"
prepare_regression() {
if execInnerDocker "${CONTAINER_NAME}" "test -f /mariadb-columnstore-regression-test/mysql/queries/queryTester.cpp"; then
message "Preparation for regression tests is already done — skipping"
return 0
fi
@@ -68,7 +69,7 @@ prepare_regression() {
run_test() {
message "Running test: ${TEST_NAME:-<none>}"
execInnerDocker "${CONTAINER_NAME}" "bash -c 'sleep 4800 && bash /save_stack.sh /mariadb-columnstore-regression-test/mysql/queries/nightly/alltest/reg-logs/' &"
execInnerDocker "${CONTAINER_NAME}" "sleep 4800 && bash /save_stack.sh /mariadb-columnstore-regression-test/mysql/queries/nightly/alltest/reg-logs/ &"
execInnerDockerNoTTY "${CONTAINER_NAME}" \
"export PRESERVE_LOGS=true && cd /mariadb-columnstore-regression-test/mysql/queries/nightly/alltest && \

View File

@@ -82,6 +82,16 @@ macro(columnstore_shared_library libname)
columnstore_install_target(${libname} ${ENGINE_LIBDIR})
endmacro()
macro(columnstore_static_library libname)
add_definitions(-fPIC -DPIC)
add_library(${libname} STATIC ${ARGN})
endmacro()
macro(columnstore_shared_library libname)
add_library(${libname} SHARED ${ARGN})
columnstore_install_target(${libname} ${ENGINE_LIBDIR})
endmacro()
macro(columnstore_library libname)
if(COLUMNSTORE_STATIC_LIBRARIES)
columnstore_static_library(${libname} ${ARGN})
@@ -99,7 +109,20 @@ macro(columnstore_link libname)
target_link_libraries(${libname} ${ARGN})
endmacro()
macro(columnstore_mysql_plugin_library libname)
add_library(${libname} SHARED ${ARGN})
columnstore_install_target(${libname} ${MARIADB_PLUGINDIR})
endmacro()
macro(columnstore_link libname)
target_link_libraries(${libname} ${ARGN})
endmacro()
macro(columnstore_executable executable_name)
if(WITH_COLUMNSTORE_ASAN)
add_executable(${executable_name} ${ARGN} ${CMAKE_BINARY_DIR}/asan_options.cpp)
else()
add_executable(${executable_name} ${ARGN})
endif()
columnstore_install_target(${executable_name} ${ENGINE_BINDIR})
endmacro()

View File

@@ -23,7 +23,7 @@ link_directories("${Boost_LIBRARY_DIRS}")
set(_cxxargs "-fPIC -DBOOST_NO_AUTO_PTR -fvisibility=default")
set(_b2args cxxflags=${_cxxargs};cflags=-fPIC;threading=multi;${_extra};toolset=${_toolset}
--without-python;--prefix=${INSTALL_LOCATION}
--without-mpi;--without-charconv;--without-python;--prefix=${INSTALL_LOCATION}
)
set(byproducts)
@@ -48,8 +48,8 @@ ExternalProject_Add(
BUILD_COMMAND ./b2 -q ${_b2args}
BUILD_IN_SOURCE TRUE
INSTALL_COMMAND ./b2 -q install ${_b2args}
LOG_BUILD TRUE
LOG_INSTALL TRUE
#LOG_BUILD TRUE
#LOG_INSTALL TRUE
EXCLUDE_FROM_ALL TRUE
${byproducts}
)

View File

@@ -771,12 +771,8 @@ else()
set(GIT_VERSION "source")
endif()
# releasenum is used by external scripts for various tasks. Leave it alone.
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/build/releasenum.in ${CMAKE_CURRENT_BINARY_DIR}/build/releasenum IMMEDIATE)
columnstore_install_file(${CMAKE_CURRENT_BINARY_DIR}/build/releasenum ${ENGINE_SUPPORTDIR})
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/columnstoreversion.h.in ${CMAKE_CURRENT_SOURCE_DIR}/columnstoreversion.h)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/mcsconfig.h.in ${CMAKE_CURRENT_BINARY_DIR}/mcsconfig.h)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/gitversionEngine.in ${CMAKE_CURRENT_BINARY_DIR}/gitversionEngine IMMEDIATE)
columnstore_install_file(${CMAKE_CURRENT_BINARY_DIR}/gitversionEngine ${ENGINE_SUPPORTDIR})

View File

@@ -12,8 +12,18 @@ if(WITH_THRIFT STREQUAL "system" OR WITH_THRIFT STREQUAL "auto")
set(THRIFT_LIBRARY "${THRIFT_LIBRARIES}")
return()
elseif(WITH_THRIFT STREQUAL "system")
FIND_PACKAGE(PkgConfig REQUIRED)
pkg_check_modules(THRIFT REQUIRED thrift)
if(THRIFT_FOUND)
add_custom_target(external_thrift)
set(THRIFT_INCLUDE_DIR "${THRIFT_INCLUDE_DIR}")
set(THRIFT_LIBRARY "${THRIFT_LIBRARIES}")
return()
else()
message(FATAL_ERROR "System Thrift requested but not found!")
endif()
endif()
endif()
include(ExternalProject)

View File

@@ -748,7 +748,12 @@ class ExtentMapController:
retry_count += 1
continue
elem_current_suffix = ret.stdout.decode("utf-8").rstrip()
elem_current_filename = f'{EM_PATH_SUFFIX}/{elem_current_suffix}_{element}'
suffix_for_file = elem_current_suffix
# The journal is always in the current directory, strip trailing A/B from suffix
if element == 'journal' and suffix_for_file.endswith(('A', 'B')):
suffix_for_file = suffix_for_file[:-1]
elem_current_filename = f'{EM_PATH_SUFFIX}/{suffix_for_file}_{element}'
# TODO: Remove conditional once container dispatcher
# uses non-root by default
@@ -774,8 +779,13 @@ class ExtentMapController:
)
elem_current_name = Path(MCS_BRM_CURRENT_PATH)
elem_current_filename = elem_current_name.read_text().rstrip()
suffix_for_file = elem_current_filename
# The journal is always in the current directory, strip trailing A/B from suffix
if element == 'journal' and suffix_for_file.endswith(('A', 'B')):
suffix_for_file = suffix_for_file[:-1]
elem_current_file = Path(
f'{MCS_EM_PATH}/{elem_current_filename}_{element}'
f'{MCS_EM_PATH}/{suffix_for_file}_{element}'
)
result = elem_current_file.read_bytes()

View File

@@ -66,7 +66,13 @@ class TestEMEndpoints(unittest.TestCase):
["smcat", S3_BRM_CURRENT_PATH], stdout=subprocess.PIPE
)
element_current_suffix = ret.stdout.decode("utf-8").rstrip()
element_current_filename = f'{EM_PATH_SUFFIX}/{element_current_suffix}_{element}'
suffix_for_file = element_current_suffix
# Journal is always singular, so strip trailing A/B from suffix
if element == 'journal' and suffix_for_file.endswith(('A', 'B')):
suffix_for_file = suffix_for_file[:-1]
element_current_filename = f'{EM_PATH_SUFFIX}/{suffix_for_file}_{element}'
ret = subprocess.run(
["smcat", element_current_filename], stdout=subprocess.PIPE
)
@@ -74,8 +80,14 @@ class TestEMEndpoints(unittest.TestCase):
else:
element_current_name = Path(MCS_BRM_CURRENT_PATH)
element_current_filename = element_current_name.read_text().rstrip()
suffix_for_file = element_current_filename
# Journal is always singular, so strip trailing A/B from suffix
if element == 'journal' and suffix_for_file.endswith(('A', 'B')):
suffix_for_file = suffix_for_file[:-1]
element_current_file = Path(
f'{MCS_EM_PATH}/{element_current_filename}_{element}'
f'{MCS_EM_PATH}/{suffix_for_file}_{element}'
)
result = element_current_file.read_bytes()
return result

View File

@@ -10,12 +10,12 @@ DUMPNAME=$4
STEP_NAME=$5
save_ansi_to_html() {
echo "<h2> $1 </h2>" >>"${FILENAME}"
cat "$DUMPNAME" | bash "${SCRIPT_LOCATION}"/ansi2html.sh --palette=solarized >>"${FILENAME}"
}
invoke_gdb_command() {
unbuffer gdb -x "${SCRIPT_LOCATION}"/gdbinit -q ${BINARY} --core ${COREDUMP} -ex "$1" -ex quit >>"$DUMPNAME"
gdb -x "${SCRIPT_LOCATION}"/gdbinit -q ${BINARY} --core ${COREDUMP} -ex "$1" -ex quit >>"$DUMPNAME"
}
echo "<h1> Step: ${STEP_NAME}<br> Binary name: ${BINARY}<br> </h1>" >>"${FILENAME}"

View File

@@ -21,7 +21,7 @@
#include <boost/any.hpp>
#include <boost/intrusive_ptr.hpp>
#include "exceptclasses.h"
#include "conststring.h"
#include "basic/conststring.h"
#include "mcs_datatype_basic.h"
#include "mcs_numeric_limits.h"
#include "mcs_data_condition.h"
@@ -861,7 +861,7 @@ class MinMaxPartitionInfo : public MinMaxInfo
uint64_t m_status;
public:
MinMaxPartitionInfo() : m_status(0){};
MinMaxPartitionInfo() : m_status(0) {};
explicit MinMaxPartitionInfo(const BRM::EMEntry& entry);
void set_invalid()
{
@@ -1167,7 +1167,8 @@ class TypeHandlerBit : public TypeHandler
idbassert(0); // QQ
return 1;
}
std::string format(const SimpleValue& /*v*/, const SystemCatalog::TypeAttributesStd& /*attr*/) const override
std::string format(const SimpleValue& /*v*/,
const SystemCatalog::TypeAttributesStd& /*attr*/) const override
{
return "0"; // QQ
}
@@ -1200,7 +1201,7 @@ class TypeHandlerBit : public TypeHandler
const ConvertFromStringParam& /*prm*/, const std::string& /*str*/,
bool& /*pushWarning*/) const override;
const uint8_t* getEmptyValueForType(const SystemCatalog::TypeAttributesStd& /*attr*/) const override
const uint8_t* getEmptyValueForType(const SystemCatalog::TypeAttributesStd& /*attr*/) const override
{
idbassert(0);
return nullptr;
@@ -2272,7 +2273,8 @@ class TypeHandlerReal : public TypeHandler
{
return {}; // QQ: real types were not handled in IDB_format()
}
std::string format(const SimpleValue& /*v*/, const SystemCatalog::TypeAttributesStd& /*attr*/) const override
std::string format(const SimpleValue& /*v*/,
const SystemCatalog::TypeAttributesStd& /*attr*/) const override
{
return "0"; // QQ
}
@@ -2568,7 +2570,8 @@ class TypeHandlerBlob : public TypeHandlerStr
{
return storeValueToFieldBlobText(row, pos, f);
}
std::string format(const SimpleValue& /*v*/, const SystemCatalog::TypeAttributesStd& /*attr*/) const override
std::string format(const SimpleValue& /*v*/,
const SystemCatalog::TypeAttributesStd& /*attr*/) const override
{
return "0"; // QQ
}
@@ -2594,7 +2597,8 @@ class TypeHandlerText : public TypeHandlerStr
{
return storeValueToFieldBlobText(row, pos, f);
}
std::string format(const SimpleValue& /*v*/, const SystemCatalog::TypeAttributesStd& /*attr*/) const override
std::string format(const SimpleValue& /*v*/,
const SystemCatalog::TypeAttributesStd& /*attr*/) const override
{
return "0"; // QQ
}
@@ -2625,7 +2629,8 @@ class TypeHandlerClob : public TypeHandlerStr
idbassert(0); // QQ
return 1;
}
std::string format(const SimpleValue& /*v*/, const SystemCatalog::TypeAttributesStd& /*attr*/) const override
std::string format(const SimpleValue& /*v*/,
const SystemCatalog::TypeAttributesStd& /*attr*/) const override
{
return "0"; // QQ
}

View File

@@ -17,8 +17,8 @@
#pragma once
#include "conststring.h"
#include "collation.h" // class Charset
#include "basic/conststring.h"
#include "mariadb_charset/collation.h" // class Charset
namespace datatypes
{

View File

@@ -42,4 +42,4 @@ columnstore_library(
${FLEX_ddl_scan_OUTPUTS}
)
columnstore_link(ddlpackage loggingcpp)
columnstore_link(ddlpackage loggingcpp messageqcpp)

View File

@@ -27,7 +27,7 @@
*/
#include <stdexcept>
#include "collation.h" // CHARSET_INFO
#include "mariadb_charset/collation.h" // CHARSET_INFO
#include "ddlpkg.h"
#include "mariadb_my_sys.h" // myf, MYF()
@@ -87,7 +87,7 @@ struct pass_to_bison
myf utf8_flag;
pass_to_bison(ParseTree* pt)
: fParseTree(pt), scanner(NULL), default_table_charset(NULL), utf8_flag(MYF(0)){};
: fParseTree(pt), scanner(NULL), default_table_charset(NULL), utf8_flag(MYF(0)) {};
};
class SqlParser

View File

@@ -13,4 +13,4 @@ set(ddlpackageproc_LIB_SRCS
)
columnstore_library(ddlpackageproc ${ddlpackageproc_LIB_SRCS})
columnstore_link(ddlpackageproc ${NETSNMP_LIBRARIES} loggingcpp)
columnstore_link(ddlpackageproc loggingcpp oamcpp messageqcpp)

View File

@@ -41,4 +41,4 @@ columnstore_library(
${FLEX_dml_scan_OUTPUTS}
)
columnstore_link(dmlpackage loggingcpp)
columnstore_link(dmlpackage loggingcpp messageqcpp)

View File

@@ -13,4 +13,4 @@ set(dmlpackageproc_LIB_SRCS
)
columnstore_library(dmlpackageproc ${dmlpackageproc_LIB_SRCS})
columnstore_link(dmlpackageproc ${NETSNMP_LIBRARIES} loggingcpp)
columnstore_link(dmlpackageproc loggingcpp oamcpp messageqcpp)

View File

@@ -46,4 +46,12 @@ set(execplan_LIB_SRCS
)
columnstore_library(execplan ${execplan_LIB_SRCS})
columnstore_link(execplan messageqcpp ${NETSNMP_LIBRARIES} ${ENGINE_DT_LIB} pron loggingcpp)
columnstore_link(
execplan
messageqcpp
${ENGINE_DT_LIB}
pron
oamcpp
loggingcpp
querytele
)

View File

@@ -49,7 +49,7 @@
#undef max
#include "mcs_datatype.h"
#include "collation.h" // CHARSET_INFO, class Charset
#include "mariadb_charset/collation.h" // CHARSET_INFO, class Charset
#include "nullstring.h"
class ExecPlanTest;
@@ -75,7 +75,8 @@ const int32_t IDB_VTABLE_ID = CNX_VTABLE_ID;
/**
* A struct to hold a list of table partitions.
*/
struct Partitions {
struct Partitions
{
std::vector<std::string> fPartNames;
void serialize(messageqcpp::ByteStream& b) const
{
@@ -96,12 +97,11 @@ struct Partitions {
b >> t;
fPartNames.push_back(t);
}
}
};
bool operator <(const Partitions& a, const Partitions& b);
bool operator ==(const Partitions& a, const Partitions& b);
bool operator !=(const Partitions& a, const Partitions& b); // for GCC9
bool operator<(const Partitions& a, const Partitions& b);
bool operator==(const Partitions& a, const Partitions& b);
bool operator!=(const Partitions& a, const Partitions& b); // for GCC9
/** The CalpontSystemCatalog class
*
@@ -259,9 +259,9 @@ class CalpontSystemCatalog : public datatypes::SystemCatalog
public:
ColType() = default;
ColType(const ColType& rhs);
ColType(int32_t colWidth_, int32_t scale_, int32_t precision_,
const ConstraintType& constraintType_, const DictOID& ddn_, int32_t colPosition_,
int32_t compressionType_, OID columnOID_, const ColDataType& colDataType_);
ColType(int32_t colWidth_, int32_t scale_, int32_t precision_, const ConstraintType& constraintType_,
const DictOID& ddn_, int32_t colPosition_, int32_t compressionType_, OID columnOID_,
const ColDataType& colDataType_);
ColType& operator=(const ColType& rhs);
CHARSET_INFO* getCharset() const;

View File

@@ -47,8 +47,6 @@ using namespace logging;
#include "clientrotator.h"
//#include "idb_mysql.h"
/** Debug macro */
#ifdef INFINIDB_DEBUG
#define IDEBUG(x) \

View File

@@ -42,7 +42,15 @@ class Row;
namespace execplan
{
// class Operator;
using ParseTreeWalker = void (*)(ParseTree* n);
using ParseTreeConstWalker = void (*)(const ParseTree* n);
using ParseTreePrinter = void (*)(const ParseTree* n, std::ostream& output);
using ParseTreeWalkerWithContext = void (*)(ParseTree* n, void* obj);
using ParseTreeConstWalkerWithContext = void (*)(const ParseTree* n, void* obj);
// class ParseTree;
/**
* @brief A template class template to represent an expression tree
*
@@ -133,31 +141,31 @@ class ParseTree
*
* postfix walking of a const tree
*/
inline void walk(void (*fn)(ParseTree* n)) const;
inline void walk(ParseTreeWalker fn) const;
/** walk the tree
*
* postfix walking of a non-const tree. This is for deleting the tree
*/
inline void walk(void (*fn)(const ParseTree* n)) const;
inline void walk(ParseTreeConstWalker fn) const;
/** output the tree
*
* take ostream argument to walk and output the tree
*/
inline void walk(void (*fn)(const ParseTree* n, std::ostream& output), std::ostream& output) const;
inline void walk(ParseTreePrinter fn, std::ostream& output) const;
/** output the tree
*
* take user argument to walk and output the tree
*/
inline void walk(void (*fn)(const ParseTree* n, void* obj), void* object) const;
inline void walk(ParseTreeConstWalkerWithContext fn, void* object) const;
/** output the tree
*
* take user argument to walk and output the tree
*/
inline void walk(void (*fn)(ParseTree* n, void* obj), void* object) const;
inline void walk(ParseTreeWalkerWithContext fn, void* object) const;
/** output the tree to string
* for debug purpose
@@ -448,7 +456,7 @@ inline ParseTree::~ParseTree()
}
}
inline void ParseTree::walk(void (*fn)(ParseTree* n)) const
inline void ParseTree::walk(ParseTreeWalker fn) const
{
DFSStack stack;
stack.emplace_back(const_cast<ParseTree*>(this));
@@ -477,7 +485,7 @@ inline void ParseTree::walk(void (*fn)(ParseTree* n)) const
}
}
inline void ParseTree::walk(void (*fn)(const ParseTree* n)) const
inline void ParseTree::walk(ParseTreeConstWalker fn) const
{
DFSStack stack;
stack.emplace_back(const_cast<ParseTree*>(this));
@@ -506,7 +514,7 @@ inline void ParseTree::walk(void (*fn)(const ParseTree* n)) const
}
}
inline void ParseTree::walk(void (*fn)(const ParseTree* n, std::ostream& output), std::ostream& output) const
inline void ParseTree::walk(ParseTreePrinter fn, std::ostream& output) const
{
DFSStack stack;
stack.emplace_back(const_cast<ParseTree*>(this));
@@ -535,7 +543,7 @@ inline void ParseTree::walk(void (*fn)(const ParseTree* n, std::ostream& output)
}
}
inline void ParseTree::walk(void (*fn)(const ParseTree* n, void* obj), void* obj) const
inline void ParseTree::walk(ParseTreeConstWalkerWithContext fn, void* obj) const
{
DFSStack stack;
stack.emplace_back(const_cast<ParseTree*>(this));
@@ -571,7 +579,7 @@ inline std::string ParseTree::toString() const
return oss.str();
}
inline void ParseTree::walk(void (*fn)(ParseTree* n, void* obj), void* obj) const
inline void ParseTree::walk(ParseTreeWalkerWithContext fn, void* obj) const
{
DFSStack stack;
stack.emplace_back(const_cast<ParseTree*>(this));

View File

@@ -39,7 +39,7 @@
#include "returnedcolumn.h"
#include "dataconvert.h"
#include "collation.h" // CHARSET_INFO
#include "mariadb_charset/collation.h" // CHARSET_INFO
namespace messageqcpp
{

View File

@@ -28,7 +28,7 @@ using namespace std;
#include "basic/string_utils.h"
#include "bytestream.h"
#include "collation.h"
#include "mariadb_charset/collation.h"
using namespace messageqcpp;

View File

@@ -68,7 +68,15 @@ columnstore_library(joblist ${joblist_LIB_SRCS})
target_include_directories(
joblist BEFORE PUBLIC ${OPENSSL_INCLUDE_DIR} ${LIBMARIADB_BININC_DIR} ${LIBMARIADB_SRCINC_DIR}
)
columnstore_link(joblist loggingcpp)
columnstore_link(
joblist
loggingcpp
boost_thread
oamcpp
querytele
messageqcpp
statistics_manager
)
if(WITH_ORDERBY_UT)
columnstore_executable(job_orderby_tests orderby-tests.cpp)

View File

@@ -71,7 +71,7 @@ using namespace dataconvert;
#include "jlf_tuplejoblist.h"
using namespace joblist;
#include "statistics.h"
#include "statistics_manager/statistics.h"
#ifdef __clang__
#pragma clang diagnostic push

View File

@@ -595,6 +595,8 @@ void TupleHashJoinStep::djsReaderFcn(int index)
for (auto& diskJoinStep : djs)
{
// TODO add and verify mutex
// boost::mutex::scoped_lock lk(*fStatsMutexPtr);
fExtendedInfo += diskJoinStep->extendedInfo();
fMiniInfo += diskJoinStep->miniInfo();
}

View File

@@ -43,6 +43,7 @@ set(libcalmysql_SRCS
is_columnstore_extents.cpp
columnstore_dataload.cpp
rulebased_optimizer.cpp
rbo_apply_parallel_ces.cpp
)
set_source_files_properties(ha_mcs.cpp PROPERTIES COMPILE_FLAGS "-fno-implicit-templates")
@@ -63,7 +64,7 @@ if(COMMAND mysql_add_plugin)
${PLUGIN_EXEC_LIBS}
${PLUGIN_WRITE_LIBS}
joblist_server
${NETSNMP_LIBRARIES}
statistics_manager
${MARIADB_CLIENT_LIBS}
${S3API_DEPS}
threadpool
@@ -89,10 +90,10 @@ else()
${S3API_DEPS}
${ENGINE_LDFLAGS}
${ENGINE_WRITE_LIBS}
${NETSNMP_LIBRARIES}
${SERVER_BUILD_DIR}/libservices/libmysqlservices.a
threadpool
loggingcpp
statistics_manager
marias3
)
# define this dummy target for standalone builds (ie, when mysql_add_plugin doesn't exist)

View File

@@ -444,6 +444,9 @@ SCSEP FromSubQuery::transform()
return csep;
}
// Insert column statistics
fGwip.mergeTableStatistics(gwi.tableStatisticsMap);
fGwip.subselectList.push_back(csep);
return csep;
}

View File

@@ -1828,7 +1828,7 @@ static int columnstore_init_func(void* p)
fprintf(stderr, "Columnstore: Started; Version: %s-%s\n", columnstore_version.c_str(),
columnstore_release.c_str());
plugin_ref plugin_innodb;
plugin_ref plugin_innodb = nullptr;
LEX_CSTRING name = {STRING_WITH_LEN("INNODB")};
if (get_innodb_queries_uses_mcs())
@@ -1873,13 +1873,15 @@ static int columnstore_init_func(void* p)
if (get_innodb_queries_uses_mcs())
{
std::cerr << "Columnstore: innodb_queries_uses_mcs is set, redirecting all InnoDB queries to Columnstore." << std::endl;
std::cerr << "Columnstore: innodb_queries_uses_mcs is set, redirecting all InnoDB queries to Columnstore."
<< std::endl;
auto* innodb_hton = plugin_hton(plugin_innodb);
int error = innodb_hton == nullptr; // Engine must exists!
if (error)
{
std::cerr << "Columnstore: innodb_queries_uses_mcs is set, but could not find InnoDB plugin." << std::endl;
std::cerr << "Columnstore: innodb_queries_uses_mcs is set, but could not find InnoDB plugin."
<< std::endl;
my_error(HA_ERR_INITIALIZATION, MYF(0), "Could not find storage engine %s", name.str);
}
innodb_hton->create_select = create_columnstore_select_handler;

View File

@@ -871,12 +871,14 @@ int ProcessDDLStatement(string& ddlStatement, string& schema, const string& /*ta
return rc;
}
#if MYSQL_VERSION_ID < 110400
// For TIMESTAMP, if no constraint is given, default to NOT NULL
if (createTable->fTableDef->fColumns[i]->fType->fType == ddlpackage::DDL_TIMESTAMP &&
createTable->fTableDef->fColumns[i]->fConstraints.empty())
{
createTable->fTableDef->fColumns[i]->fConstraints.push_back(new ColumnConstraintDef(DDL_NOT_NULL));
}
#endif
if (createTable->fTableDef->fColumns[i]->fDefaultValue)
{

View File

@@ -49,6 +49,7 @@ using namespace logging;
#define PREFER_MY_CONFIG_H
#include <my_config.h>
#include "idb_mysql.h"
#include "partition_element.h"
#include "partition_info.h"
@@ -6287,6 +6288,39 @@ int processLimitAndOffset(SELECT_LEX& select_lex, gp_walk_info& gwi, SCSEP& csep
return 0;
}
// Loop over available indexes to find and extract corresponding EI column statistics
// for the first column of the index if any.
// Statistics is stored in GWI context.
// Mock for ES 10.6
#if MYSQL_VERSION_ID >= 120401
void extractColumnStatistics(Item_field* ifp, gp_walk_info& gwi)
{
for (uint j = 0; j < ifp->field->table->s->keys; j++)
{
for (uint i = 0; i < ifp->field->table->s->key_info[j].usable_key_parts; i++)
{
if (ifp->field->table->s->key_info[j].key_part[i].fieldnr == ifp->field->field_index + 1)
{
if (i == 0 && ifp->field->read_stats)
{
assert(ifp->field->table->s);
auto* histogram = dynamic_cast<Histogram_json_hb*>(ifp->field->read_stats->histogram);
if (histogram)
{
SchemaAndTableName tableName = {ifp->field->table->s->db.str, ifp->field->table->s->table_name.str};
gwi.tableStatisticsMap[tableName][ifp->field->field_name.str] = *histogram;
}
}
}
}
}
}
#else
void extractColumnStatistics(Item_field* /*ifp*/, gp_walk_info& /*gwi*/)
{
}
#endif
/*@brief Process SELECT part of a query or sub-query */
/***********************************************************
* DESCRIPTION:
@@ -6376,21 +6410,20 @@ int processSelect(SELECT_LEX& select_lex, gp_walk_info& gwi, SCSEP& csep, vector
case Item::FIELD_ITEM:
{
Item_field* ifp = (Item_field*)item;
SimpleColumn* sc = NULL;
extractColumnStatistics(ifp, gwi);
// Handle * case
if (ifp->field_name.length && string(ifp->field_name.str) == "*")
{
collectAllCols(gwi, ifp);
break;
}
sc = buildSimpleColumn(ifp, gwi);
SimpleColumn* sc = buildSimpleColumn(ifp, gwi);
if (sc)
{
string fullname;
String str;
ifp->print(&str, QT_ORDINARY);
fullname = str.c_ptr();
string fullname(str.c_ptr());
if (!ifp->is_explicit_name()) // no alias
{
@@ -7413,7 +7446,6 @@ int cs_get_derived_plan(ha_columnstore_derived_handler* handler, THD* /*thd*/, S
return 0;
}
int cs_get_select_plan(ha_columnstore_select_handler* handler, THD* thd, SCSEP& csep, gp_walk_info& gwi,
bool isSelectLexUnit)
{
@@ -7448,7 +7480,8 @@ int cs_get_select_plan(ha_columnstore_select_handler* handler, THD* thd, SCSEP&
if (get_unstable_optimizer(thd))
{
bool csepWasOptimized = optimizer::optimizeCSEP(*csep);
optimizer::RBOptimizerContext ctx(gwi);
bool csepWasOptimized = optimizer::optimizeCSEP(*csep, ctx);
if (csep->traceOn() && csepWasOptimized)
{
cerr << "---------------- cs_get_select_plan optimized EXECUTION PLAN ----------------" << endl;

View File

@@ -128,13 +128,44 @@ using namespace funcexp;
#include "ha_mcs_sysvars.h"
#include "ha_mcs_datatype.h"
#include "statistics.h"
#include "ha_mcs_logging.h"
#include "ha_subquery.h"
#include "statistics_manager/statistics.h"
namespace cal_impl_if
{
extern bool nonConstFunc(Item_func* ifp);
void gp_walk_info::mergeTableStatistics(const TableStatisticsMap& aTableStatisticsMap)
{
for (auto& [schemaAndTableName, aColumnStatisticsMap]: aTableStatisticsMap)
{
auto tableStatisticsMapIt = tableStatisticsMap.find(schemaAndTableName);
if (tableStatisticsMapIt == tableStatisticsMap.end())
{
tableStatisticsMap[schemaAndTableName] = aColumnStatisticsMap;
}
else
{
for (auto& [columnName, histogram]: aColumnStatisticsMap)
{
tableStatisticsMapIt->second[columnName] = histogram;
}
}
}
}
std::optional<ColumnStatisticsMap> gp_walk_info::findStatisticsForATable(SchemaAndTableName& schemaAndTableName)
{
auto tableStatisticsMapIt = tableStatisticsMap.find(schemaAndTableName);
if (tableStatisticsMapIt == tableStatisticsMap.end())
{
return std::nullopt;
}
return {tableStatisticsMapIt->second};
}
}
namespace

View File

@@ -95,13 +95,36 @@ enum ClauseType
ORDER_BY
};
struct SchemaAndTableName {
std::string schema;
std::string table;
bool operator==(const SchemaAndTableName& other) const {
return schema == other.schema && table == other.table;
}
};
struct SchemaAndTableNameHash {
std::size_t operator()(const SchemaAndTableName& k) const {
return std::hash<std::string>()(k.schema + k.table);
}
};
typedef std::vector<JoinInfo> JoinInfoVec;
typedef dmlpackage::ColValuesList ColValuesList;
typedef dmlpackage::TableValuesMap TableValuesMap;
typedef std::map<execplan::CalpontSystemCatalog::TableAliasName, std::pair<int, TABLE_LIST*>> TableMap;
typedef std::tr1::unordered_map<TABLE_LIST*, std::vector<COND*>> TableOnExprList;
typedef std::tr1::unordered_map<TABLE_LIST*, uint> TableOuterJoinMap;
using ColumnName = std::string;
using ColumnStatisticsMap = std::unordered_map<ColumnName, Histogram_json_hb>;
using TableStatisticsMap = std::unordered_map<SchemaAndTableName, ColumnStatisticsMap, SchemaAndTableNameHash>;
// This structure is used to store MDB AST -> CSEP translation context.
// There is a column statistics for some columns in a query.
// As per 23.10.5 "some" means first column of the index in projection list of CSEP
// satisfies the condition of applyParallelCSEP RBO rule.
// Note that statistics must be merged from subquery/derived table
// to the statistics of the outer query.
struct gp_walk_info
{
execplan::CalpontSelectExecutionPlan::ReturnedColumnList returnedCols;
@@ -110,6 +133,7 @@ struct gp_walk_info
execplan::CalpontSelectExecutionPlan::ReturnedColumnList orderByCols;
std::vector<Item*> extSelAggColsItems;
execplan::CalpontSelectExecutionPlan::ColumnMap columnMap;
TableStatisticsMap tableStatisticsMap;
// This vector temporarily hold the projection columns to be added
// to the returnedCols vector for subquery processing. It will be appended
// to the end of returnedCols when the processing is finished.
@@ -200,7 +224,8 @@ struct gp_walk_info
SubQuery** subQueriesChain;
gp_walk_info(long timeZone_, SubQuery** subQueriesChain_)
: sessionid(0)
: tableStatisticsMap({})
, sessionid(0)
, fatalParseError(false)
, condPush(false)
, dropCond(false)
@@ -230,6 +255,9 @@ struct gp_walk_info
{
}
~gp_walk_info();
void mergeTableStatistics(const TableStatisticsMap& tableStatisticsMap);
std::optional<ColumnStatisticsMap> findStatisticsForATable(SchemaAndTableName& schemaAndTableName);
};
struct SubQueryChainHolder;

View File

@@ -96,6 +96,10 @@ SCSEP SelectSubQuery::transform()
return csep;
}
// Insert column statistics
fGwip.mergeTableStatistics(gwi.tableStatisticsMap);
// Insert subselect CSEP
fGwip.subselectList.push_back(csep);
// remove outer query tables

View File

@@ -14,10 +14,17 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
// One include file to deal with all the MySQL pollution of the
// global namespace
//
// Don't include ANY mysql headers anywhere except here!
/* One include file to deal with all the MySQL pollution of the
global namespace
Don't include ANY mysql headers anywhere except here!
WARN: if any cmake build target uses this include file,
GenError from server must be added to the target dependencies
to generate mysqld_error.h used below
*/
#pragma once
#ifdef TEST_MCSCONFIG_H
@@ -67,6 +74,37 @@
#include "rpl_rli.h"
#include "my_dbug.h"
#include "sql_show.h"
#if MYSQL_VERSION_ID >= 120401
#include "opt_histogram_json.h"
#else
// Mock Histogram_bucket for MySQL < 11.4
struct Histogram_bucket
{
std::string start_value;
double cum_fract;
longlong ndv;
};
class Histogram_json_hb
{
std::vector<Histogram_bucket> buckets;
std::string last_bucket_end_endp;
public:
const std::vector<Histogram_bucket>& get_json_histogram() const
{
return buckets;
}
const std::string& get_last_bucket_end_endp() const
{
return last_bucket_end_endp;
}
};
#endif
#pragma GCC diagnostic pop
// Now clean up the pollution as best we can...

View File

@@ -0,0 +1,260 @@
/* Copyright (C) 2025 MariaDB Corporation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include "rulebased_optimizer.h"
#include "constantcolumn.h"
#include "execplan/calpontselectexecutionplan.h"
#include "execplan/simplecolumn.h"
#include "existsfilter.h"
#include "logicoperator.h"
#include "operator.h"
#include "predicateoperator.h"
#include "simplefilter.h"
namespace optimizer
{
void applyParallelCES_exists(execplan::CalpontSelectExecutionPlan& csep, const size_t id);
static const std::string RewrittenSubTableAliasPrefix = "$added_sub_";
static const size_t MaxParallelFactor = 16;
bool tableIsInUnion(const execplan::CalpontSystemCatalog::TableAliasName& table,
execplan::CalpontSelectExecutionPlan& csep)
{
return std::any_of(csep.unionVec().begin(), csep.unionVec().end(),
[&table](const auto& unionUnit)
{
execplan::CalpontSelectExecutionPlan* unionUnitLocal =
dynamic_cast<execplan::CalpontSelectExecutionPlan*>(unionUnit.get());
bool tableIsPresented =
std::any_of(unionUnitLocal->tableList().begin(), unionUnitLocal->tableList().end(),
[&table](const auto& unionTable) { return unionTable == table; });
return tableIsPresented;
});
}
bool matchParallelCES(execplan::CalpontSelectExecutionPlan& csep)
{
auto tables = csep.tableList();
// This is leaf and there are no other tables at this level in neither UNION, nor derived table.
// TODO filter out CSEPs with orderBy, groupBy, having
// Filter out tables that were re-written.
return tables.size() == 1 && !tables[0].isColumnstore() && !tableIsInUnion(tables[0], csep);
}
// This routine produces a new ParseTree that is AND(lowerBand <= column, column <= upperBand)
// TODO add engine-independent statistics-derived ranges
execplan::ParseTree* filtersWithNewRangeAddedIfNeeded(execplan::SCSEP& csep, execplan::SimpleColumn& column,
std::pair<uint64_t, uint64_t>& bound)
{
auto tableKeyColumnLeftOp = new execplan::SimpleColumn(column);
tableKeyColumnLeftOp->resultType(column.resultType());
// TODO Nobody owns this allocation and cleanup only depends on delete in ParseTree nodes' dtors.
auto* filterColLeftOp = new execplan::ConstantColumnUInt(bound.second, 0, 0);
// set TZ
// There is a question with ownership of the const column
// TODO here we lost upper bound value if predicate is not changed to weak lt
execplan::SOP ltOp = boost::make_shared<execplan::Operator>(execplan::PredicateOperator("<"));
ltOp->setOpType(filterColLeftOp->resultType(), tableKeyColumnLeftOp->resultType());
ltOp->resultType(ltOp->operationType());
auto* sfr = new execplan::SimpleFilter(ltOp, tableKeyColumnLeftOp, filterColLeftOp);
auto tableKeyColumnRightOp = new execplan::SimpleColumn(column);
tableKeyColumnRightOp->resultType(column.resultType());
// TODO hardcoded column type and value
auto* filterColRightOp = new execplan::ConstantColumnUInt(bound.first, 0, 0);
execplan::SOP gtOp = boost::make_shared<execplan::Operator>(execplan::PredicateOperator(">="));
gtOp->setOpType(filterColRightOp->resultType(), tableKeyColumnRightOp->resultType());
gtOp->resultType(gtOp->operationType());
auto* sfl = new execplan::SimpleFilter(gtOp, tableKeyColumnRightOp, filterColRightOp);
execplan::ParseTree* ptp = new execplan::ParseTree(new execplan::LogicOperator("and"));
ptp->right(sfr);
ptp->left(sfl);
auto* currentFilters = csep->filters();
if (currentFilters)
{
execplan::ParseTree* andWithExistingFilters =
new execplan::ParseTree(new execplan::LogicOperator("and"), currentFilters, ptp);
return andWithExistingFilters;
}
return ptp;
}
// Looking for a projected column that comes first in an available index and has EI statistics
// INV nullptr signifies that no suitable column was found
execplan::SimpleColumn* findSuitableKeyColumn(execplan::CalpontSelectExecutionPlan& csep, optimizer::RBOptimizerContext& ctx)
{
for (auto& rc : csep.returnedCols())
{
auto* simpleColumn = dynamic_cast<execplan::SimpleColumn*>(rc.get());
if (simpleColumn)
{
cal_impl_if::SchemaAndTableName schemaAndTableNam = {simpleColumn->schemaName(), simpleColumn->tableName()};
auto columnStatistics = ctx.gwi.findStatisticsForATable(schemaAndTableNam);
if (!columnStatistics)
{
continue;
}
auto columnStatisticsIt = columnStatistics->find(simpleColumn->columnName());
if (columnStatisticsIt != columnStatistics->end())
{
return simpleColumn;
}
}
}
return nullptr;
}
// TODO char and other numerical types support
execplan::CalpontSelectExecutionPlan::SelectList makeUnionFromTable(
execplan::CalpontSelectExecutionPlan& csep, optimizer::RBOptimizerContext& ctx)
{
execplan::CalpontSelectExecutionPlan::SelectList unionVec;
// SC type controls an integral type used to produce suitable filters. The continuation of this function
// should become a template function based on SC type.
execplan::SimpleColumn* keyColumn = findSuitableKeyColumn(csep, ctx);
if (!keyColumn)
{
return unionVec;
}
cal_impl_if::SchemaAndTableName schemaAndTableName = {keyColumn->schemaName(), keyColumn->tableName()};
auto tableColumnsStatisticsIt = ctx.gwi.tableStatisticsMap.find(schemaAndTableName);
if (tableColumnsStatisticsIt == ctx.gwi.tableStatisticsMap.end())
{
return unionVec;
}
auto columnStatisticsIt = tableColumnsStatisticsIt->second.find(keyColumn->columnName());
if (columnStatisticsIt == tableColumnsStatisticsIt->second.end())
{
return unionVec;
}
auto columnStatistics = columnStatisticsIt->second;
// TODO configurable parallel factor via session variable
// NB now histogram size is the way to control parallel factor with 16 being the maximum
size_t numberOfUnionUnits = std::min(columnStatistics.get_json_histogram().size(), MaxParallelFactor);
size_t numberOfBucketsPerUnionUnit = columnStatistics.get_json_histogram().size() / numberOfUnionUnits;
// TODO char and other numerical types support
std::vector<std::pair<uint64_t, uint64_t>> bounds;
// Loop over buckets to produce filter ranges
for (size_t i = 0; i < numberOfUnionUnits - 1; ++i)
{
auto bucket = columnStatistics.get_json_histogram().begin() + i * numberOfBucketsPerUnionUnit;
auto endBucket = columnStatistics.get_json_histogram().begin() + (i + 1) * numberOfBucketsPerUnionUnit;
uint64_t currentLowerBound = *(uint32_t*)bucket->start_value.data();
uint64_t currentUpperBound = *(uint32_t*)endBucket->start_value.data();
bounds.push_back({currentLowerBound, currentUpperBound});
}
// Add last range
// NB despite the fact that currently Histogram_json_hb has the last bucket that has end as its start
auto lastBucket = columnStatistics.get_json_histogram().begin() + (numberOfUnionUnits - 1) * numberOfBucketsPerUnionUnit;
uint64_t currentLowerBound = *(uint32_t*)lastBucket->start_value.data();
uint64_t currentUpperBound = *(uint32_t*)columnStatistics.get_last_bucket_end_endp().data();
bounds.push_back({currentLowerBound, currentUpperBound});
for (auto& bound : bounds)
{
auto clonedCSEP = csep.cloneWORecursiveSelects();
// Add BETWEEN based on key column range
clonedCSEP->filters(filtersWithNewRangeAddedIfNeeded(clonedCSEP, *keyColumn, bound));
unionVec.push_back(clonedCSEP);
}
return unionVec;
}
// Rule applier for the "parallelCES" rewrite: takes a leaf CSEP that reads a
// single foreign (non-Columnstore) table and wraps it into a derived table
// whose union legs each scan a disjoint key-column range (built by
// makeUnionFromTable), so the foreign table can be read in parallel.
// Mutates `csep` in place; `ctx.uniqueId` disambiguates generated aliases.
void applyParallelCES(execplan::CalpontSelectExecutionPlan& csep, RBOptimizerContext& ctx)
{
auto tables = csep.tableList();
execplan::CalpontSelectExecutionPlan::TableList newTableList;
execplan::CalpontSelectExecutionPlan::SelectList newDerivedTableList;
execplan::CalpontSelectExecutionPlan::ReturnedColumnList newReturnedColumns;
// ATM Must be only 1 table
for (auto& table : tables)
{
if (!table.isColumnstore())
{
// Clone the current plan; the clone becomes the derived-table subquery.
auto derivedSCEP = csep.cloneWORecursiveSelects();
// need to add a level here
// ctx.uniqueId keeps aliases distinct across repeated rule applications.
std::string tableAlias = RewrittenSubTableAliasPrefix + table.schema + "_" + table.table + "_" +
std::to_string(ctx.uniqueId);
derivedSCEP->location(execplan::CalpontSelectExecutionPlan::FROM);
derivedSCEP->subType(execplan::CalpontSelectExecutionPlan::FROM_SUBS);
derivedSCEP->derivedTbAlias(tableAlias);
// Create a copy of the current leaf CSEP with additional filters to partition the key column
auto additionalUnionVec = makeUnionFromTable(csep, ctx);
derivedSCEP->unionVec().insert(derivedSCEP->unionVec().end(), additionalUnionVec.begin(),
additionalUnionVec.end());
size_t colPosition = 0;
// change parent to derived table columns
for (auto& rc : csep.returnedCols())
{
// NOTE(review): builds a SimpleColumn from each returned column;
// presumably the leaf only returns simple columns — confirm.
auto rcCloned = boost::make_shared<execplan::SimpleColumn>(*rc);
// TODO timezone and result type are not copied
// TODO add specific ctor for this functionality
rcCloned->tableName("");
rcCloned->schemaName("");
rcCloned->tableAlias(tableAlias);
rcCloned->colPosition(colPosition++);
rcCloned->resultType(rc->resultType());
newReturnedColumns.push_back(rcCloned);
}
newDerivedTableList.push_back(derivedSCEP);
// Register the derived table in the parent's FROM list under the new alias.
execplan::CalpontSystemCatalog::TableAliasName tn = execplan::make_aliasview("", "", tableAlias, "");
newTableList.push_back(tn);
// Remove the filters as they were pushed down to union units
// This is inappropriate for EXISTS filter and join conditions
derivedSCEP->filters(nullptr);
}
}
// Remove the filters if necessary using csep.filters(nullptr) as they were pushed down to union units
// But this is inappropriate for EXISTS filter and join conditions
// There must be no derived at this point, so we can replace it with the new derived table list
csep.derivedTableList(newDerivedTableList);
// Replace table list with new table list populated with union units
csep.tableList(newTableList);
csep.returnedCols(newReturnedColumns);
}
} // namespace optimizer

View File

@@ -1,4 +1,4 @@
/* Copyright (C) 2020 MariaDB Corporation
/* Copyright (C) 2025 MariaDB Corporation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
@@ -17,18 +17,14 @@
#pragma once
#include <iostream>
#include <fstream>
#include <cstdlib>
#include <string>
#include <limits.h>
#include <sstream>
#include "configcpp.h"
#include "liboamcpp.h"
#define PREFER_MY_CONFIG_H
#include <my_config.h>
#include "idb_mysql.h"
void getSystemNetworkConfig(FILE* pOutputFile);
void getModuleTypeConfig(FILE* pOutputFile);
void getStorageConfig(FILE* pOutputFile);
void getStorageStatus(FILE* pOutputFile);
bool checkLogStatus(std::string filename, std::string phase);
std::string getIPAddress(std::string hostName);
#include "execplan/calpontselectexecutionplan.h"
#include "rulebased_optimizer.h"
namespace optimizer {
bool matchParallelCES(execplan::CalpontSelectExecutionPlan& csep);
void applyParallelCES(execplan::CalpontSelectExecutionPlan& csep, optimizer::RBOptimizerContext& ctx);
}

View File

@@ -15,49 +15,57 @@
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include "rulebased_optimizer.h"
#include "constantcolumn.h"
#include "execplan/calpontselectexecutionplan.h"
#include "execplan/simplecolumn.h"
#include "existsfilter.h"
#include "logicoperator.h"
#include "operator.h"
#include "predicateoperator.h"
#include "simplefilter.h"
#include "rulebased_optimizer.h"
#include "rbo_apply_parallel_ces.h"
namespace optimizer
{
static const std::string RewrittenSubTableAliasPrefix = "$added_sub_";
// Apply a list of rules to a CSEP
bool optimizeCSEPWithRules(execplan::CalpontSelectExecutionPlan& root, const std::vector<Rule>& rules)
bool optimizeCSEPWithRules(execplan::CalpontSelectExecutionPlan& root, const std::vector<Rule>& rules,
optimizer::RBOptimizerContext& ctx)
{
bool changed = false;
for (const auto& rule : rules)
{
changed |= rule.apply(root);
changed |= rule.apply(root, ctx);
}
return changed;
}
// high level API call for optimizer
bool optimizeCSEP(execplan::CalpontSelectExecutionPlan& root)
bool optimizeCSEP(execplan::CalpontSelectExecutionPlan& root, optimizer::RBOptimizerContext& ctx)
{
optimizer::Rule parallelCES{"parallelCES", optimizer::matchParallelCES, optimizer::applyParallelCES};
std::vector<Rule> rules = {parallelCES};
std::vector<optimizer::Rule> rules = {parallelCES};
return optimizeCSEPWithRules(root, rules);
return optimizeCSEPWithRules(root, rules, ctx);
}
// Apply iteratively until CSEP is converged by rule
bool Rule::apply(execplan::CalpontSelectExecutionPlan& root) const
bool Rule::apply(execplan::CalpontSelectExecutionPlan& root, optimizer::RBOptimizerContext& ctx) const
{
bool changedThisRound = false;
bool hasBeenApplied = false;
do
{
changedThisRound = walk(root);
changedThisRound = walk(root, ctx);
hasBeenApplied |= changedThisRound;
} while (changedThisRound && !applyOnlyOnce);
@@ -65,188 +73,45 @@ bool Rule::apply(execplan::CalpontSelectExecutionPlan& root) const
}
// DFS walk to match CSEP and apply rules if match
bool Rule::walk(execplan::CalpontSelectExecutionPlan& csep) const
bool Rule::walk(execplan::CalpontSelectExecutionPlan& csep, optimizer::RBOptimizerContext& ctx) const
{
bool rewrite = false;
for (auto& table : csep.derivedTableList())
std::stack<execplan::CalpontSelectExecutionPlan*> planStack;
planStack.push(&csep);
while (!planStack.empty())
{
execplan::CalpontSelectExecutionPlan* current = planStack.top();
planStack.pop();
for (auto& table : current->derivedTableList())
{
auto* csepPtr = dynamic_cast<execplan::CalpontSelectExecutionPlan*>(table.get());
if (!csepPtr)
if (csepPtr)
{
continue;
planStack.push(csepPtr);
}
}
auto& csepLocal = *csepPtr;
rewrite |= walk(csepLocal);
}
for (auto& unionUnit : csep.unionVec())
for (auto& unionUnit : current->unionVec())
{
auto* unionUnitPtr = dynamic_cast<execplan::CalpontSelectExecutionPlan*>(unionUnit.get());
if (!unionUnitPtr)
if (unionUnitPtr)
{
continue;
planStack.push(unionUnitPtr);
}
}
auto& unionUnitLocal = *unionUnitPtr;
rewrite |= walk(unionUnitLocal);
}
if (matchRule(csep))
if (matchRule(*current))
{
applyRule(csep);
applyRule(*current, ctx);
++ctx.uniqueId;
rewrite = true;
}
}
return rewrite;
}
// Returns true when `table` already appears in the table list of one of the
// union units of `csep`, i.e. it was already rewritten by this rule.
bool tableIsInUnion(const execplan::CalpontSystemCatalog::TableAliasName& table,
                    execplan::CalpontSelectExecutionPlan& csep)
{
  for (auto& unionUnit : csep.unionVec())
  {
    auto* unit = dynamic_cast<execplan::CalpontSelectExecutionPlan*>(unionUnit.get());
    // NOTE(review): mirrors the original behavior, which assumes every union
    // unit is a CalpontSelectExecutionPlan (no null check after the cast).
    for (auto& unionTable : unit->tableList())
    {
      if (unionTable == table)
      {
        return true;
      }
    }
  }
  return false;
}
// Rule matcher: fires only for a leaf CSEP that selects from exactly one
// non-Columnstore table that was not already rewritten into a union unit.
// WIP filter out CSEPs with orderBy, groupBy, having
bool matchParallelCES(execplan::CalpontSelectExecutionPlan& csep)
{
  const auto& tables = csep.tableList();
  // Only a single-table leaf qualifies: no other tables at this level in
  // either UNION or derived-table position.
  if (tables.size() != 1)
  {
    return false;
  }
  // Skip native Columnstore tables and tables that were re-written already.
  if (tables[0].isColumnstore() || tableIsInUnion(tables[0], csep))
  {
    return false;
  }
  return true;
}
// This routine produces a new ParseTree that is AND(lowerBand <= column, column <= upperBand)
// Builds the range predicate 30 <= column <= 42 on the first returned column
// of `csep` and ANDs it onto any pre-existing filter tree. All ParseTree /
// SimpleFilter nodes are raw-allocated; ownership transfers to the returned
// tree, whose destructor is expected to delete them.
// TODO add engine-independent statistics-derived ranges
execplan::ParseTree* filtersWithNewRangeAddedIfNeeded(execplan::SCSEP& csep)
{
// INV this is SimpleColumn we supply as an argument
// TODO find the suitable column using EI statistics.
auto* column = dynamic_cast<execplan::SimpleColumn*>(csep->returnedCols().front().get());
assert(column);
// Left operand (column side) of the upper-bound comparison.
auto tableKeyColumnLeftOp = new execplan::SimpleColumn(*column);
tableKeyColumnLeftOp->resultType(column->resultType());
// TODO Nobody owns this allocation and cleanup only depends on delete in ParseTree nodes' dtors.
// Hard-coded upper bound constant (42).
auto* filterColLeftOp = new execplan::ConstantColumnUInt(42ULL, 0, 0);
// set TZ
// There is a question with ownership of the const column
execplan::SOP ltOp = boost::make_shared<execplan::Operator>(execplan::PredicateOperator("<="));
ltOp->setOpType(filterColLeftOp->resultType(), tableKeyColumnLeftOp->resultType());
ltOp->resultType(ltOp->operationType());
// sfr: (column <= 42), becomes the right child of the AND node below.
auto* sfr = new execplan::SimpleFilter(ltOp, tableKeyColumnLeftOp, filterColLeftOp);
// auto tableKeyColumn = derivedSCEP->returnedCols().front();
auto tableKeyColumnRightOp = new execplan::SimpleColumn(*column);
tableKeyColumnRightOp->resultType(column->resultType());
// TODO hardcoded column type and value
// Hard-coded lower bound constant (30).
auto* filterColRightOp = new execplan::ConstantColumnUInt(30ULL, 0, 0);
execplan::SOP gtOp = boost::make_shared<execplan::Operator>(execplan::PredicateOperator(">="));
gtOp->setOpType(filterColRightOp->resultType(), tableKeyColumnRightOp->resultType());
gtOp->resultType(gtOp->operationType());
// sfl: (column >= 30), becomes the left child of the AND node below.
auto* sfl = new execplan::SimpleFilter(gtOp, tableKeyColumnRightOp, filterColRightOp);
execplan::ParseTree* ptp = new execplan::ParseTree(new execplan::LogicOperator("and"));
ptp->right(sfr);
ptp->left(sfl);
// If the plan already has filters, AND the new range onto them; otherwise
// the range tree alone becomes the filter.
auto* currentFilters = csep->filters();
if (currentFilters)
{
execplan::ParseTree* andWithExistingFilters =
new execplan::ParseTree(new execplan::LogicOperator("and"), currentFilters, ptp);
return andWithExistingFilters;
}
return ptp;
}
// Builds `numberOfLegs` union legs, each a clone of `csep` carrying an extra
// range filter on the key column, so the legs can scan partitions in parallel.
execplan::CalpontSelectExecutionPlan::SelectList makeUnionFromTable(
    const size_t numberOfLegs, execplan::CalpontSelectExecutionPlan& csep)
{
  execplan::CalpontSelectExecutionPlan::SelectList legs;
  legs.reserve(numberOfLegs);
  size_t legsLeft = numberOfLegs;
  while (legsLeft-- > 0)
  {
    auto leg = csep.cloneWORecursiveSelects();
    // Attach the BETWEEN-style range filter for the key column.
    leg->filters(filtersWithNewRangeAddedIfNeeded(leg));
    legs.push_back(leg);
  }
  return legs;
}
// Rule applier: rewrites a leaf CSEP over a single foreign (non-Columnstore)
// table into a derived table whose union legs (parallelFactor of them) each
// carry a partitioning range filter on the key column. Mutates `csep` in place.
void applyParallelCES(execplan::CalpontSelectExecutionPlan& csep)
{
auto tables = csep.tableList();
execplan::CalpontSelectExecutionPlan::TableList newTableList;
execplan::CalpontSelectExecutionPlan::SelectList newDerivedTableList;
execplan::CalpontSelectExecutionPlan::ReturnedColumnList newReturnedColumns;
// ATM Must be only 1 table
for (auto& table : tables)
{
if (!table.isColumnstore())
{
// Clone the current plan; the clone becomes the derived-table subquery.
auto derivedSCEP = csep.cloneWORecursiveSelects();
// need to add a level here
std::string tableAlias = RewrittenSubTableAliasPrefix + table.schema + "_" + table.table;
derivedSCEP->location(execplan::CalpontSelectExecutionPlan::FROM);
derivedSCEP->subType(execplan::CalpontSelectExecutionPlan::FROM_SUBS);
derivedSCEP->derivedTbAlias(tableAlias);
// TODO: hardcoded for now
size_t parallelFactor = 2;
// Create a copy of the current leaf CSEP with additional filters to partition the key column
auto additionalUnionVec = makeUnionFromTable(parallelFactor, csep);
derivedSCEP->unionVec().insert(derivedSCEP->unionVec().end(), additionalUnionVec.begin(),
additionalUnionVec.end());
size_t colPosition = 0;
// change parent to derived table columns
for (auto& rc : csep.returnedCols())
{
// NOTE(review): builds a SimpleColumn from each returned column;
// presumably the leaf only returns simple columns — confirm.
auto rcCloned = boost::make_shared<execplan::SimpleColumn>(*rc);
// TODO timezone and result type are not copied
// TODO add specific ctor for this functionality
rcCloned->tableName("");
rcCloned->schemaName("");
rcCloned->tableAlias(tableAlias);
rcCloned->colPosition(colPosition++);
rcCloned->resultType(rc->resultType());
newReturnedColumns.push_back(rcCloned);
}
newDerivedTableList.push_back(derivedSCEP);
// Register the derived table in the parent's FROM list under the new alias.
execplan::CalpontSystemCatalog::TableAliasName tn = execplan::make_aliasview("", "", tableAlias, "");
newTableList.push_back(tn);
// Remove the filters as they were pushed down to union units
derivedSCEP->filters(nullptr);
}
}
// Remove the filters as they were pushed down to union units
csep.filters(nullptr);
// There must be no derived at this point.
csep.derivedTableList(newDerivedTableList);
// Replace table list with new table list populated with union units
csep.tableList(newTableList);
csep.returnedCols(newReturnedColumns);
}
} // namespace optimizer

View File

@@ -18,14 +18,31 @@
#pragma once
#include <string>
#define PREFER_MY_CONFIG_H
#include <my_config.h>
#include "idb_mysql.h"
#include "ha_mcs_impl_if.h"
#include "execplan/calpontselectexecutionplan.h"
namespace optimizer {
class RBOptimizerContext {
public:
RBOptimizerContext() = delete;
RBOptimizerContext(cal_impl_if::gp_walk_info& walk_info) : gwi(walk_info) {}
// gwi lifetime should be longer than optimizer context.
// In plugin runtime this is always true.
cal_impl_if::gp_walk_info& gwi;
uint64_t uniqueId {0};
};
struct Rule
{
using RuleMatcher = bool (*)(execplan::CalpontSelectExecutionPlan&);
using RuleApplier = void (*)(execplan::CalpontSelectExecutionPlan&);
using RuleApplier = void (*)(execplan::CalpontSelectExecutionPlan&, RBOptimizerContext&);
Rule(std::string&& name, RuleMatcher matchRule, RuleApplier applyRule)
: name(name), matchRule(matchRule), applyRule(applyRule) {};
@@ -39,15 +56,18 @@ struct Rule
Rule() = default;
Rule(const Rule&) = default;
Rule(Rule&&) = default;
std::string getName() const
{
return name;
}
Rule& operator=(const Rule&) = default;
Rule& operator=(Rule&&) = default;
bool apply(execplan::CalpontSelectExecutionPlan& csep) const;
bool walk(execplan::CalpontSelectExecutionPlan& csep) const;
bool apply(execplan::CalpontSelectExecutionPlan& csep, RBOptimizerContext& ctx) const;
bool walk(execplan::CalpontSelectExecutionPlan& csep, RBOptimizerContext& ctx) const;
};
bool matchParallelCES(execplan::CalpontSelectExecutionPlan& csep);
void applyParallelCES(execplan::CalpontSelectExecutionPlan& csep);
bool optimizeCSEP(execplan::CalpontSelectExecutionPlan& root);
bool optimizeCSEP(execplan::CalpontSelectExecutionPlan& root, RBOptimizerContext& ctx);
}

View File

@@ -6,4 +6,4 @@ set(DDLProc_SRCS ddlproc.cpp ddlprocessor.cpp ../utils/common/crashtrace.cpp)
columnstore_executable(DDLProc ${DDLProc_SRCS})
columnstore_link(DDLProc ${ENGINE_LDFLAGS} ${ENGINE_WRITE_LIBS} ${NETSNMP_LIBRARIES} threadpool loggingcpp)
columnstore_link(DDLProc ${ENGINE_LDFLAGS} ${ENGINE_WRITE_LIBS} threadpool loggingcpp)

View File

@@ -12,7 +12,6 @@ columnstore_link(
DMLProc
${ENGINE_LDFLAGS}
${ENGINE_WRITE_LIBS}
${NETSNMP_LIBRARIES}
threadpool
ddlcleanuputil
batchloader

View File

@@ -1,59 +0,0 @@
#!/usr/bin/env bash
set -e
if test $(id -u) != 0 ; then
SUDO=sudo
fi
export LC_ALL=C
source /etc/os-release
case "$ID" in
ubuntu|debian)
echo "Using apt-get to install dependencies"
$SUDO apt-get update -y
$SUDO apt-get install -y build-essential automake libboost-all-dev bison \
cmake libncurses5-dev libreadline-dev libperl-dev libssl-dev \
libxml2-dev libkrb5-dev flex libpam-dev libreadline-dev libsnappy-dev \
libcurl4-openssl-dev
$SUDO apt-get install -y libboost-dev libboost-all-dev
case "$VERSION" in
*Bionic*)
echo "Install dependencies specific to Ubuntu Bionic"
;;
*Focal*)
echo "Install dependencies specific to Ubuntu Focal"
;;
*)
echo "Unknown OS distribution"
;;
esac
;;
centos)
echo "Using yum to install dependencies"
$SUDO yum -y install epel-release
$SUDO yum -y groupinstall "Development Tools"
$SUDO yum -y install bison ncurses-devel readline-devel perl-devel \
openssl-devel cmake libxml2-devel gperf libaio-devel libevent-devel \
python-devel ruby-devel tree wget pam-devel snappy-devel libicu \
wget strace ltrace gdb rsyslog net-tools openssh-server expect boost \
perl-DBI libicu boost-devel initscripts jemalloc-devel libcurl-devel
;;
opensuse*|suse|sles)
echo "Using zypper to install dependencies"
$SUDO zypper install -y bison ncurses-devel readline-devel \
libopenssl-devel cmake libxml2-devel gperf libaio-devel \
libevent-devel python-devel ruby-devel tree wget pam-devel \
snappy-devel libicu-devel libboost_system-devel \
libboost_filesystem-devel libboost_thread-devel libboost_regex-devel \
libboost_date_time-devel libboost_chrono-devel wget strace ltrace gdb \
rsyslog net-tools expect perl-DBI libicu boost-devel jemalloc-devel \
libcurl-devel gcc gcc-c++ automake libtool
;;
*)
echo "$ID is unknown, dependencies will have to be installed manually."
exit 1
;;
esac
echo "Dependencies have been installed successfully"

View File

@@ -0,0 +1,22 @@
DROP DATABASE IF EXISTS mcol4882;
CREATE DATABASE mcol4882;
USE mcol4882;
CREATE TABLE t1(col1 INT, col2 VARCHAR(64)) ENGINE=Columnstore;
SELECT * FROM t1;
col1 col2
1 test1
2 test2
3 test3
TRUNCATE t1;
SELECT * FROM t1;
col1 col2
2 test2
3 test3
TRUNCATE t1;
SELECT * FROM t1;
col1 col2
3 test3
TRUNCATE t1;
SELECT * FROM t1;
col1 col2
DROP DATABASE mcol4882;

View File

@@ -0,0 +1,15 @@
DROP DATABASE IF EXISTS mcol5164;
CREATE DATABASE mcol5164;
USE mcol5164;
CREATE TABLE t1(col1 INT, col2 VARCHAR(64)) ENGINE=Columnstore;
SELECT * FROM t1;
col1 col2
TRUNCATE t1;
SELECT * FROM t1;
col1 col2
11 test11-good
TRUNCATE t1;
SELECT * FROM t1;
col1 col2
11 test11-good
DROP DATABASE mcol5164;

View File

@@ -0,0 +1,20 @@
DROP DATABASE IF EXISTS mcol5164rep;
CREATE DATABASE mcol5164rep;
USE mcol5164rep;
CREATE TABLE t1(col1 INT, col2 VARCHAR(64)) ENGINE=Columnstore;
SELECT * FROM t1;
col1 col2
11 test11-good
Rejected rows:
0,test0,wrong
1,test1,wrong
2,test2,wrong
3,test3,wrong
4,test4,wrong
5,test5,wrong
6,test6,wrong
7,test7,wrong
8,test8,wrong
9,test9,wrong
10,test10,wrong
DROP DATABASE mcol5164rep;

View File

@@ -0,0 +1,43 @@
if (!$MYSQL_TEST_ROOT){
skip Should be run by root to execute cpimport;
}
-- source ../include/have_columnstore.inc
--disable_warnings
DROP DATABASE IF EXISTS mcol4882;
--enable_warnings
CREATE DATABASE mcol4882;
USE mcol4882;
CREATE TABLE t1(col1 INT, col2 VARCHAR(64)) ENGINE=Columnstore;
--exec printf '1,test1\n2,test2\n3,test3\n' > /tmp/mcol4882.csv
--disable_result_log
--exec $MCS_CPIMPORT -s , mcol4882 t1 /tmp/mcol4882.csv
--enable_result_log
SELECT * FROM t1;
TRUNCATE t1;
--disable_result_log
--exec $MCS_CPIMPORT -s , --headers -- mcol4882 t1 /tmp/mcol4882.csv
--enable_result_log
SELECT * FROM t1;
TRUNCATE t1;
--disable_result_log
--exec $MCS_CPIMPORT -s , --headers 2 mcol4882 t1 /tmp/mcol4882.csv
--enable_result_log
SELECT * FROM t1;
TRUNCATE t1;
--disable_result_log
--exec $MCS_CPIMPORT -s , --headers 5 mcol4882 t1 /tmp/mcol4882.csv
--enable_result_log
SELECT * FROM t1;
# Clean UP
--exec rm -f /tmp/mcol4882.csv
DROP DATABASE mcol4882;

View File

@@ -0,0 +1,41 @@
if (!$MYSQL_TEST_ROOT){
skip Should be run by root to execute cpimport;
}
--source ../include/have_columnstore.inc
--disable_warnings
DROP DATABASE IF EXISTS mcol5164;
--enable_warnings
CREATE DATABASE mcol5164;
USE mcol5164;
CREATE TABLE t1(col1 INT, col2 VARCHAR(64)) ENGINE=Columnstore;
--exec mkdir -p /tmp/mtr-mcol5164
--exec awk 'BEGIN { for (i = 0; i < 11; i++) { printf "%d,test%d,wrong\n", i, i; }; printf "%d,test%d-good\n", i, i; }' > /tmp/mtr-mcol5164/mcol5164.csv
--disable_result_log
--error 1 # exceeds default max-errors
--exec $MCS_CPIMPORT -s , -L /tmp/mtr-mcol5164 mcol5164 t1 /tmp/mtr-mcol5164/mcol5164.csv
--enable_result_log
SELECT * FROM t1;
TRUNCATE t1;
# implicitly set max-errors
--disable_result_log
--exec $MCS_CPIMPORT -s , -e 11 -L /tmp/mtr-mcol5164 mcol5164 t1 /tmp/mtr-mcol5164/mcol5164.csv
--enable_result_log
SELECT * FROM t1;
TRUNCATE t1;
# max-errors = all
--disable_result_log
--exec $MCS_CPIMPORT -s , -e all -L /tmp/mtr-mcol5164 mcol5164 t1 /tmp/mtr-mcol5164/mcol5164.csv
--enable_result_log
SELECT * FROM t1;
# Clean UP
--exec rm -rf /tmp/mtr-mcol5164
DROP DATABASE mcol5164;

View File

@@ -0,0 +1,35 @@
if (!$MYSQL_TEST_ROOT){
skip Should be run by root to execute cpimport;
}
--source ../include/have_columnstore.inc
--source ../include/check_multinode.inc
--if ($columnstore_nodes_count != 1) {
--skip This test makes sense when run on a single-node setup
--}
--disable_warnings
DROP DATABASE IF EXISTS mcol5164rep;
--enable_warnings
CREATE DATABASE mcol5164rep;
USE mcol5164rep;
CREATE TABLE t1(col1 INT, col2 VARCHAR(64)) ENGINE=Columnstore;
--exec mkdir -p /tmp/mtr-mcol5164rep
--exec awk 'BEGIN { for (i = 0; i < 11; i++) { printf "%d,test%d,wrong\n", i, i; }; printf "%d,test%d-good\n", i, i; }' > /tmp/mtr-mcol5164rep/mcol5164rep.csv
--disable_result_log
--exec $MCS_CPIMPORT -e all -s , -L /tmp/mtr-mcol5164rep mcol5164rep t1 /tmp/mtr-mcol5164rep/mcol5164rep.csv
--enable_result_log
SELECT * FROM t1;
--exec echo Rejected rows:
--exec cat /tmp/mtr-mcol5164rep/mcol5164rep.csv*.bad
--exec rm -f /tmp/mtr-mcol5164rep/mcol5164rep.csv*.err
--exec rm -f /tmp/mtr-mcol5164rep/mcol5164rep.csv*.bad
# Clean UP
--exec rm -rf /tmp/mtr-mcol5164rep
DROP DATABASE mcol5164rep;

View File

@@ -2,7 +2,6 @@
# MCOL-5480 LDI loads values incorrectly for MEDIUMINT, TIME and TIMESTAMP
# when cpimport is used for batch insert
#
--source ../include/disable_11.4.inc
--source ../include/have_columnstore.inc
--source ../include/detect_maxscale.inc

View File

@@ -0,0 +1,4 @@
--let CHECK_MULTINODE_RESULT=$MYSQL_TMP_DIR/check_multinode_result.inc
--exec echo "--let columnstore_nodes_count=`mcsGetConfig PrimitiveServers Count`" > $CHECK_MULTINODE_RESULT
--source $CHECK_MULTINODE_RESULT
--remove_file $CHECK_MULTINODE_RESULT

View File

@@ -24,9 +24,17 @@ if(WITH_COLUMNSTORE_ASAN)
endif(WITH_COLUMNSTORE_REPORT_PATH)
set(LD_PRELOAD_STRING "")
set(ALLOC_CONFIG
"ASAN_OPTIONS=abort_on_error=1:disable_coredump=0,print_stats=false,detect_odr_violation=0,check_initialization_order=1,detect_stack_use_after_return=1,atexit=false"
set(ABORT_ON_ERROR "1")
if(COLUMNSTORE_PACKAGES_BUILD)
set(ABORT_ON_ERROR "0")
endif()
set(ASAN_OPTIONS
abort_on_error=${ABORT_ON_ERROR}:disable_coredump=0:print_stats=0:detect_odr_violation=0:check_initialization_order=1:detect_stack_use_after_return=1:atexit=0
)
set(ALLOC_CONFIG ASAN_OPTIONS=${ASAN_OPTIONS})
set(PRIMPROC_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${ASAN_PATH}.primproc)
set(DMLPROC_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${ASAN_PATH}.dmlproc)
set(DDLPROC_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${ASAN_PATH}.ddlproc)
@@ -34,6 +42,12 @@ if(WITH_COLUMNSTORE_ASAN)
set(CONTROLLERNODE_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${ASAN_PATH}.controllernode)
set(WORKERNODE_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${ASAN_PATH}.workernode)
set(STORAGEMANAGER_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${ASAN_PATH}.storagemanager)
set(COLUMNSTORE_STANDALONE_BINARIES_ASAN_COPTIONS ${ASAN_OPTIONS})
configure_file(
"${CMAKE_SOURCE_DIR}/storage/columnstore/columnstore/build/asan_options.cpp.in"
"${CMAKE_BINARY_DIR}/asan_options.cpp" @ONLY
)
endif()
if(WITH_UBSAN)
@@ -43,7 +57,8 @@ if(WITH_UBSAN)
endif(WITH_COLUMNSTORE_REPORT_PATH)
set(LD_PRELOAD_STRING "")
set(ALLOC_CONFIG "UBSAN_OPTIONS=abort_on_error=0,print_stacktrace=true")
set(UBSAN_OPTIONS abort_on_error=0:print_stacktrace=1)
set(ALLOC_CONFIG "UBSAN_OPTIONS="${UBSAN_OPTIONS})
set(PRIMPROC_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${UBSAN_PATH}.primproc)
set(DMLPROC_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${UBSAN_PATH}.dmlproc)
set(DDLPROC_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${UBSAN_PATH}.ddlproc)
@@ -60,7 +75,8 @@ if(WITH_TSAN)
endif(WITH_COLUMNSTORE_REPORT_PATH)
set(LD_PRELOAD_STRING "")
set(ALLOC_CONFIG "TSAN_OPTIONS=abort_on_error=0:log_path=${TSAN_PATH}")
set(TSAN_OPTIONS abort_on_error=0:log_path=${TSAN_PATH})
set(ALLOC_CONFIG TSAN_OPTIONS=${TSAN_OPTIONS})
set(PRIMPROC_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${TSAN_PATH}.primproc)
set(DMLPROC_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${TSAN_PATH}.dmlproc)
set(DDLPROC_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${TSAN_PATH}.ddlproc)

View File

@@ -5,6 +5,6 @@ include_directories(${ENGINE_COMMON_INCLUDES})
set(oamcpp_LIB_SRCS liboamcpp.cpp oamcache.cpp)
columnstore_library(oamcpp ${oamcpp_LIB_SRCS})
columnstore_link(oamcpp loggingcpp)
columnstore_link(oamcpp loggingcpp idbboot)
target_compile_options(oamcpp PRIVATE -Wno-unused-result)

View File

@@ -1,16 +0,0 @@
include_directories(${ENGINE_COMMON_INCLUDES})
# ########## next target ###############
set(columnstoreSupport_SRCS columnstoreSupport.cpp mcsSupportUtil.cpp)
columnstore_executable(columnstoreSupport ${columnstoreSupport_SRCS})
target_compile_options(columnstoreSupport PRIVATE -Wno-unused-result)
columnstore_link(columnstoreSupport ${ENGINE_LDFLAGS} ncurses ${ENGINE_EXEC_LIBS})
columnstore_install_program(dbmsReport.sh, ${ENGINE_BINDIR})
columnstore_install_program(bulklogReport.sh, ${ENGINE_BINDIR})
columnstore_install_program(configReport.sh, ${ENGINE_BINDIR})
columnstore_install_program(hardwareReport.sh, ${ENGINE_BINDIR})
columnstore_install_program(logReport.sh, ${ENGINE_BINDIR})
columnstore_install_program(resourceReport.sh, ${ENGINE_BINDIR})

View File

@@ -1,39 +0,0 @@
#! /bin/sh
#
# $Id: logReport.sh 421 2007-04-05 15:46:55Z dhill $
#
# Collects the ColumnStore alarm report for a server into
# ${tmpDir}/logReport.log: today's alarm.log plus, when present, the
# archived alarm log for the given date.
#
# Arg 1: server name used in the report header (default "localhost").
if [ $1 ] ; then
SERVER=$1
else
SERVER="localhost"
fi
# Arg 2: archive-date suffix of the archived alarm log (default: none).
if [ $2 ] ; then
DATE=$2
else
DATE=" "
fi
#get temp directory
tmpDir=`mcsGetConfig SystemConfig SystemTempFileDir`
# Start from a clean output file.
rm -f ${tmpDir}/logReport.log
{
echo " "
echo "******************** Alarm Report for $SERVER ********************"
echo " "
echo "-- Today's Alarms --"
echo " "
cat /var/log/mariadb/columnstore/alarm.log 2>/dev/null
# Append the archived alarms only when an archive exists for $DATE.
if test -f /var/log/mariadb/columnstore/archive/alarm.log-$DATE ; then
echo "-- Archived Alarms --"
echo " "
cat /var/log/mariadb/columnstore/archive/alarm.log-$DATE 2>/dev/null
fi
} > ${tmpDir}/logReport.log
exit 0

View File

@@ -1,60 +0,0 @@
#!/bin/bash
#
# Estimates the row count for a given table. Uses number of extents * 8M for the estimate.
#
# Usage: <script> <schema> <table>
# Requires mysql-client access to the calpontsys schema and the editem tool.
#
# Initialize variables.
#
if [ -z "$MYSQLCMD" ]; then
MYSQLCMD="mysql -u root"
fi
#
# Validate that there are two parameters - schema and table.
#
if [ $# -ne 2 ]; then
echo ""
echo "Reports the approximate row count for the given table."
echo ""
echo "Parameters:"
echo "  Schema"
echo "  Table"
# Bug fix: previously the script fell through after printing usage and ran
# the queries with empty parameters; bail out instead.
exit 1
fi
db=$1
table=$2
#
# Validate that the table exists.
#
sql="select count(*) from systable where \`schema\`='$db' and tablename='$table';"
count=`$MYSQLCMD calpontsys --skip-column-names -e "$sql;"`
# Quote $count so the test does not break if the query returned nothing.
if [ "$count" -le 0 ]; then
echo ""
echo "$db.$table does not exist in Columnstore."
echo ""
exit 1
fi
#
# Grab the objectid and column width for a column in the table.
#
sql="select objectid from syscolumn where \`schema\`='$db' and tablename='$table' limit 1;"
objectid=`$MYSQLCMD calpontsys --skip-column-names -e "$sql"`
# NOTE(review): colWidth is fetched but not used by the estimate below.
sql="select columnlength from syscolumn where objectid=$objectid;"
colWidth=`$MYSQLCMD calpontsys --skip-column-names -e "$sql"`
#
# Use editem to count the extents.
#
extentCount=`editem -o $objectid | wc -l`
let extentCount-=2 # Take out the 2 extra rows for header and blank line at end.
# Each extent holds 8192*1024 = 8M rows.
let approximateRowCount=$extentCount*8192*1024;
echo ""
echo "Approximate row count for $db.$table is $approximateRowCount."
echo ""
exit 0

View File

@@ -1,30 +0,0 @@
#! /bin/sh
#
# $Id: logReport.sh 421 2007-04-05 15:46:55Z dhill $
#
# Appends a bulk-load error summary (ERR/CRIT lines from the cpimport bulk
# logs) for a module to the given output file.
#
# Arg 1: module name (default "pm1").
if [ $1 ] ; then
MODULE=$1
else
MODULE="pm1"
fi
# Arg 2: output file to append to (default "<module>_logReport.txt").
if [ $2 ] ; then
OUT_FILE=$2
else
OUT_FILE=${MODULE}_logReport.txt
fi
{
# Only scan when the bulk-load directory exists on this node.
if test -d /var/lib/columnstore/data/bulk ; then
echo " "
echo "-- Check for Errors in Bulk Logs --"
echo " "
echo "################# egrep '(ERR|CRIT)' /var/lib/columnstore/data/bulk/log/*.err #################"
echo " "
egrep '(ERR|CRIT)' /var/lib/columnstore/data/bulk/log/*.err 2>/dev/null
fi
} >> $OUT_FILE
exit 0

View File

@@ -1,897 +0,0 @@
/* Copyright (C) 2013 Calpont Corp. */
/* Copyright (C) 2016 MariaDB Corporation */
/******************************************************************************************
* $Id: columnstoreSupport.cpp 64 2006-10-12 22:21:51Z dhill $
*
*
*
******************************************************************************************/
/**
* @file
*/
#include <iterator>
#include <numeric>
#include <deque>
#include <iostream>
#include <ostream>
#include <fstream>
#include <cstdlib>
#include <string>
#include <limits.h>
#include <sstream>
#include <exception>
#include <stdexcept>
#include <vector>
#include "stdio.h"
#include "ctype.h"
#include <netdb.h>
#include <readline.h>
#include <boost/filesystem.hpp>
#include "mcsconfig.h"
#include "liboamcpp.h"
#include "configcpp.h"
#include "installdir.h"
#include "mcsSupportUtil.h"
#include "columnstoreversion.h"
using namespace std;
using namespace oam;
using namespace config;
typedef struct Child_Module_struct
{
std::string moduleName;
std::string moduleIP;
std::string hostName;
} ChildModule;
typedef std::vector<ChildModule> ChildModuleList;
string currentDate;
string systemName;
string localModule;
string localModuleHostName;
ChildModuleList childmodulelist;
ChildModuleList parentmodulelist;
ChildModule childmodule;
string rootPassword = "";
string debug_flag = "0";
string mysqlpw = " ";
string tmpDir;
int runningThreads = 0;
pthread_mutex_t mutex1 = PTHREAD_MUTEX_INITIALIZER;
typedef boost::tuple<ChildModuleList::iterator, string> threadInfo_t;
bool LOCAL = false;
void* childReportThread(threadInfo_t* st)
{
assert(st);
ChildModuleList::iterator& list = boost::get<0>(*st);
string reportType = boost::get<1>(*st);
string remoteModuleName = (*list).moduleName;
string remoteModuleIP = (*list).moduleIP;
string remoteHostName = (*list).hostName;
pthread_mutex_lock(&mutex1);
runningThreads++;
// cout << "++ " << runningThreads << endl;
pthread_mutex_unlock(&mutex1);
string outputFile;
if (reportType == "log")
{
outputFile = remoteModuleName + "_" + reportType + "Report.tar.gz";
}
else
{
outputFile = remoteModuleName + "_" + reportType + "Report.txt";
FILE* pOutputFile = fopen(outputFile.c_str(), "a");
if (pOutputFile == NULL)
{
printf("Could not open file: %s", outputFile.c_str());
exit(1);
}
fprintf(pOutputFile,
"********************************************************************************\n"
"\n"
" System %s\n"
" columnstoreSupportReport script ran from Module %s on %s\n"
" SoftwareVersion = %s-%s"
"\n"
"********************************************************************************\n"
"\n"
" %s report\n"
"\n"
"********************************************************************************\n",
systemName.c_str(), localModule.c_str(), currentDate.c_str(), columnstore_version.c_str(),
columnstore_release.c_str(), reportType.c_str());
}
cout << "Get " + reportType + " report data for " + remoteModuleName + " " << endl;
string cmd = "remote_command.sh " + remoteModuleIP + " " + rootPassword + ";" + reportType + "Report.sh " +
remoteModuleName + "' " + debug_flag + " - forcetty";
int rtnCode = system(cmd.c_str());
if (WEXITSTATUS(rtnCode) != 0)
{
cout << "Error with running remote_command.sh, exiting..." << endl;
}
cmd = "remote_scp_get.sh " + remoteModuleIP + " " + rootPassword + " " + tmpDir + "/" + outputFile +
" > /dev/null 2>&1";
rtnCode = system(cmd.c_str());
if (WEXITSTATUS(rtnCode) != 0)
cout << "ERROR: failed to retrieve " << tmpDir << "/" << outputFile << " from " + remoteHostName << endl;
pthread_mutex_lock(&mutex1);
runningThreads--;
// cout << "-- " << runningThreads << endl;
pthread_mutex_unlock(&mutex1);
// exit thread
pthread_exit(0);
}
/*
 * Thread entry point that produces one support report ("config", "hardware",
 * "resource", "log", "bulklog") for the local module.
 *
 * Unless the global LOCAL flag is set, it also spawns one childReportThread
 * per entry in the global childmodulelist to collect the same report from the
 * remote servers.  The global runningThreads counter is incremented on entry
 * and decremented on exit (under mutex1) so main() can wait for completion.
 *
 * @param reporttype  pointer to the report-type string; must stay alive for
 *                    the duration of the thread (see launch sites in main()).
 * Always terminates via pthread_exit(0).
 */
void* reportThread(string* reporttype)
{
  assert(reporttype);
  string reportType = *reporttype;
  Oam oam;

  pthread_mutex_lock(&mutex1);
  runningThreads++;
  // cout << "++ " << runningThreads << endl;
  pthread_mutex_unlock(&mutex1);

  string outputFile = localModule + "_" + reportType + "Report.txt";

  FILE* pOutputFile = fopen(outputFile.c_str(), "a");
  if (pOutputFile == NULL)
  {
    printf("Could not open file: %s", outputFile.c_str());
    exit(1);
  }

  // Banner identifying the system, module, date and software version.
  fprintf(pOutputFile,
          "********************************************************************************\n"
          "\n"
          " System %s\n"
          " columnstoreSupportReport script ran from Module %s on %s\n"
          " SoftwareVersion = %s-%s"
          "\n"
          "********************************************************************************\n"
          "\n"
          " %s report\n"
          "\n"
          "********************************************************************************\n",
          systemName.c_str(), localModule.c_str(), currentDate.c_str(), columnstore_version.c_str(),
          columnstore_release.c_str(), reportType.c_str());
  fclose(pOutputFile);

  // Collect the same report from each child server, one thread per module.
  if (!LOCAL)
  {
    ChildModuleList::iterator list1 = childmodulelist.begin();

    for (; list1 != childmodulelist.end(); list1++)
    {
      threadInfo_t* st = new threadInfo_t;
      *st = boost::make_tuple(list1, reportType);

      pthread_t childreportthread;
      int status = pthread_create(&childreportthread, NULL, (void* (*)(void*)) & childReportThread, st);

      if (status != 0)
      {
        cout << "ERROR: childreportthread: pthread_create failed, return status = " + oam.itoa(status)
             << endl;
        // BUGFIX: the tuple was leaked when the thread never started;
        // childReportThread owns (and frees) it only on success.
        delete st;
      }

      sleep(1);
    }
  }

  if (reportType == "log")
  {
    // run log config on local server
    cout << "Get log config data for " + localModule << endl;
    string cmd = "logReport.sh " + localModule + " " + outputFile;
    system(cmd.c_str());
  }
  else
  {
    string cmd = reportType + "Report.sh " + localModule + " " + outputFile;
    system(cmd.c_str());

    if (reportType == "config")
    {
      pOutputFile = fopen(outputFile.c_str(), "a");
      if (pOutputFile == NULL)
      {
        printf("Could not open file: %s", outputFile.c_str());
        exit(1);
      }

      fprintf(pOutputFile,
              "\n******************** System Network Configuration ******************************\n\n");
      getSystemNetworkConfig(pOutputFile);
      fprintf(pOutputFile,
              "\n******************** System Module Configure **********************************\n\n");
      getModuleTypeConfig(pOutputFile);
      fprintf(pOutputFile,
              "\n******************** System Storage Configuration *****************************\n\n");
      getStorageConfig(pOutputFile);
      fprintf(pOutputFile,
              "\n******************** System Storage Status ************************************\n\n");
      getStorageStatus(pOutputFile);

      // BT: most of this is tedious to collect and can be manually looked up in the debug.log file
      // fprintf(pOutputFile,"\n******************** System Status
      // ********************************************\n\n"); printSystemStatus(pOutputFile);
      // printProcessStatus(pOutputFile);
      // printAlarmSummary(pOutputFile);
      //
      // fprintf(pOutputFile,"\n******************** System Directories
      // ***************************************\n\n"); getSystemDirectories(pOutputFile);

      // BUGFIX: copy_file() throws on a missing source file; an exception
      // escaping a pthread entry point aborts the whole process, so trap it
      // and keep producing the rest of the report.
      try
      {
        boost::filesystem::path configFile =
            std::string(MCSSYSCONFDIR) + std::string("/columnstore/Columnstore.xml");
        boost::filesystem::copy_file(configFile, "./Columnstore.xml",
                                     boost::filesystem::copy_options::overwrite_existing);
        boost::filesystem::path SMconfigFile =
            std::string(MCSSYSCONFDIR) + std::string("/columnstore/storagemanager.cnf");
        boost::filesystem::copy_file(SMconfigFile, "./storagemanager.cnf",
                                     boost::filesystem::copy_options::overwrite_existing);
      }
      catch (std::exception& e)
      {
        cout << "ERROR: failed to copy configuration file: " << e.what() << endl;
      }

      // Scrub cloud credentials before they leave the machine.
      system("sed -i 's/.*aws_access_key_id.*/aws_access_key_id={PRIVATE}/' ./storagemanager.cnf");
      system("sed -i 's/.*aws_secret_access_key.*/aws_secret_access_key={PRIVATE}/' ./storagemanager.cnf");
      fclose(pOutputFile);
    }

    /*
    // TODO: This can be ported from mcsadmin if needed most info included does not seem useful at this time
    if (reportType == "resource" )
    {
      if (LOCAL)
      {
        fprintf(pOutputFile,"\n******************** mcsadmin getModuleResourceUsage
    **************************\n\n"); string cmd = "mcsadmin getModuleResourceUsage " + localModule + " >> " +
    outputFile; system(cmd.c_str());
      }
      else
      {
        fprintf(pOutputFile,"\n******************** mcsadmin getSystemResourceUsage
    **************************\n\n"); string cmd = "mcsadmin getSystemResourceUsage >> " + outputFile;
        system(cmd.c_str());
      }
    }*/
  }

  // exit thread
  pthread_mutex_lock(&mutex1);
  runningThreads--;
  // cout << "-- " << runningThreads << endl;
  pthread_mutex_unlock(&mutex1);
  pthread_exit(0);
}
int main(int argc, char* argv[])
{
Oam oam;
Config* sysConfig = Config::makeConfig();
string SystemSection = "SystemConfig";
string InstallSection = "Installation";
bool HARDWARE = false;
bool CONFIG = false;
bool DBMS = false;
bool RESOURCE = false;
bool LOG = false;
bool BULKLOG = false;
bool HADOOP = false;
// get current time and date
time_t now;
now = time(NULL);
struct tm tm;
localtime_r(&now, &tm);
char timestamp[200];
strftime(timestamp, 200, "%m:%d:%y-%H:%M:%S", &tm);
currentDate = timestamp;
char helpArg[3] = "-h";
// Get System Name
try
{
oam.getSystemConfig("SystemName", systemName);
}
catch (...)
{
systemName = "unassigned";
}
// get Local Module Name and Server Install Indicator
string singleServerInstall = "n";
oamModuleInfo_t st;
try
{
st = oam.getModuleInfo();
localModule = boost::get<0>(st);
}
catch (...)
{
cout << endl << "**** Failed : Failed to read Local Module Name" << endl;
exit(-1);
}
if (argc == 1)
{
argv[1] = &helpArg[0];
argc = 2;
}
string DataFilePlugin;
try
{
DataFilePlugin = sysConfig->getConfig(SystemSection, "DataFilePlugin");
}
catch (...)
{
cout << "ERROR: Problem accessing Columnstore configuration file" << endl;
exit(-1);
}
tmpDir = startup::StartUp::tmpDir();
for (int i = 1; i < argc; i++)
{
if (string("-h") == argv[i])
{
cout << endl;
cout << "'columnstoreSupport' generates a Set of System Support Report Files in a tar file" << endl;
cout << "called columnstoreSupportReport.'system-name'.tar.gz in the local directory." << endl;
cout << "It should be run on the server with the DBRM front-end." << endl;
cout << "Check the Admin Guide for additional information." << endl;
cout << endl;
cout << "Usage: columnstoreSupport [-h][-a][-hw][-s][-c][-db][-r][-l][-bl][-lc][-p "
"'root-password'][-de]";
cout << endl;
cout << " -h help" << endl;
cout << " -a Output all Reports (excluding Bulk Logs Reports)" << endl;
cout << " -hw Output Hardware Reports only" << endl;
cout << " -c Output Configuration/Status Reports only" << endl;
cout << " -db Output DBMS Reports only" << endl;
cout << " -r Output Resource Reports only" << endl;
cout << " -l Output Columnstore Log/Alarms Reports only" << endl;
cout << " -bl Output Columnstore Bulk Log Reports only" << endl;
cout << " -lc Output Reports for Local Server only" << endl;
cout << " -p password (multi-server systems), root-password or 'ssh' to use 'ssh keys'"
<< endl;
cout << " -de Debug Flag" << endl;
exit(0);
}
else
{
if (string("-a") == argv[i])
{
HARDWARE = true;
CONFIG = true;
DBMS = true;
RESOURCE = true;
LOG = true;
HADOOP = (DataFilePlugin.empty() ? false : true);
}
else if (string("-hw") == argv[i])
HARDWARE = true;
else if (string("-c") == argv[i])
CONFIG = true;
else if (string("-db") == argv[i])
DBMS = true;
else if (string("-r") == argv[i])
RESOURCE = true;
else if (string("-l") == argv[i])
LOG = true;
else if (string("-bl") == argv[i])
BULKLOG = true;
else if (string("-lc") == argv[i])
LOCAL = true;
else if (string("-p") == argv[i])
{
i++;
if (argc == i)
{
cout << "ERROR: missing root password argument" << endl;
exit(-1);
}
rootPassword = argv[i];
// add single quote for special characters
if (rootPassword != "ssh")
{
rootPassword = "'" + rootPassword + "'";
}
}
else if (string("-mp") == argv[i])
{
i++;
if (argc == i)
{
cout << "ERROR: missing MariaDB Columnstore root user password argument" << endl;
exit(-1);
}
mysqlpw = argv[i];
mysqlpw = "'" + mysqlpw + "'";
}
else if (string("-de") == argv[i])
debug_flag = "1";
else if (string("-hd") == argv[i])
{
HADOOP = (DataFilePlugin.empty() ? false : true);
}
else
{
cout << "Invalid Option of '" << argv[i] << "', run with '-h' for help" << endl;
exit(1);
}
}
}
// default to -a if nothing is set
if (!HARDWARE && !CONFIG && !DBMS && !RESOURCE && !LOG && !BULKLOG && !HADOOP)
{
HARDWARE = true;
CONFIG = true;
DBMS = true;
RESOURCE = true;
LOG = true;
HADOOP = (DataFilePlugin.empty() ? false : true);
}
// get Parent OAM Module Name and setup of it's Custom OS files
string PrimaryUMModuleName;
try
{
PrimaryUMModuleName = sysConfig->getConfig(SystemSection, "PrimaryUMModuleName");
}
catch (...)
{
cout << "ERROR: Problem getting Parent OAM Module Name" << endl;
exit(-1);
}
if (PrimaryUMModuleName == "unassigned")
PrimaryUMModuleName = localModule;
if ((localModule != PrimaryUMModuleName) && DBMS)
{
char* pcommand = 0;
char* p;
string argument = "n";
while (true)
{
cout << endl << "You selected to get the DBMS data." << endl;
cout << "You need to run the columnstoreSupport command on module '" << PrimaryUMModuleName
<< "' to get that information." << endl;
cout << "Or you can proceed on to get all data except the DBMS." << endl;
pcommand = readline(" Do you want to proceed: (y or n) [n]: ");
if (pcommand && *pcommand)
{
p = strtok(pcommand, " ");
argument = p;
free(pcommand);
pcommand = 0;
}
if (pcommand)
{
free(pcommand);
pcommand = 0;
}
if (argument == "y")
{
cout << endl;
break;
}
else if (argument == "n")
exit(1);
}
}
// get number of worker-nodes, will tell us if a single server system
// get Parent OAM Module Name and setup of it's Custom OS files
try
{
string NumWorkers = sysConfig->getConfig("DBRM_Controller", "NumWorkers");
if (NumWorkers == "1")
singleServerInstall = "y";
}
catch (...)
{
}
if (singleServerInstall == "n" && !LOCAL)
if (HARDWARE || CONFIG || RESOURCE || LOG || HADOOP)
if (rootPassword.empty())
{
cout << "ERROR: Multi-Module System, Password Argument required or use '-lc' option, check help for "
"more information"
<< endl;
exit(-1);
}
// get Parent OAM Module Name and setup of it's Custom OS files
// string parentOAMModuleName;
ChildModule parentOAMModule;
try
{
parentOAMModule.moduleName = sysConfig->getConfig(SystemSection, "ParentOAMModuleName");
}
catch (...)
{
cout << "ERROR: Problem getting Parent OAM Module Name" << endl;
exit(-1);
}
// Get list of configured system modules
SystemModuleTypeConfig sysModuleTypeConfig;
try
{
oam.getSystemConfig(sysModuleTypeConfig);
}
catch (...)
{
cout << "ERROR: Problem reading the Columnstore System Configuration file" << endl;
exit(-1);
}
string ModuleSection = "SystemModuleConfig";
for (unsigned int i = 0; i < sysModuleTypeConfig.moduletypeconfig.size(); i++)
{
string moduleType = sysModuleTypeConfig.moduletypeconfig[i].ModuleType;
int moduleCount = sysModuleTypeConfig.moduletypeconfig[i].ModuleCount;
if (moduleCount == 0)
// no modules equipped for this Module Type, skip
continue;
// get IP addresses and Host Names
DeviceNetworkList::iterator listPT = sysModuleTypeConfig.moduletypeconfig[i].ModuleNetworkList.begin();
for (; listPT != sysModuleTypeConfig.moduletypeconfig[i].ModuleNetworkList.end(); listPT++)
{
string moduleName = (*listPT).DeviceName;
HostConfigList::iterator pt1 = (*listPT).hostConfigList.begin();
string moduleIPAddr = (*pt1).IPAddr;
string moduleHostName = (*pt1).HostName;
if (moduleName == localModule)
{
localModuleHostName = moduleHostName;
}
// save Child modules
if (moduleName != localModule && moduleType != "xm")
{
childmodule.moduleName = moduleName;
childmodule.moduleIP = moduleIPAddr;
childmodule.hostName = moduleHostName;
childmodulelist.push_back(childmodule);
}
if (moduleName == parentOAMModule.moduleName)
{
parentOAMModule.moduleIP = moduleIPAddr;
parentOAMModule.hostName = moduleHostName;
parentOAMModule.moduleName = moduleName;
}
}
} // end of i for loop
// create a clean Columnstore Support Report
system("rm -f *_configReport.txt");
system("rm -f *_dbmsReport.txt");
system("rm -f *_hardwareReport.txt");
system("rm -f *_logReport.txt");
system("rm -f *_bulklogReport.txt");
system("rm -f *_resourceReport.txt");
//
// Configuration
//
if (CONFIG)
{
string reportType = "config";
cout << "Get " + reportType + " report data for " + localModule << endl;
pthread_t reportthread;
int status = pthread_create(&reportthread, NULL, (void* (*)(void*)) & reportThread, &reportType);
if (status != 0)
{
cout << "ERROR: reportthread: pthread_create failed, return status = " + oam.itoa(status);
}
sleep(1);
}
//
// Alarms and Columnstore Logs
//
if (LOG)
{
string reportType = "log";
cout << "Get " + reportType + " report data for " + localModule << endl;
pthread_t reportthread;
int status = pthread_create(&reportthread, NULL, (void* (*)(void*)) & reportThread, &reportType);
if (status != 0)
{
cout << "ERROR: reportthread: pthread_create failed, return status = " + oam.itoa(status);
}
sleep(1);
}
//
// Bulk Logs
//
if (BULKLOG)
{
string reportType = "bulklog";
cout << "Get " + reportType + " report data for " + localModule << endl;
pthread_t reportthread;
int status = pthread_create(&reportthread, NULL, (void* (*)(void*)) & reportThread, &reportType);
if (status != 0)
{
cout << "ERROR: reportthread: pthread_create failed, return status = " + oam.itoa(status);
}
sleep(1);
}
//
// Hardware
//
if (HARDWARE)
{
string reportType = "hardware";
cout << "Get " + reportType + " report data for " + localModule << endl;
pthread_t reportthread;
int status = pthread_create(&reportthread, NULL, (void* (*)(void*)) & reportThread, &reportType);
if (status != 0)
{
cout << "ERROR: reportthread: pthread_create failed, return status = " + oam.itoa(status);
}
sleep(1);
}
//
// Resources
//
if (RESOURCE)
{
string reportType = "resource";
cout << "Get " + reportType + " report data for " + localModule << endl;
pthread_t reportthread;
int status = pthread_create(&reportthread, NULL, (void* (*)(void*)) & reportThread, &reportType);
if (status != 0)
{
cout << "ERROR: reportthread: pthread_create failed, return status = " + oam.itoa(status);
}
sleep(1);
}
//
// DBMS
//
if (DBMS)
{
cout << "Get dbms report data for " << localModule << endl;
string outputFile = localModule + "_dbmsReport.txt";
FILE* pOutputFile = fopen(outputFile.c_str(), "w");
if (pOutputFile == NULL)
{
cout << "Could not open file: " + outputFile << endl;
exit(1);
}
fprintf(pOutputFile,
"********************************************************************************\n"
"\n"
" System %s\n"
" columnstoreSupportReport script ran from Module %s on %s\n"
" SoftwareVersion = %s-%s"
"\n"
"********************************************************************************\n"
"\n"
" DBMS report\n"
"\n"
"********************************************************************************\n",
systemName.c_str(), localModule.c_str(), currentDate.c_str(), columnstore_version.c_str(),
columnstore_release.c_str());
fclose(pOutputFile);
// run DBMS report on local server
bool FAILED = false;
if (localModule != PrimaryUMModuleName)
{
cout << " FAILED: run columnstoreSupport on '" << PrimaryUMModuleName << "' to get the dbrm report"
<< endl;
FAILED = true;
}
else
{
// check if mysql is supported and get info
string logFile = tmpDir + "/idbmysql.log";
string columnstoreMysql = "mysql -u root ";
string cmd = columnstoreMysql + " -e 'status' > " + logFile + " 2>&1";
system(cmd.c_str());
// check for mysql password set
string pwprompt = " ";
if (checkLogStatus(logFile, "ERROR 1045"))
{
cout << "NOTE: MariaDB Columnstore root user password is set" << endl;
// needs a password, was password entered on command line
if (mysqlpw == " ")
{
// go check columnstore.cnf
string file = std::string(MCSMYCNFDIR) + "/columnstore.cnf";
ifstream oldFile(file.c_str());
vector<string> lines;
char line[200];
string buf;
while (oldFile.getline(line, 200))
{
buf = line;
string::size_type pos = buf.find("password", 0);
if (pos != string::npos)
{
string::size_type pos1 = buf.find("=", 0);
if (pos1 != string::npos)
{
pos = buf.find("#", 0);
if (pos == string::npos)
{
// password arg in columnstore.cnf, go get password
cout << "NOTE: Using password from columnstore.cnf" << endl;
mysqlpw = buf.substr(pos1 + 1, 80);
cout << mysqlpw << endl;
break;
}
}
}
}
oldFile.close();
if (mysqlpw == " ")
{
cout << "NOTE: No password provide on command line or found uncommented in columnstore.cnf"
<< endl;
cout << endl;
string prompt = " *** Enter MariaDB Columnstore password > ";
mysqlpw = getpass(prompt.c_str());
}
}
// check for mysql password set
pwprompt = "--password=" + mysqlpw;
string cmd = columnstoreMysql + pwprompt + " -e 'status' > " + logFile + " 2>&1";
system(cmd.c_str());
if (checkLogStatus(logFile, "ERROR 1045"))
{
cout << "FAILED: Failed login using MariaDB Columnstore root user password '" << mysqlpw << "'"
<< endl;
FAILED = true;
}
}
if (!FAILED)
{
string cmd = "dbmsReport.sh " + localModule + " " + outputFile + " " + std::string(MCSSUPPORTDIR) +
" " + pwprompt;
system(cmd.c_str());
}
}
/*
BT: This doesn't appear to do anything
fprintf(pOutputFile,"\n******************** Database Size Report
*************************************\n\n"); getStorageStatus(pOutputFile);
string file = "databaseSizeReport";
ifstream File (file.c_str());
if (File)
{
string cmd = "databaseSizeReport >> " + outputFile;
system(cmd.c_str());
}
*/
boost::filesystem::path configFile = std::string(MCSMYCNFDIR) + "/columnstore.cnf";
boost::filesystem::copy_file(configFile, "./columnstore.cnf",
boost::filesystem::copy_options::overwrite_existing);
}
int wait = 0;
while (true)
{
// cout << "check " << runningThreads << endl;
if (runningThreads < 1)
break;
sleep(2);
wait++;
// give it 60 minutes to complete
if (wait >= 3600 * 5)
{
cout << "Timed out (60 minutes) waiting for Requests to complete" << endl;
}
}
system("unix2dos *Report.txt > /dev/null 2>&1");
system(
"rm -rf columnstoreSupportReport;"
"mkdir columnstoreSupportReport;"
"mv *Report.txt columnstoreSupportReport/. > /dev/null 2>&1;"
"mv Columnstore.xml columnstoreSupportReport/. > /dev/null 2>&1;"
"mv columnstore.cnf columnstoreSupportReport/. > /dev/null 2>&1;"
"mv storagemanager.cnf columnstoreSupportReport/. > /dev/null 2>&1;"
"mv *Report.tar.gz columnstoreSupportReport/. > /dev/null 2>&1");
string cmd = "tar -zcf columnstoreSupportReport." + systemName + ".tar.gz columnstoreSupportReport/*";
system(cmd.c_str());
cout << endl
<< "Columnstore Support Script Successfully completed, files located in columnstoreSupportReport." +
systemName + ".tar.gz"
<< endl;
}

View File

@@ -1,76 +0,0 @@
#!/bin/bash
#
# $Id: hardwareReport.sh 421 2007-04-05 15:46:55Z dhill $
#
# Appends a configuration/status report (service registration with
# chkconfig/systemd/update-rc.d, fstab, and process listings) for one module
# to the given output file.
#
#   $1 - module name   (default: pm1)
#   $2 - output file   (default: <module>_logReport.txt)

MODULE="${1:-pm1}"
OUT_FILE="${2:-${MODULE}_logReport.txt}"

{
echo " "
echo "******************** Configuration/Status Report for ${MODULE} ********************"
echo " "

# SysV-style distros expose service registration through chkconfig.
chkconfig=$(command -v chkconfig 2>/dev/null)
if [ -n "$chkconfig" ]; then
    echo "-- chkconfig configuration --"
    echo " "
    echo "################# chkconfig --list | grep columnstore #################"
    echo " "
    chkconfig --list | grep columnstore 2>/dev/null
fi

# systemd-based distros.
systemctl=$(command -v systemctl 2>/dev/null)
if [ -n "$systemctl" ]; then
    echo "-- systemctl configuration --"
    echo " "
    echo "################# systemctl list-unit-files --type=service | grep columnstore #################"
    echo " "
    systemctl list-unit-files --type=service | grep columnstore 2>/dev/null
    echo "################# systemctl list-unit-files --type=service | grep mariadb #################"
    echo " "
    systemctl list-unit-files --type=service | grep mariadb 2>/dev/null
fi

# Debian/Ubuntu SysV tooling.
updaterc=$(command -v update-rc.d 2>/dev/null)
if [ -n "$updaterc" ]; then
    echo "-- services configuration --"
    echo " "
    echo "################# service --status-all | grep columnstore #################"
    echo " "
    service --status-all | grep columnstore 2>/dev/null
fi

echo " "
echo "-- fstab Configuration --"
echo " "
echo "################# cat /etc/fstab #################"
echo " "
cat /etc/fstab 2>/dev/null

echo " "
echo "-- Server Processes --"
echo " "
echo "################# ps axu #################"
echo " "
ps axu

echo " "
echo "-- Server Processes with resource usage --"
echo " "
echo "################# top -b -n 1 #################"
echo " "
top -b -n 1
} >> "$OUT_FILE"

# BUGFIX: OUT_FILE is now quoted so file names containing spaces work; `which`
# was replaced by the POSIX-specified `command -v`.

exit 0

View File

@@ -1,64 +0,0 @@
#! /bin/sh
#
# $Id: dbmsReport.sh
#
# Appends DBMS-level diagnostics (server status, ColumnStore system catalog,
# databases, server variables) for one module to the given output file.
# Silently does nothing if the mysql client cannot be run.
#
#   $1 - module name                 (default: pm1)
#   $2 - output file                 (default: <module>_logReport.txt)
#   $3 - ColumnStore support dir     (default: /usr/share/columnstore)
#   $4 - mysql password option, e.g. --password=... (default: none)

MODULE="${1:-pm1}"
OUT_FILE="${2:-${MODULE}_logReport.txt}"
MCSSUPPORTDIR="${3:-/usr/share/columnstore}"
PW_PROMPT="${4:-}"

{
# NOTE: ${columnstoreMysql} is expanded unquoted on purpose so the embedded
# password option (if any) becomes a separate argument.
columnstoreMysql="mysql -u root ${PW_PROMPT} "

if ${columnstoreMysql} -V > /dev/null 2>&1; then
    echo " "
    echo "******************** DBMS Columnstore Version *********************************"
    echo " "
    ${columnstoreMysql} -e 'status'
    echo " "
    echo "******************** DBMS Columnstore System Column ***************************"
    echo " "
    ${columnstoreMysql} -e 'desc calpontsys.syscolumn;'
    echo " "
    echo "******************** DBMS Columnstore System Table ****************************"
    echo " "
    ${columnstoreMysql} -e 'desc calpontsys.systable;'
    echo " "
    echo "******************** DBMS Columnstore System Catalog Data *********************"
    echo " "
    ${columnstoreMysql} calpontsys < "$MCSSUPPORTDIR/dumpcat_mysql.sql"
    echo " "
    echo "******************** DBMS Columnstore System Table Data ***********************"
    echo "******************** DBMS Columnstore Databases *******************************"
    echo " "
    ${columnstoreMysql} -e 'show databases;'
    echo " "
    echo "******************** DBMS Columnstore variables *******************************"
    echo " "
    ${columnstoreMysql} -e 'show variables;'
    echo " "
fi
} >> "$OUT_FILE"

# BUGFIX: OUT_FILE and the dumpcat path are now quoted against word splitting.

exit 0

View File

@@ -1,130 +0,0 @@
#!/bin/bash
#
# Reports the max value from the extent map for the given column.
#
# Usage: <script> <schema> <table> <column>
#
# Looks the column up in the ColumnStore system catalog (calpontsys), then
# runs the `editem` utility to list per-extent min/max values and aggregates
# them with awk.  MYSQLCMD may be preset in the environment to override the
# default mysql client invocation.
#
# Initialize variables.
#
if [ -z "$MYSQLCMD" ]; then
  MYSQLCMD="mysql -u root"
fi

#
# Validate that there are three parameters - schema and table and columnname.
#
if [ $# -ne 3 ]; then
  echo ""
  echo "Reports the max value for the given column."
  echo ""
  echo "Parameters:"
  echo " Schema"
  echo " Table"
  echo " Column"
  exit 1
fi

db=$1
table=$2
column=$3

#
# Validate that the column exists.
#
# NOTE: $MYSQLCMD is intentionally unquoted so its embedded options split
# into separate words.
sql="select count(*) from syscolumn where \`schema\`='$db' and tablename='$table' and columnname='$column';"
count=`$MYSQLCMD calpontsys --skip-column-names -e "$sql;"`

if [ $count -le 0 ]; then
  echo ""
  echo "$db.$table.$column does not exist in Columnstore."
  echo ""
  exit 1
fi

#
# Validate that the column type is one that this script supports.
# Supported Types:
# 6 int
# 8 date
# 9 bigint
# 11 datetime
sql="select datatype from syscolumn where \`schema\`='$db' and tablename='$table' and columnname='$column';"
dataType=`$MYSQLCMD calpontsys --skip-column-names -e "$sql"`

if [ $dataType -ne 6 ] && [ $dataType -ne 8 ] && [ $dataType -ne 9 ] && [ $dataType -ne 11 ]; then
  echo ""
  echo "The column data type must be an int, bigint, date, or datetime."
  echo ""
  exit 1
fi

#
# Grab the objectid for the column.
#
sql="select objectid from syscolumn where \`schema\`='$db' and tablename='$table' and columnname='$column';"
objectid=`$MYSQLCMD calpontsys --skip-column-names -e "$sql"`

#
# Set the editem specific parameter if the column is a date or datetime.
#
if [ $dataType -eq 8 ]; then
  parm="-t"
elif [ $dataType -eq 11 ]; then
  parm="-s"
fi

#
# Use the editem utility to get the min and max value.
#
# NOTE(review): the awk field positions ($6..$14) assume a fixed layout of
# `editem -o` output lines containing "max" -- verify against the installed
# editem version.  Date/datetime comparisons below rely on the string forms
# sorting chronologically.
editem -o $objectid $parm | grep max | awk -v dataType=$dataType '
BEGIN {
  allValid=1;
  foundValidExtent=0;
}
{
  if(dataType == 11) {
    state=substr($14, 1, length($14)-1); # Datetime has date and time as two fields.
    thisMin=$6 " " substr($7, 1, length($7)-1);
    thisMax=$9 " " substr($10, 1, length($10)-1);
  }
  else {
    state=substr($12, 1, length($12)-1);
    thisMin=substr($6, 1, length($6)-1);
    thisMax=substr($8, 1, length($8)-1);
  }
  # Only extents whose min/max are flagged valid contribute to the answer.
  if(state == "valid") {
    if(!foundValidExtent) {
      min=thisMin;
      max=thisMax;
      foundValidExtent=1;
    }
    else {
      if(thisMin < min) {
        min=thisMin;
      }
      if(thisMax > max) {
        max=thisMax;
      }
    }
  }
  else {
    allValid=0;
  }
}
END {
  if(foundValidExtent == 1) {
    print "";
    print "Min=" min;
    print "Max=" max;
    print "";
    if(allValid == 0) {
      print "Not all extents had min and max values set. Answer is incomplete."
    }
  }
  else {
    print "";
    print "There were not any extents with valid min/max values. Unable to provide answer.";
    print "";
  }
}'
exit 0

View File

@@ -1,79 +0,0 @@
#! /bin/sh
#
# $Id: hardwareReport.sh 421 2007-04-05 15:46:55Z dhill $
#
# Appends hardware/OS information (kernel version, uptime, CPU, memory,
# mounts, network interfaces) for one module to the given output file.
#
#   $1 - module name   (default: pm1)
#   $2 - output file   (default: <module>_logReport.txt)

MODULE="${1:-pm1}"
OUT_FILE="${2:-${MODULE}_logReport.txt}"

{
echo " "
echo "******************** Hardware Report for ${MODULE} ********************"
echo " "
echo "-- Server OS Version --"
echo " "
echo "################# cat /proc/version #################"
echo " "
cat /proc/version 2>/dev/null
echo " "
echo "################# uname -a #################"
echo " "
uname -a
echo " "
echo "################# cat /etc/issue #################"
echo " "
cat /etc/issue 2>/dev/null
echo " "
echo "run columnstore_os_check.sh"
echo " "
echo "################# /bin/columnstore_os_check.sh #################"
echo " "
columnstore_os_check.sh 2>/dev/null
echo " "
echo "-- Server Uptime --"
echo " "
echo "################# uptime #################"
echo " "
uptime
echo " "
echo "-- Server cpu-info --"
echo " "
echo "################# cat /proc/cpuinfo #################"
echo " "
cat /proc/cpuinfo 2>/dev/null
echo " "
echo "-- Server memory-info --"
echo " "
echo "################# cat /proc/meminfo #################"
echo " "
# BUGFIX: was "$cat /proc/meminfo" -- $cat is undefined, so the shell tried
# to execute /proc/meminfo itself and the memory info was never captured.
cat /proc/meminfo 2>/dev/null
echo " "
echo "-- Server mounts --"
echo " "
echo "################# cat /proc/mounts #################"
echo " "
cat /proc/mounts 2>/dev/null
echo " "
echo "-- Server Ethernet Configuration --"
echo " "
echo "################# ifconfig -a #################"
echo " "
ifconfig -a 2>/dev/null
} >> "$OUT_FILE"

exit 0

View File

@@ -1,52 +0,0 @@
#! /bin/sh
#
# $Id: logReport.sh 421 2007-04-05 15:46:55Z dhill $
#
# Archives the ColumnStore log directory (and MariaDB .err logs) into
# <module>_logReport.tar.gz in the current directory, then appends the
# system-log level configuration to the given output file.
#
#   $1 - module name   (default: pm1)
#   $2 - output file   (default: <module>_logReport.txt)

MODULE="${1:-pm1}"
OUT_FILE="${2:-${MODULE}_logReport.txt}"

# get temp directory
tmpDir=`mcsGetConfig SystemConfig SystemTempFileDir`

rm -f "${tmpDir}/${MODULE}_logReport.tar.gz"
tar -zcf "${tmpDir}/${MODULE}_logReport.tar.gz" /var/log/mariadb/columnstore > /dev/null 2>&1
cp "${tmpDir}/${MODULE}_logReport.tar.gz" .

tar -zcf "${MODULE}_mysqllogReport.tar.gz" /var/log/mysql/*.err 2>/dev/null

{
echo '******************** Log Configuration ********************'
echo ''
echo 'MariaDB ColumnStore System Log Configuration Data'
echo ''

configFileName=`mcsGetConfig Installation SystemLogConfigFile`
echo 'System Logging Configuration File being used: '"${configFileName}"
echo ''

# BUGFIX(portability): "echo -e" is not defined for /bin/sh (dash prints a
# literal "-e"); printf is specified by POSIX and behaves identically here.
printf 'Module\tConfigured Log Levels\n'
printf -- '------\t---------------------------------------\n'

# Map the syslog targets found in the config file to the level names.
moduleConfig=''
if grep -q '/var/log/mariadb/columnstore/crit.log' "${configFileName}"; then
    moduleConfig="${moduleConfig} CRITICAL"
fi
if grep -q '/var/log/mariadb/columnstore/err.log' "${configFileName}"; then
    moduleConfig="${moduleConfig} ERROR"
fi
if grep -q '/var/log/mariadb/columnstore/warning.log' "${configFileName}"; then
    moduleConfig="${moduleConfig} WARNING"
fi
if grep -q '/var/log/mariadb/columnstore/info.log' "${configFileName}"; then
    moduleConfig="${moduleConfig} INFO"
fi
if grep -q '/var/log/mariadb/columnstore/debug.log' "${configFileName}"; then
    moduleConfig="${moduleConfig} DEBUG"
fi
printf '%s\t%s\n' "${MODULE}" "${moduleConfig}"
} >> "$OUT_FILE"

exit 0

View File

@@ -1,621 +0,0 @@
/* Copyright (C) 2019 MariaDB Corporation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
#include "mcsSupportUtil.h"
using namespace std;
using namespace oam;
using namespace config;
/*
 * Writes a table of every configured module's NICs (module name/description,
 * NIC ID, host name, IP address) to the given report file.
 *
 * Errors from the OAM layer are reported into the file itself rather than
 * thrown, so the caller can keep appending further sections.
 */
void getSystemNetworkConfig(FILE* pOutputFile)
{
  Oam oam;

  // get and display Module Network Config
  SystemModuleTypeConfig systemmoduletypeconfig;
  systemmoduletypeconfig.moduletypeconfig.clear();

  // NOTE: a previous version made an extra getSystemConfig() pass here just
  // to compute a max host-name length that was never used; the dead pass has
  // been removed and the fixed column widths below are kept.
  fprintf(pOutputFile, "%-15s%-30s%-10s%-14s%-20s\n", "Module Name", "Module Description", "NIC ID",
          "Host Name", "IP Address");
  fprintf(pOutputFile, "%-15s%-30s%-10s%-14s%-20s\n", "-----------", "-------------------------", "------",
          "---------", "---------------");

  try
  {
    oam.getSystemConfig(systemmoduletypeconfig);

    for (unsigned int i = 0; i < systemmoduletypeconfig.moduletypeconfig.size(); i++)
    {
      if (systemmoduletypeconfig.moduletypeconfig[i].ModuleType.empty())
        // end of list
        break;

      int moduleCount = systemmoduletypeconfig.moduletypeconfig[i].ModuleCount;
      string moduletype = systemmoduletypeconfig.moduletypeconfig[i].ModuleType;
      string moduletypedesc = systemmoduletypeconfig.moduletypeconfig[i].ModuleDesc;

      if (moduleCount > 0)
      {
        DeviceNetworkList::iterator pt = systemmoduletypeconfig.moduletypeconfig[i].ModuleNetworkList.begin();

        for (; pt != systemmoduletypeconfig.moduletypeconfig[i].ModuleNetworkList.end(); pt++)
        {
          string modulename = (*pt).DeviceName;
          string moduleID = modulename.substr(MAX_MODULE_TYPE_SIZE, MAX_MODULE_ID_SIZE);
          string modulenamedesc = moduletypedesc + " #" + moduleID;

          fprintf(pOutputFile, "%-15s%-30s", modulename.c_str(), modulenamedesc.c_str());

          HostConfigList::iterator pt1 = (*pt).hostConfigList.begin();

          for (; pt1 != (*pt).hostConfigList.end(); pt1++)
          {
            /* MCOL-1607. IPAddr may be a host name here b/c it is read straight
               from the config file. */
            string tmphost = getIPAddress(pt1->IPAddr);
            string ipAddr;

            if (tmphost.empty())
              ipAddr = pt1->IPAddr;
            else
              ipAddr = tmphost;

            string hostname = (*pt1).HostName;
            string nicID = oam.itoa((*pt1).NicID);

            // Additional NICs of the same module continue on their own row,
            // indented past the name/description columns (15 + 30 = 45).
            if (nicID != "1")
            {
              fprintf(pOutputFile, "%-45s", "");
            }

            // BUGFIX: NIC-ID column was printed with %-13s while the header
            // uses width 10, misaligning the host/IP columns.
            fprintf(pOutputFile, "%-10s%-14s%-20s\n", nicID.c_str(), hostname.c_str(), ipAddr.c_str());
          }
        }
      }
    }
  }
  catch (exception& e)
  {
    fprintf(pOutputFile, "**** getNetworkConfig Failed = %s\n\n", e.what());
  }
}
/*
 * Writes the per-module-type configuration section of the support report:
 * description and count, host/IP per NIC, DBRoot assignments, the alarm
 * thresholds (CPU/disk/memory/swap), and the monitored file systems.
 *
 * OAM failures are reported to stdout; the report file is left with whatever
 * was written before the failure.
 */
void getModuleTypeConfig(FILE* pOutputFile)
{
  Oam oam;

  SystemModuleTypeConfig systemmoduletypeconfig;
  ModuleTypeConfig moduletypeconfig;
  ModuleConfig moduleconfig;
  systemmoduletypeconfig.moduletypeconfig.clear();

  try
  {
    oam.getSystemConfig(systemmoduletypeconfig);

    fprintf(pOutputFile, "Module Type Configuration\n\n");

    for (const auto& mtCfg : systemmoduletypeconfig.moduletypeconfig)
    {
      // An empty type marks the end of the configured list.
      if (mtCfg.ModuleType.empty())
        break;

      const int moduleCount = mtCfg.ModuleCount;
      if (moduleCount < 1)
        continue;

      fprintf(pOutputFile, "ModuleType '%s' Configuration information\n", mtCfg.ModuleType.c_str());
      fprintf(pOutputFile, "ModuleDesc = %s\n", mtCfg.ModuleDesc.c_str());
      fprintf(pOutputFile, "ModuleCount = %i\n", moduleCount);

      // One line per NIC of every module of this type.
      if (moduleCount > 0)
      {
        for (const auto& device : mtCfg.ModuleNetworkList)
        {
          for (const auto& host : device.hostConfigList)
          {
            fprintf(pOutputFile, "ModuleHostName and ModuleIPAddr for NIC ID %u on module '%s' = %s , %s\n",
                    host.NicID, device.DeviceName.c_str(), host.HostName.c_str(), host.IPAddr.c_str());
          }
        }
      }

      // DBRoot IDs per PM, comma-separated.
      for (const auto& dbrootDev : mtCfg.ModuleDBRootList)
      {
        if (dbrootDev.dbrootConfigList.size() > 0)
        {
          fprintf(pOutputFile, "DBRootIDs assigned to module 'pm%u' = ", dbrootDev.DeviceID);

          const char* separator = "";
          for (const auto dbrootID : dbrootDev.dbrootConfigList)
          {
            fprintf(pOutputFile, "%s%u", separator, dbrootID);
            separator = ", ";
          }
        }

        fprintf(pOutputFile, "\n");
      }

      // Alarm thresholds, all expressed as percentages.
      fprintf(pOutputFile, "ModuleCPUCriticalThreshold = %u%%\n", mtCfg.ModuleCPUCriticalThreshold);
      fprintf(pOutputFile, "ModuleCPUMajorThreshold = %u%%\n", mtCfg.ModuleCPUMajorThreshold);
      fprintf(pOutputFile, "ModuleCPUMinorThreshold = %u%%\n", mtCfg.ModuleCPUMinorThreshold);
      fprintf(pOutputFile, "ModuleCPUMinorClearThreshold = %u%%\n", mtCfg.ModuleCPUMinorClearThreshold);
      fprintf(pOutputFile, "ModuleDiskCriticalThreshold = %u%%\n", mtCfg.ModuleDiskCriticalThreshold);
      fprintf(pOutputFile, "ModuleDiskMajorThreshold = %u%%\n", mtCfg.ModuleDiskMajorThreshold);
      fprintf(pOutputFile, "ModuleDiskMinorThreshold = %u%%\n", mtCfg.ModuleDiskMinorThreshold);
      fprintf(pOutputFile, "ModuleMemCriticalThreshold = %u%%\n", mtCfg.ModuleMemCriticalThreshold);
      fprintf(pOutputFile, "ModuleMemMajorThreshold = %u%%\n", mtCfg.ModuleMemMajorThreshold);
      fprintf(pOutputFile, "ModuleMemMinorThreshold = %u%%\n", mtCfg.ModuleMemMinorThreshold);
      fprintf(pOutputFile, "ModuleSwapCriticalThreshold = %u%%\n", mtCfg.ModuleSwapCriticalThreshold);
      fprintf(pOutputFile, "ModuleSwapMajorThreshold = %u%%\n", mtCfg.ModuleSwapMajorThreshold);
      fprintf(pOutputFile, "ModuleSwapMinorThreshold = %u%%\n", mtCfg.ModuleSwapMinorThreshold);

      // Monitored file systems, numbered from 1.
      int fsNumber = 1;
      for (const auto& fileSystem : mtCfg.FileSystems)
      {
        fprintf(pOutputFile, "ModuleDiskMonitorFileSystem#%i = %s\n", fsNumber, fileSystem.c_str());
        ++fsNumber;
      }

      fprintf(pOutputFile, "\n");
    }
  }
  catch (exception& e)
  {
    cout << endl << "**** getModuleTypeConfig Failed = " << e.what() << endl;
  }
}
// Write a human-readable report of the system storage configuration to
// pOutputFile: DBRoot storage type, assigned DBRoot IDs per PM, Amazon
// EC2 volume mappings (when configured), and data-redundancy settings.
// Errors from the OAM layer are reported to stdout, not to the file.
void getStorageConfig(FILE* pOutputFile)
{
Oam oam;
try
{
// Tuple layout (by position, per getStorageConfig() usage below):
// <0> PM/DBRoot storage type, <1> DBRoot count, <2> device->DBRoot map,
// <3> UM storage type -- assumed from the boost::get calls; TODO confirm.
systemStorageInfo_t t;
t = oam.getStorageConfig();
string cloud;
try
{
oam.getSystemConfig("Cloud", cloud);
}
catch (...)
{
// "Cloud" key may be absent on non-cloud installs; treat as empty.
}
// Normalize any amazon variant (e.g. "amazon-ec2") to plain "amazon".
string::size_type pos = cloud.find("amazon", 0);
if (pos != string::npos)
cloud = "amazon";
fprintf(pOutputFile, "System Storage Configuration\n");
fprintf(pOutputFile, "Performance Module (DBRoot) Storage Type = %s\n", boost::get<0>(t).c_str());
if (cloud == "amazon")
fprintf(pOutputFile, "User Module Storage Type = %s\n", boost::get<3>(t).c_str());
fprintf(pOutputFile, "System Assigned DBRoot Count = %i\n", boost::get<1>(t));
DeviceDBRootList moduledbrootlist = boost::get<2>(t);
typedef std::vector<int> dbrootList;
dbrootList dbrootlist;
// Print the comma-separated DBRoot ID list for each PM device,
// collecting every assigned ID into dbrootlist as we go.
DeviceDBRootList::iterator pt = moduledbrootlist.begin();
for (; pt != moduledbrootlist.end(); pt++)
{
fprintf(pOutputFile, "DBRoot IDs assigned to 'pm%u' = ", (*pt).DeviceID);
DBRootConfigList::iterator pt1 = (*pt).dbrootConfigList.begin();
for (; pt1 != (*pt).dbrootConfigList.end();)
{
fprintf(pOutputFile, "%u", *pt1);
dbrootlist.push_back(*pt1);
pt1++;
// Separator only between elements, not after the last one.
if (pt1 != (*pt).dbrootConfigList.end())
fprintf(pOutputFile, ", ");
}
fprintf(pOutputFile, "\n");
}
// get any unassigned DBRoots
/*DBRootConfigList undbrootlist;
try
{
oam.getUnassignedDbroot(undbrootlist);
}
catch (...) {}
if ( !undbrootlist.empty() )
{
fprintf(pOutputFile,"DBRoot IDs unassigned = ");
DBRootConfigList::iterator pt1 = undbrootlist.begin();
for ( ; pt1 != undbrootlist.end() ;)
{
fprintf(pOutputFile,"%u",*pt1);
pt1++;
if (pt1 != undbrootlist.end())
fprintf(pOutputFile,", ");
}
fprintf(pOutputFile,"\n");
}*/
fprintf(pOutputFile, "\n");
// um volumes
// Report EC2 volume/device names for each UM, but only on amazon
// installs using external UM storage.
if (cloud == "amazon" && boost::get<3>(t) == "external")
{
ModuleTypeConfig moduletypeconfig;
oam.getSystemConfig("um", moduletypeconfig);
for (int id = 1; id < moduletypeconfig.ModuleCount + 1; id++)
{
string volumeNameID = "UMVolumeName" + oam.itoa(id);
string volumeName = oam::UnassignedName;
string deviceNameID = "UMVolumeDeviceName" + oam.itoa(id);
string deviceName = oam::UnassignedName;
try
{
oam.getSystemConfig(volumeNameID, volumeName);
oam.getSystemConfig(deviceNameID, deviceName);
}
catch (...)
{
// Missing keys leave the UnassignedName defaults in place.
}
fprintf(pOutputFile, "Amazon EC2 Volume Name/Device Name for 'um%i': %s, %s", id, volumeName.c_str(),
deviceName.c_str());
}
}
// pm volumes
// NOTE(review): this loop fetches PM volume/device names but never
// prints them -- the fprintf for PM volumes appears to have been
// removed at some point; confirm whether output is intended here.
if (cloud == "amazon" && boost::get<0>(t) == "external")
{
fprintf(pOutputFile, "\n");
DBRootConfigList dbrootConfigList;
try
{
oam.getSystemDbrootConfig(dbrootConfigList);
DBRootConfigList::iterator pt = dbrootConfigList.begin();
for (; pt != dbrootConfigList.end(); pt++)
{
string volumeNameID = "PMVolumeName" + oam.itoa(*pt);
string volumeName = oam::UnassignedName;
string deviceNameID = "PMVolumeDeviceName" + oam.itoa(*pt);
string deviceName = oam::UnassignedName;
try
{
oam.getSystemConfig(volumeNameID, volumeName);
oam.getSystemConfig(deviceNameID, deviceName);
}
catch (...)
{
continue;
}
}
}
catch (exception& e)
{
cout << endl << "**** getSystemDbrootConfig Failed : " << e.what() << endl;
}
// print un-assigned dbroots
/*DBRootConfigList::iterator pt1 = undbrootlist.begin();
for ( ; pt1 != undbrootlist.end() ; pt1++)
{
string volumeNameID = "PMVolumeName" + oam.itoa(*pt1);
string volumeName = oam::UnassignedName;
string deviceNameID = "PMVolumeDeviceName" + oam.itoa(*pt1);
string deviceName = oam::UnassignedName;
try
{
oam.getSystemConfig( volumeNameID, volumeName);
oam.getSystemConfig( deviceNameID, deviceName);
}
catch (...)
{
continue;
}
}*/
}
// Data-redundancy section: only emitted when DataRedundancyConfig == "y".
string DataRedundancyConfig;
int DataRedundancyCopies;
string DataRedundancyStorageType;
try
{
oam.getSystemConfig("DataRedundancyConfig", DataRedundancyConfig);
oam.getSystemConfig("DataRedundancyCopies", DataRedundancyCopies);
oam.getSystemConfig("DataRedundancyStorageType", DataRedundancyStorageType);
}
catch (...)
{
// Redundancy settings are optional; absence simply skips the section.
}
if (DataRedundancyConfig == "y")
{
fprintf(pOutputFile, "\nData Redundant Configuration\n\n");
fprintf(pOutputFile, "Copies Per DBroot = %i", DataRedundancyCopies);
oamModuleInfo_t st;
string moduleType;
try
{
st = oam.getModuleInfo();
moduleType = boost::get<1>(st);
}
catch (...)
{
// moduleType stays empty -> the "pm" check below fails -> return.
}
// The per-DBRoot copy report can only be produced on a PM.
if (moduleType != "pm")
return;
try
{
DBRootConfigList dbrootConfigList;
oam.getSystemDbrootConfig(dbrootConfigList);
DBRootConfigList::iterator pt = dbrootConfigList.begin();
for (; pt != dbrootConfigList.end(); pt++)
{
fprintf(pOutputFile, "DBRoot #%u has copies on PMs = ", *pt);
// pmList would be filled by glusterctl (call currently disabled),
// so the tokenizer below normally iterates an empty string.
string pmList = "";
try
{
string errmsg;
// oam.glusterctl(oam::GLUSTER_WHOHAS, oam.itoa(*pt), pmList, errmsg);
}
catch (...)
{
}
boost::char_separator<char> sep(" ");
boost::tokenizer<boost::char_separator<char> > tokens(pmList, sep);
for (boost::tokenizer<boost::char_separator<char> >::iterator it = tokens.begin();
it != tokens.end(); ++it)
{
fprintf(pOutputFile, "%s ", (*it).c_str());
}
fprintf(pOutputFile, "\n");
}
fprintf(pOutputFile, "\n");
}
catch (exception& e)
{
cout << endl << "**** getSystemDbrootConfig Failed : " << e.what() << endl;
}
}
}
catch (exception& e)
{
cout << endl << "**** getStorageConfig Failed : " << e.what() << endl;
}
}
void getStorageStatus(FILE* pOutputFile)
{
Oam oam;
fprintf(pOutputFile, "System External DBRoot Storage Statuses\n\n");
fprintf(pOutputFile, "Component Status Last Status Change\n");
fprintf(pOutputFile, "------------ -------------------------- ------------------------\n");
/*try
{
oam.getSystemStatus(systemstatus, false);
if ( systemstatus.systemdbrootstatus.dbrootstatus.size() == 0 )
{
fprintf(pOutputFile," No External DBRoot Storage Configured\n\n");
return;
}
for ( unsigned int i = 0 ; i < systemstatus.systemdbrootstatus.dbrootstatus.size(); i++)
{
if ( systemstatus.systemdbrootstatus.dbrootstatus[i].Name.empty() )
// end of list
break;
int state = systemstatus.systemdbrootstatus.dbrootstatus[i].OpState;
string stime = systemstatus.systemdbrootstatus.dbrootstatus[i].StateChangeDate ;
stime = stime.substr (0, 24);
fprintf(pOutputFile,"DBRoot%s%-29s%-24s\n",
systemstatus.systemdbrootstatus.dbrootstatus[i].Name.c_str(),
oamState[state].c_str(),
stime.c_str());
}
fprintf(pOutputFile,"\n");
}
catch (exception& e)
{
cout << endl << "**** getSystemStatus Failed = " << e.what() << endl;
}*/
string DataRedundancyConfig;
int DataRedundancyCopies;
string DataRedundancyStorageType;
try
{
oam.getSystemConfig("DataRedundancyConfig", DataRedundancyConfig);
oam.getSystemConfig("DataRedundancyCopies", DataRedundancyCopies);
oam.getSystemConfig("DataRedundancyStorageType", DataRedundancyStorageType);
}
catch (...)
{
}
}
/********************************************************************
*
* checkLogStatus - Check for a phrase in a log file and return status
*
********************************************************************/
/*
 * Scan a log file line by line for a phrase.
 * Returns true on the first line containing the phrase; false when the
 * phrase is absent, the file cannot be opened, or a read error occurs.
 */
bool checkLogStatus(std::string fileName, std::string phrase)
{
  std::ifstream logFile(fileName.c_str());

  if (!logFile.is_open())
    return false;  // unreadable file counts as "phrase not found"

  std::string line;

  while (std::getline(logFile, line))
  {
    if (line.find(phrase) != std::string::npos)
      return true;  // phrase located
  }

  // EOF reached (or stream error) without a match.
  return false;
}
/******************************************************************************************
* @brief Get Network IP Address for Host Name
*
* purpose: Get Network IP Address for Host Name
*
******************************************************************************************/
/******************************************************************************************
 * @brief Get Network IP Address for Host Name
 *
 * purpose: Resolve hostName via gethostbyname() and return its first IPv4
 *          address in dotted-decimal form, or "" when resolution fails.
 *
 * NOTE(review): the byte extraction below treats the network-byte-order
 * address as a host integer and then prints the bytes in reverse, which
 * produces the correct dotted order only on little-endian hosts.
 * inet_ntop() would be endian-safe -- confirm targets before changing.
 ******************************************************************************************/
string getIPAddress(string hostName)
{
  struct hostent* ent;
  string IPAddr = "";
  Oam oam;

  ent = gethostbyname(hostName.c_str());

  if (ent != 0)
  {
    // BUGFIX: was a function-local 'static' -- that gave no caching benefit
    // (it was overwritten on every call) and made the function thread-unsafe.
    uint32_t my_bind_addr = (uint32_t)((in_addr*)ent->h_addr_list[0])->s_addr;
    uint8_t split[4];
    uint32_t ip = my_bind_addr;
    split[0] = (ip & 0xff000000) >> 24;
    split[1] = (ip & 0x00ff0000) >> 16;
    split[2] = (ip & 0x0000ff00) >> 8;
    split[3] = (ip & 0x000000ff);
    // Reassemble lowest byte first (see endianness note above).
    IPAddr =
        oam.itoa(split[3]) + "." + oam.itoa(split[2]) + "." + oam.itoa(split[1]) + "." + oam.itoa(split[0]);
  }

  return IPAddr;
}

View File

@@ -1,66 +0,0 @@
#! /bin/sh
#
# $Id: resourceReport.sh 421 2007-04-05 15:46:55Z dhill $
#
# Gather a resource-usage report (shared memory, disk, BRM files, table
# locks, extent map) for one module and append it to a report file.
#
# Args: $1 - module name (defaults to "pm1")
#       $2 - output file  (defaults to <module>_logReport.txt)
# Module name: first positional argument, or "pm1" when absent.
if [ $1 ] ; then
MODULE=$1
else
MODULE="pm1"
fi
# Output file: second positional argument, or "<module>_logReport.txt".
if [ $2 ] ; then
OUT_FILE=$2
else
OUT_FILE=${MODULE}_logReport.txt
fi
# Everything inside this group is appended to $OUT_FILE in one shot.
{
echo " "
echo "******************** Resource Usage Report for ${MODULE} ********************"
echo " "
echo " "
echo "-- Shared Memory --"
echo " "
echo "################# ipcs -l #################"
echo " "
# System-wide IPC limits.
ipcs -l
echo "################# clearShm -n #################"
echo " "
# Columnstore shared-memory report (-n = report only, no clearing).
clearShm -n
echo " "
echo "-- Disk Usage --"
echo " "
echo "################# df -k #################"
echo " "
df -k
echo " "
echo "-- Disk BRM Data files --"
echo " "
# Both legacy and current dbrm locations; errors silenced since only
# one of the two normally exists.
ls -l /var/lib/columnstore/data1/systemFiles/dbrm 2> /dev/null
ls -l /var/lib/columnstore/dbrm 2> /dev/null
echo "################# cat /var/lib/columnstore/data1/systemFiles/dbrm/BRM_saves_current #################"
echo " "
cat /var/lib/columnstore/data1/systemFiles/dbrm/BRM_saves_current 2> /dev/null
echo " "
echo "-- View Table Locks --"
echo " "
echo "################# cat bin/viewtablelock #################"
echo " "
viewtablelock 2> /dev/null
echo " "
echo "-- BRM Extent Map --"
echo " "
echo "################# bin/editem -i #################"
echo " "
# Dump the extent map inventory.
editem -i 2>/dev/null
} >> $OUT_FILE
exit 0

View File

@@ -1,30 +0,0 @@
#!/bin/bash
#
# $Id: hardwareReport.sh 421 2007-04-05 15:46:55Z dhill $
#
# Append the installed Columnstore package details for one module to a
# report file.
#
# Args: $1 - module name (defaults to "pm1")
#       $2 - output file  (defaults to <module>_logReport.txt)
# Module name: first positional argument, or "pm1" when absent.
if [ $1 ] ; then
MODULE=$1
else
MODULE="pm1"
fi
# Output file: second positional argument, or "<module>_logReport.txt".
if [ $2 ] ; then
OUT_FILE=$2
else
OUT_FILE=${MODULE}_logReport.txt
fi
# The whole report body is appended to $OUT_FILE in one redirection.
{
echo " "
echo "******************** Software Report for ${MODULE} ********************"
echo " "
echo " "
echo "-- Columnstore Package Details --"
echo " "
# RPM metadata for the engine package (rpm-based installs only).
rpm -qi MariaDB-columnstore-engine
echo " "
} >> $OUT_FILE
exit 0

View File

@@ -1,34 +0,0 @@
#
# Not used
#
# original Makefile.am contents follow:
# Copyright (C) 2014 InfiniDB, Inc.
#
# This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program; if not, write to the Free
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# $Id: Makefile.am 333 2009-04-03 20:35:04Z rdempsey $ Process this file with automake to produce Makefile.in
#
# AM_CPPFLAGS = $(idb_cppflags) AM_CFLAGS = $(idb_cflags) AM_CXXFLAGS = $(idb_cxxflags) AM_LDFLAGS = $(idb_ldflags)
# bin_PROGRAMS = ReplayTransactionLog ReplayTransactionLog_SOURCES = replaytransactionlog.cpp
# ReplayTransactionLog_CPPFLAGS = @idb_common_includes@ $(AM_CPPFLAGS) ReplayTransactionLog_LDFLAGS =
# @idb_common_ldflags@ @idb_exec_libs@ -lreplaytxnlog $(AM_LDFLAGS)
#
# test:
#
# coverage:
#
# leakcheck:
#
# docs:
#
# bootstrap: install-data-am
#

View File

@@ -1,156 +0,0 @@
/* Copyright (C) 2014 InfiniDB, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
// WWW - Add header comment.
#include <iostream>
#include <string>
#include <sstream>
#include "liboamcpp.h"
using namespace std;
using namespace oam;
#include "replaytxnlog.h"
namespace
{
// Print command-line help for the ReplayTransactionLog utility.
// prog: argv[0], echoed into the usage line.
// The -u/-p/-i/-e options are present in the getopt string but their
// help text (and handling) is commented out pending reinstatement.
void usage(char* prog)
{
cout << endl;
cout << "Usage: " << prog << " [options]" << endl;
cout << endl;
cout << "This utility can be used after a backup is restored to report transactions that " << endl;
cout << "occurred after the backup. It begins with the first transaction that was committed " << endl;
cout << "after the backup and reports DDL and DML statements as well as imports." << endl;
cout << endl;
cout << "Options:" << endl;
/*
cout << "-u <user> Database user id." << endl << endl;
cout << "-p <password> Password." << endl << endl;
*/
cout << "-d <stop date> Stop date and time as mm/dd/yy@hh:mm:ss or 'Now'." << endl;
cout << " Only transactions committed before this date and time will be reported." << endl;
cout << " The current date and time will be used if 'Now'." << endl << endl;
/*
cout << "-i Ignore bulk load log entries." << endl;
cout << " The program will pause and prompt at bulk load entries by default." << endl <<
endl;
cout << "-e Report mode. The sql statements will be displayed to the console only. No" <<
endl; cout << " transactions will be processed. The user and password will be ignored." <<
endl << endl;
*/
cout << "-h Display this help." << endl << endl;
}
// Return true when this process may run here: either on a performance
// module (pm) or on a combined DM/UM/PM install. Prints an error and
// returns false otherwise.
//
// The CALPONT_CSC_IDENT environment variable, when set, overrides the
// OAM-reported module type (useful for testing).
bool isRunningOnPm()
{
Oam oam;
oamModuleInfo_t t;
string moduleType;
// -1 = unknown; only a combined install bypasses the pm-only check below.
int installType = -1;
char* csc_ident = getenv("CALPONT_CSC_IDENT");
if (csc_ident == 0 || *csc_ident == 0)
{
// get local module info valdiate running on a pm
try
{
t = oam.getModuleInfo();
// Tuple positions per the boost::get calls: <1> module type,
// <5> install type -- assumed from usage here; TODO confirm.
moduleType = boost::get<1>(t);
installType = boost::get<5>(t);
}
catch (exception& e)
{
// NOTE(review): on OAM failure the module type is *assumed* to be
// "pm", so the check passes by default -- confirm this fail-open
// behavior is intended.
moduleType = "pm";
}
}
else
moduleType = csc_ident;
if (installType != oam::INSTALL_COMBINE_DM_UM_PM)
{
if (moduleType != "pm")
{
cerr << "Exiting, ReplayTransactionLog can only be run on a performance module (pm)" << endl;
return false;
}
}
return true;
}
} // namespace
/*
 * Entry point for ReplayTransactionLog: parse options and replay the
 * transaction log from the restored-backup point.
 *
 * Active options:  -d <stop date>  stop reporting at this commit time
 *                  -h              show help and exit
 * The -u/-p/-i/-e options remain in the getopt string but their handling
 * is commented out (see usage()).
 */
int main(int argc, char** argv)
{
  string user;
  string password;
  string stopDate;
  bool ignoreBulk = false;  // would be set by the disabled -i option
  bool reportMode = false;  // would be set by the disabled -e option

  // BUGFIX: getopt() returns int and signals end-of-options with -1.
  // Storing the result in a plain char breaks the != -1 comparison on
  // platforms where char is unsigned (e.g. ARM), looping forever.
  int c;

  while ((c = getopt(argc, argv, "u:p:d:ihe")) != -1)
  {
    switch (c)
    {
      /*
      case 'u':
        user = optarg;
        break;
      case 'p':
        password = optarg;
        break;
      */
      case 'd': stopDate = optarg; break;
      /*
      case 'i':
        ignoreBulk = true;
        break;
      case 'e':
        reportMode = true;
        break;
      */
      case 'h': usage(argv[0]); return 0;
      default: usage(argv[0]); return 1;
    }
  }

  // This utility may only run on a performance module.
  if (!isRunningOnPm())
  {
    return 0;
  }

  ReplayTxnLog replayTxnLog(user, password, stopDate, ignoreBulk, reportMode);
  replayTxnLog.process();
  return 0;
}

View File

@@ -1,66 +0,0 @@
/* Copyright (C) 2014 InfiniDB, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
/***************************************************************************
* wweeks@calpont.com *
* *
***************************************************************************/
using namespace std;
#include <iostream>
#include <cstdlib>
#include <sys/types.h>
#include <unistd.h>
#include "sessionmanager.h"
#include <cppunit/extensions/HelperMacros.h>
using namespace execplan;
int maxNewTxns = 1000;
int maxTxns = 1000;
// Empty CppUnit fixture skeleton: the suite is registered but contains no
// test cases (nothing between SUITE and SUITE_END), so running it always
// succeeds. Presumably a placeholder kept for future session-manager
// tests -- NOTE(review): confirm it is still worth keeping.
class ExecPlanTest : public CppUnit::TestFixture
{
CPPUNIT_TEST_SUITE(ExecPlanTest);
CPPUNIT_TEST_SUITE_END();
private:
public:
// Per-test setup: intentionally empty.
void setUp()
{
}
// Per-test teardown: intentionally empty.
void tearDown()
{
}
}; // test suite
#include <cppunit/extensions/TestFactoryRegistry.h>
#include <cppunit/ui/text/TestRunner.h>
int main(int argc, char* argv[])
{
CppUnit::TextUi::TestRunner runner;
CppUnit::TestFactoryRegistry& registry = CppUnit::TestFactoryRegistry::getRegistry();
runner.addTest(registry.makeTest());
bool wasSuccessful = runner.run("", false);
return (wasSuccessful ? 0 : 1);
}

View File

@@ -1,34 +0,0 @@
#
# Not used
#
# original Makefile.am contents follow:
# Copyright (C) 2014 InfiniDB, Inc.
#
# This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program; if not, write to the Free
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# $Id: Makefile.am 333 2009-04-03 20:35:04Z rdempsey $ Process this file with automake to produce Makefile.in
#
# AM_CPPFLAGS = $(idb_cppflags) AM_CFLAGS = $(idb_cflags) AM_CXXFLAGS = $(idb_cxxflags) AM_LDFLAGS = $(idb_ldflags)
# bin_PROGRAMS = sessionWalker sessionWalker_SOURCES = sessionwalker.cpp sessionWalker_CPPFLAGS = @idb_common_includes@
# $(AM_CPPFLAGS) sessionWalker_LDFLAGS = @idb_common_ldflags@ @idb_common_libs@ @idb_write_libs@ @netsnmp_libs@
# $(AM_LDFLAGS)
#
# test:
#
# coverage:
#
# leakcheck:
#
# docs:
#
# bootstrap: install-data-am
#

View File

@@ -1,135 +0,0 @@
/* Copyright (C) 2014 InfiniDB, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
/***************************************************************************
* $Id: sessionwalker.cpp 3072 2013-04-04 19:04:45Z rdempsey $
*
* jrodriguez@calpont.com
* *
***************************************************************************/
#include <iostream>
using namespace std;
#include "sessionmonitor.h"
using namespace execplan;
#include "vendordmlstatement.h"
#include "calpontdmlpackage.h"
#include "calpontdmlfactory.h"
using namespace dmlpackage;
#include "bytestream.h"
#include "messagequeue.h"
using namespace messageqcpp;
namespace
{
// Print command-line help for sessionwalker.
// NOTE(review): the banner says "[-d|-h]" but the options actually
// handled in main() are -r and -h -- the banner text looks stale.
void usage()
{
cout << "sessionwalker [-d|-h]" << endl
<< " -r rollback all transactions found" << endl
<< " -h display this help" << endl;
}
// Send a ROLLBACK DML package to DMLProc for the session that owns the
// given timed-out transaction. Best-effort: failures to build or deliver
// the package are silently ignored.
void rollback(const SessionMonitor::MonSIDTIDEntry& txn)
{
VendorDMLStatement dmlStmt("ROLLBACK;", txn.sessionid);
CalpontDMLPackage* pDMLPackage = CalpontDMLFactory::makeCalpontDMLPackage(dmlStmt);
// Factory returns null on failure; nothing to send in that case.
if (pDMLPackage == 0)
{
return;
}
ByteStream bytestream;
pDMLPackage->write(bytestream);
delete pDMLPackage;
// Connect to DMLProc by its configured service name.
MessageQueueClient mq("DMLProc");
try
{
cout << "sending ROLLBACK for sessionID " << txn.sessionid << endl;
mq.write(bytestream);
// Wait for DMLProc's reply; the response content is discarded.
bytestream = mq.read();
}
catch (...)
{
// Best-effort rollback: communication errors are deliberately swallowed.
}
}
} // namespace
// sessionwalker entry point: list timed-out transactions known to the
// SessionMonitor and, with -r, send a ROLLBACK for each.
int main(int argc, char** argv)
{
bool rflg = false;
opterr = 0;
int c;
while ((c = getopt(argc, argv, "rh")) != EOF)
switch (c)
{
case 'r': rflg = true; break;
case 'h':
usage();
return 0;
break;
default:
usage();
return 1;
break;
}
vector<SessionMonitor::MonSIDTIDEntry*> toTxns;
SessionMonitor* monitor = new SessionMonitor();
toTxns.clear();
toTxns = monitor->timedOutTxns(); // get timed out txns
// Keep only entries with a real session id (> 0).
vector<SessionMonitor::MonSIDTIDEntry*>::iterator iter = toTxns.begin();
vector<SessionMonitor::MonSIDTIDEntry*>::iterator end = toTxns.end();
vector<SessionMonitor::MonSIDTIDEntry*> tmp;
while (iter != end)
{
if ((*iter)->sessionid > 0)
tmp.push_back(*iter);
++iter;
}
toTxns.swap(tmp);
cout << toTxns.size() << " timed out transactions." << endl;
// Print each timed-out transaction; optionally roll it back.
for (unsigned idx = 0; idx < toTxns.size(); idx++)
{
monitor->printTxns(*toTxns[idx]);
if (rflg)
{
rollback(*toTxns[idx]);
}
}
// NOTE(review): the MonSIDTIDEntry pointers returned by timedOutTxns()
// are never freed here -- confirm whether the monitor owns them or this
// is a leak (harmless in practice since the process exits immediately).
delete monitor;
return 0;
}

View File

@@ -1,189 +0,0 @@
/* Copyright (C) 2014 InfiniDB, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
/***************************************************************************
* jrodriguez@calpont.com *
* *
***************************************************************************/
using namespace std;
#include <iostream>
#include <cstdlib>
#include <sys/types.h>
#include <unistd.h>
#include "sessionmonitor.h"
#include "sessionmanager.h"
#include <cppunit/extensions/HelperMacros.h>
using namespace execplan;
int maxNewTxns = 1000;
int maxTxns = 1000;
// CppUnit fixture exercising SessionMonitor's transaction-timeout
// detection against a live SessionManager. The single registered case,
// MonitorTestPlan_1, creates batches of transactions, lets them age past
// the monitor's limit via sleep(), and checks timedOutTxns() counts.
// NOTE(review): the test is wall-clock dependent (sleep-based), so it can
// be flaky on loaded machines.
class ExecPlanTest : public CppUnit::TestFixture
{
CPPUNIT_TEST_SUITE(ExecPlanTest);
CPPUNIT_TEST(MonitorTestPlan_1);
CPPUNIT_TEST_SUITE_END();
private:
public:
// Per-test setup: intentionally empty (state is built inside the test).
void setUp()
{
}
// Per-test teardown: intentionally empty.
void tearDown()
{
}
// Most recent result of manager->verifySize().
int verifyLen;
// Shared SessionManager under test; created/destroyed by the test case.
SessionManager* manager;
// Transaction handles indexed by txn number; sized for maxTxns (1000).
SessionManager::TxnID managerTxns[1000];
// Open new transactions for session ids (start+1000 .. end-1+1000).
// Returns the number created; asserts each handle is valid.
int createTxns(const int& start, const int& end)
{
int first = start;
int last = end;
int newTxns = 0;
verifyLen = manager->verifySize();
for (int idx = first; idx < last && verifyLen < maxNewTxns; idx++)
{
// Session ids are offset by 1000 to avoid clashing with real sessions.
managerTxns[idx] = manager->newTxnID((uint32_t)idx + 1000);
CPPUNIT_ASSERT(managerTxns[idx].id > 0);
CPPUNIT_ASSERT(managerTxns[idx].valid == true);
verifyLen = manager->verifySize();
CPPUNIT_ASSERT(verifyLen > 0);
newTxns++;
}
CPPUNIT_ASSERT(newTxns == last - first);
return newTxns;
}
// Commit (close) the transactions for sessions (start+1000 .. end-1+1000).
// Returns how many were actually open and got committed.
int closeTxns(const int& start, const int& end)
{
int first = start;
int last = end;
int totalClosed = 0;
for (int idx = first; idx < last; idx++)
{
try
{
SessionManager::TxnID tmp = manager->getTxnID(idx + 1000);
if (tmp.valid == true)
{
manager->committed(tmp);
// committed() is expected to invalidate the handle in place.
CPPUNIT_ASSERT(tmp.valid == false);
totalClosed++;
}
}
catch (exception& e)
{
cerr << e.what() << endl;
continue;
}
}
return totalClosed;
} // closeTxns
// Main scenario: create 3 batches of 5 txns, sleeping past the age
// limit between batches, and verify the monitor reports exactly the
// txns old enough to have timed out; then close everything and verify
// no timeouts remain.
void MonitorTestPlan_1()
{
int currStartTxn = 0;
int currEndTxn = 5;
int txnCntIncr = 5;
const int sleepTime = 1;
const int iterMax = 1;
vector<SessionMonitor::MonSIDTIDEntry*> toTxns;
manager = new SessionManager();
// CPPUNIT_ASSERT(manager->verifySize()==0);
SessionMonitor* monitor = NULL;
for (int jdx = 0; jdx < iterMax; jdx++)
{
// store the current state of the SessionManager
monitor = new SessionMonitor();
monitor->AgeLimit(sleepTime);
delete monitor;
int idx = 0;
int grpStart = currStartTxn;
for (idx = 0; idx < 3; idx++)
{
createTxns(currStartTxn, currEndTxn);
// CPPUNIT_ASSERT(manager->verifySize()==(idx+1)*txnCntIncr);
currStartTxn += txnCntIncr;
currEndTxn += txnCntIncr;
sleep(sleepTime + 1); // make sessions time out
monitor = new SessionMonitor(); // read Monitor data
monitor->AgeLimit(sleepTime);
toTxns.clear();
toTxns = monitor->timedOutTxns(); // get timed out txns
// Only the txns from *previous* batches have aged past the limit.
CPPUNIT_ASSERT(toTxns.size() == (uint32_t)txnCntIncr * idx);
delete monitor;
}
int grpEnd = currEndTxn;
monitor = new SessionMonitor();
monitor->AgeLimit(sleepTime);
closeTxns(grpStart, grpEnd); // close this iteration of txns
// CPPUNIT_ASSERT(manager->verifySize()==0);
toTxns = monitor->timedOutTxns(); // get timed out txns
CPPUNIT_ASSERT(toTxns.size() == 0);
delete monitor;
}
monitor = new SessionMonitor(); // readload Monitor data
monitor->AgeLimit(sleepTime - 1);
toTxns.clear();
toTxns = monitor->timedOutTxns(); // get timed out txns
CPPUNIT_ASSERT(toTxns.size() == 0);
delete monitor;
// CPPUNIT_ASSERT(manager->verifySize()==0);
delete manager;
}
}; // test suite
CPPUNIT_TEST_SUITE_REGISTRATION(ExecPlanTest);
#include <cppunit/extensions/TestFactoryRegistry.h>
#include <cppunit/ui/text/TestRunner.h>
int main(int argc, char* argv[])
{
CppUnit::TextUi::TestRunner runner;
CppUnit::TestFactoryRegistry& registry = CppUnit::TestFactoryRegistry::getRegistry();
runner.addTest(registry.makeTest());
bool wasSuccessful = runner.run("", false);
return (wasSuccessful ? 0 : 1);
}

View File

@@ -14,4 +14,4 @@ set(dbbc_STAT_SRCS
fsutils.cpp
)
columnstore_static_library(dbbc ${dbbc_STAT_SRCS})
columnstore_link(dbbc ${NETSNMP_LIBRARIES} loggingcpp)
columnstore_link(dbbc loggingcpp)

View File

@@ -5,4 +5,4 @@ include_directories(${ENGINE_COMMON_INCLUDES} ../blockcache ../primproc)
set(processor_STAT_SRCS primitiveprocessor.cpp dictionary.cpp column.cpp)
columnstore_static_library(processor ${processor_STAT_SRCS})
columnstore_link(processor ${NETSNMP_LIBRARIES} loggingcpp)
columnstore_link(processor loggingcpp)

View File

@@ -40,7 +40,6 @@ using namespace boost;
#include "simd_sse.h"
#include "simd_arm.h"
#include "utils/common/columnwidth.h"
#include "utils/common/bit_cast.h"
#include "exceptclasses.h"

View File

@@ -3,25 +3,26 @@ include_directories(${ENGINE_COMMON_INCLUDES} ../blockcache ../linux-port)
# ########## next target ###############
set(PrimProc_SRCS
primproc.cpp
activestatementcounter.cpp
batchprimitiveprocessor.cpp
bppseeder.cpp
bppsendthread.cpp
columncommand.cpp
command.cpp
dictstep.cpp
femsghandler.cpp
filtercommand.cpp
logger.cpp
passthrucommand.cpp
primitiveserver.cpp
primproc.cpp
pseudocc.cpp
rssmonfcn.cpp
rtscommand.cpp
umsocketselector.cpp
samenodepseudosocket.cpp
serviceexemgr.cpp
sqlfrontsessionthread.cpp
rssmonfcn.cpp
activestatementcounter.cpp
femsghandler.cpp
umsocketselector.cpp
../../utils/common/crashtrace.cpp
)
@@ -31,11 +32,11 @@ target_include_directories(PrimProc PRIVATE ${Boost_INCLUDE_DIRS})
columnstore_link(
PrimProc
${ENGINE_LDFLAGS}
${NETSNMP_LIBRARIES}
${ENGINE_WRITE_LIBS}
threadpool
cacheutils
dbbc
processor
loggingcpp
statistics_manager
)

View File

@@ -78,7 +78,7 @@
#include "dbrm.h"
#include "mariadb_my_sys.h"
#include "statistics.h"
#include "statistics_manager/statistics.h"
#include "serviceexemgr.h"
#include "sqlfrontsessionthread.h"

View File

@@ -58,7 +58,7 @@
#include "dbrm.h"
#include "mariadb_my_sys.h"
#include "statistics.h"
#include "statistics_manager/statistics.h"
namespace exemgr
{
@@ -69,7 +69,7 @@ class Opt
int m_debug;
bool m_e;
bool m_fg;
Opt() : m_debug(0), m_e(false), m_fg(false){};
Opt() : m_debug(0), m_e(false), m_fg(false) {};
Opt(int argc, char* argv[]) : m_debug(0), m_e(false), m_fg(false)
{
int c;

View File

@@ -56,13 +56,13 @@
#include "dbrm.h"
#include "mariadb_my_sys.h"
#include "statistics.h"
#include "statistics_manager/statistics.h"
#include "serviceexemgr.h"
namespace exemgr
{
class SQLFrontSessionThread
{
class SQLFrontSessionThread
{
public:
SQLFrontSessionThread(const messageqcpp::IOSocket& ios, joblist::DistributedEngineComm* ec,
joblist::ResourceManager* rm)
@@ -125,7 +125,8 @@ namespace exemgr
void analyzeTableExecute(messageqcpp::ByteStream& bs, joblist::SJLP& jl, bool& stmtCounted);
void analyzeTableHandleStats(messageqcpp::ByteStream& bs);
uint64_t roundMB(uint64_t value) const;
public:
void operator()();
};
}
};
} // namespace exemgr

View File

@@ -64,7 +64,7 @@ configure_file(
link_directories(${CMAKE_BINARY_DIR}/lib)
set(CMAKE_INSTALL_RPATH $ORIGIN $ORIGIN/../lib)
columnstore_library(storagemanager SHARED ${storagemanager_SRCS})
columnstore_library(storagemanager ${storagemanager_SRCS})
add_dependencies(storagemanager marias3 external_boost)
target_compile_definitions(storagemanager PUBLIC BOOST_NO_CXX11_SCOPED_ENUMS)
@@ -82,7 +82,6 @@ target_include_directories(storagemanager PRIVATE ${Boost_INCLUDE_DIRS})
columnstore_executable(StorageManager src/main.cpp)
columnstore_link(StorageManager storagemanager)
set_property(TARGET StorageManager PROPERTY CXX_STANDARD 20)
set(TMPDIR ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})

View File

@@ -19,12 +19,12 @@
#include <deque>
#include <string>
#include <map>
#include <memory>
#include <optional>
#include <unordered_map>
#include "CloudStorage.h"
#include "libmarias3/marias3.h"
#include "Config.h"
#include <curl/curl.h>
namespace storagemanager

View File

@@ -1,83 +0,0 @@
#!/bin/bash
# Run the Columnstore MTR test suites against an already-installed,
# already-running server (--extern mode).
#
# Usage: $0 [suite [extra-mtr-args]]
#   no args -> run the standard battery of suites listed at the bottom
SCRIPT_LOCATION=$(dirname "$0")
MARIADB_SOURCE_PATH=$(realpath "$SCRIPT_LOCATION/../../../../../")
COLUMNSTORE_MTR_SOURCE=$(realpath "$SCRIPT_LOCATION/../../mysql-test/columnstore")
INSTALLED_MTR_PATH='/usr/share/mysql/mysql-test'
COLUMSNTORE_MTR_INSTALLED=${INSTALLED_MTR_PATH}/plugin/columnstore/columnstore/
PATCHNAME=$(realpath "$SCRIPT_LOCATION")/mtr_warn.patch
CURRENT_DIR=$(pwd)
mysql -e "create database if not exists test;"
SOCKET=$(mysql -e "show variables like 'socket';" | grep socket | cut -f2)
export ASAN_OPTIONS=abort_on_error=1:disable_coredump=0,print_stats=false,detect_odr_violation=0,check_initialization_order=1,detect_stack_use_after_return=1,atexit=false,log_path=/core/asan.hz
# needed when run MTR tests locally, see mariadb-test-run.pl:417, mtr functions
# are added to the database mtr only when --extern is not specified
add_mtr_warn_functions()
{
echo "Adding mtr warnings functions..."
cd /tmp
mysql -e "drop database if exists mtr";
cp ${MARIADB_SOURCE_PATH}/mysql-test/include/mtr_warnings.sql mtr_warnings.sql
patch -p1 < ${PATCHNAME}
mysql -e "create database if not exists mtr;"
mysql mtr < mtr_warnings.sql
rm mtr_warnings.sql
cd -
echo "MTR Warnings function added"
}
cd ${INSTALLED_MTR_PATH}
if [[ ! -d ${COLUMSNTORE_MTR_INSTALLED} ]]; then
# BUGFIX: this message was single-quoted, so the path variables were
# printed literally instead of being expanded.
echo " ・ Adding symlink for columnstore tests to ${COLUMSNTORE_MTR_INSTALLED} from ${COLUMNSTORE_MTR_SOURCE} "
ln -s ${COLUMNSTORE_MTR_SOURCE} ${COLUMSNTORE_MTR_INSTALLED}
fi
if [[ ! -d '/data/qa/source/dbt3/' || ! -d '/data/qa/source/ssb/' ]]; then
echo ' ・ Downloading and extracting test data for full MTR to /data'
bash -c "wget -qO- https://cspkg.s3.amazonaws.com/mtr-test-data.tar.lz4 | lz4 -dc - | tar xf - -C /"
fi
# Run one suite: $1 = suite name, $2 = optional extra mtr arguments.
# Snapshots /core before and after so new ASAN reports / coredumps show up
# in the diff; restarts mariadb between suites to flush dump analyses.
run_suite()
{
ls /core >$CURRENT_DIR/mtr.$1.cores-before
# BUGFIX: '2>&1' used to come after 'tee', redirecting tee's stderr
# instead of mtr's; mtr's stderr was therefore missing from the log.
./mtr --force --extern=socket=${SOCKET} --max-test-fail=0 --testcase-timeout=60 --suite=columnstore/$1 $2 2>&1 | tee $CURRENT_DIR/mtr.$1.log
# dump analyses.
systemctl stop mariadb
systemctl start mariadb
ls /core >$CURRENT_DIR/mtr.$1.cores-after
echo "reports or coredumps:"
diff -u $CURRENT_DIR/mtr.$1.cores-before $CURRENT_DIR/mtr.$1.cores-after && echo "no new reports or coredumps"
rm $CURRENT_DIR/mtr.$1.cores-before $CURRENT_DIR/mtr.$1.cores-after
}
add_mtr_warn_functions
# Single-suite invocations exit with status 1 unconditionally.
# NOTE(review): presumably deliberate (force CI to inspect the log) --
# confirm before changing to a real pass/fail status.
if (( $# == 2 )); then
run_suite "$1" "$2"
exit 1
fi
if (( $# == 1 )); then
run_suite "$1"
exit 1
fi
run_suite basic
run_suite bugfixes
run_suite setup
run_suite devregression
run_suite autopilot
run_suite extended
run_suite multinode
run_suite oracle
run_suite 1pmonly
cd -

107
tests/scripts/run_mtr.sh Executable file
View File

@@ -0,0 +1,107 @@
#!/bin/bash
# run_mtr.sh -- run MariaDB ColumnStore MTR suites against an installed server.
# All paths are derived from this script's location inside the source tree.
SCRIPT_LOCATION=$(dirname "$0")
COLUMNSTORE_SOURCE_PATH=$(realpath $SCRIPT_LOCATION/../../)
MARIADB_SOURCE_PATH=$(realpath $SCRIPT_LOCATION/../../../../../)
COLUMNSTORE_MTR_SOURCE=$(realpath $COLUMNSTORE_SOURCE_PATH/mysql-test/columnstore)
INSTALLED_MTR_PATH='/usr/share/mysql/mysql-test/'
PATCHNAME=$(realpath $SCRIPT_LOCATION)/mtr_warn.patch
CURRENT_DIR=$(pwd)
# utils.sh provides message() and the optparse.* helpers used below.
source $COLUMNSTORE_SOURCE_PATH/build/utils.sh
optparse.define short=s long=suite desc="whole suite to run" variable=SUITE_NAME
optparse.define short=t long=test_full_name desc="Testname with suite as like bugfixes.mcol-4899" variable=TEST_FULL_NAME default=""
optparse.define short=f long=full desc="Run full MTR" variable=RUN_FULL default=false value=true
optparse.define short=r long=record desc="Record the result" variable=RECORD default=false value=true
optparse.define short=e long=no-extern desc="Run without --extern" variable=EXTERN default=true value=false
source $(optparse.build)
# Ensure the default database exists and discover the server socket that
# run_suite passes to mtr via --extern.
mariadb -e "create database if not exists test;"
SOCKET=$(mariadb -e "show variables like 'socket';" | grep socket | cut -f2)
# Abort on the first ASAN error; sanitizer reports are written under /core so
# run_suite's before/after scan of /core picks them up.
export ASAN_OPTIONS=abort_on_error=1:disable_coredump=0,print_stats=false,detect_odr_violation=0,check_initialization_order=1,detect_stack_use_after_return=1,atexit=false,log_path=/core/asan.hz
# needed when run MTR tests locally, see mariadb-test-run.pl:417, mtr functions
# are added to the database mtr only when --extern is not specified
# Build the `mtr` helper database: copy the stock mtr_warnings.sql from the
# server source tree, apply our local patch (mtr_warn.patch), and load the
# result.  Works in /tmp and returns to the previous directory when done.
add_mtr_warn_functions() {
message "Adding mtr warnings functions..."
cd /tmp
# Drop any stale copy so the patched definitions always win.
mariadb -e "drop database if exists mtr"
cp ${MARIADB_SOURCE_PATH}/mysql-test/include/mtr_warnings.sql mtr_warnings.sql
patch -p1 <${PATCHNAME}
mariadb -e "create database if not exists mtr;"
mariadb mtr <mtr_warnings.sql
rm mtr_warnings.sql
cd -
echo "MTR Warnings function added"
}
# Work from the installed mtr directory; the columnstore suites are exposed
# there through a symlink back into the source tree.
cd ${INSTALLED_MTR_PATH}
if [[ ! -d ${INSTALLED_MTR_PATH}/suite/columnstore ]]; then
message " ・ Adding symlink for columnstore tests to ${INSTALLED_MTR_PATH}/suite/columnstore from ${COLUMNSTORE_MTR_SOURCE}"
ln -s ${COLUMNSTORE_MTR_SOURCE} ${INSTALLED_MTR_PATH}/suite
fi
# Fetch the reference data sets (dbt3, ssb) required by the heavier suites.
if [[ ! -d '/data/qa/source/dbt3/' || ! -d '/data/qa/source/ssb/' ]]; then
message ' ・ Downloading and extracting test data for full MTR to /data'
bash -c "wget -qO- https://cspkg.s3.amazonaws.com/mtr-test-data.tar.lz4 | lz4 -dc - | tar xf - -C /"
fi
# Split "suite.testname" (e.g. bugfixes.mcol-4899) into suite and test parts.
if [[ -n $TEST_FULL_NAME ]]; then
SUITE_NAME="${TEST_FULL_NAME%%.*}"
TEST_NAME="${TEST_FULL_NAME#*.}"
fi
# Run one MTR suite and report any new entries that appeared in /core.
#   $1 - suite name under suite/columnstore
#   $2 - optional single test name within the suite
# Honors the EXTERN and RECORD command-line flags; writes mtr.<suite>.log
# into $CURRENT_DIR and restarts mariadb afterwards.
run_suite() {
  # Snapshot /core so new reports/coredumps produced by this suite stand out.
  ls /core >"$CURRENT_DIR/mtr.$1.cores-before"
  if [[ $EXTERN == true ]]; then
    EXTERN_FLAG="--extern=socket=${SOCKET}"
  else
    EXTERN_FLAG=""
  fi
  if [[ $RECORD == true ]]; then
    RECORD_FLAG="--record"
  else
    RECORD_FLAG=""
  fi
  # BUGFIX: 2>&1 must precede the pipe, otherwise it only redirects tee's own
  # stderr and mtr's stderr never reaches the log file.
  ./mtr --force $EXTERN_FLAG $RECORD_FLAG --max-test-fail=0 --testcase-timeout=60 \
    --suite=columnstore/$1 $2 2>&1 | tee "$CURRENT_DIR/mtr.$1.log"
  # Bounce the server -- presumably to flush pending reports into /core before
  # the second scan (original note: "dump analyses") -- TODO confirm.
  systemctl stop mariadb
  systemctl start mariadb
  ls /core >"$CURRENT_DIR/mtr.$1.cores-after"
  message "reports or coredumps:"
  diff -u "$CURRENT_DIR/mtr.$1.cores-before" "$CURRENT_DIR/mtr.$1.cores-after" && echo "no new reports or coredumps"
  rm "$CURRENT_DIR/mtr.$1.cores-before" "$CURRENT_DIR/mtr.$1.cores-after"
}
# Install the mtr-warning helper functions before any suite runs.
add_mtr_warn_functions
# --full runs the whole battery; otherwise run the suite/test selected via
# --suite / --test_full_name.
if [[ $RUN_FULL == true ]]; then
message "Running FULL MTR"
run_suite basic
run_suite bugfixes
run_suite setup
run_suite devregression
run_suite autopilot
run_suite extended
run_suite multinode
run_suite oracle
run_suite 1pmonly
else
# NOTE(review): if neither --suite nor --test_full_name was supplied,
# SUITE_NAME is empty and run_suite is invoked with no suite at all --
# confirm whether that should be rejected with a usage error instead.
message "Running suite $SUITE_NAME with test $TEST_NAME"
run_suite $SUITE_NAME $TEST_NAME
fi
# Return to the directory the script was started from.
cd -

View File

@@ -1,14 +1,14 @@
add_subdirectory(dbbuilder)
add_subdirectory(editem)
add_subdirectory(dbloadxml)
add_subdirectory(getConfig)
add_subdirectory(cplogger)
add_subdirectory(clearShm)
add_subdirectory(cleartablelock)
add_subdirectory(configMgt)
add_subdirectory(cplogger)
add_subdirectory(dbbuilder)
add_subdirectory(dbloadxml)
add_subdirectory(ddlcleanup)
add_subdirectory(editem)
add_subdirectory(getConfig)
add_subdirectory(idbmeminfo)
add_subdirectory(passwd)
add_subdirectory(rebuildEM)
add_subdirectory(setConfig)
add_subdirectory(viewtablelock)
add_subdirectory(cleartablelock)
add_subdirectory(ddlcleanup)
add_subdirectory(idbmeminfo)
add_subdirectory(rebuildEM)
add_subdirectory(passwd)
add_subdirectory(configMgt)

View File

@@ -6,4 +6,4 @@ set(cleartablelock_SRCS cleartablelock.cpp cleartablelockthread.cpp)
columnstore_executable(cleartablelock ${cleartablelock_SRCS})
columnstore_link(cleartablelock ${ENGINE_LDFLAGS} ${NETSNMP_LIBRARIES} ${ENGINE_WRITE_LIBS})
columnstore_link(cleartablelock ${ENGINE_LDFLAGS} ${ENGINE_WRITE_LIBS})

View File

@@ -5,4 +5,4 @@ include_directories(${ENGINE_COMMON_INCLUDES})
set(autoConfigure_SRCS autoConfigure.cpp)
add_executable(autoConfigure ${autoConfigure_SRCS})
columnstore_link(autoConfigure ${ENGINE_LDFLAGS} ${NETSNMP_LIBRARIES} ${ENGINE_EXEC_LIBS})
columnstore_link(autoConfigure ${ENGINE_LDFLAGS} ${ENGINE_EXEC_LIBS})

View File

@@ -6,4 +6,4 @@ set(dbbuilder_SRCS dbbuilder.cpp systemcatalog.cpp)
columnstore_executable(dbbuilder ${dbbuilder_SRCS})
columnstore_link(dbbuilder ${ENGINE_LDFLAGS} ${NETSNMP_LIBRARIES} ${ENGINE_WRITE_LIBS})
columnstore_link(dbbuilder ${ENGINE_LDFLAGS} ${ENGINE_WRITE_LIBS})

View File

@@ -5,4 +5,4 @@ include_directories(${ENGINE_COMMON_INCLUDES})
set(ddlcleanup_SRCS ddlcleanup.cpp)
columnstore_executable(ddlcleanup ${ddlcleanup_SRCS})
columnstore_link(ddlcleanup ${ENGINE_LDFLAGS} ${NETSNMP_LIBRARIES} ${ENGINE_WRITE_LIBS} ddlcleanuputil)
columnstore_link(ddlcleanup ${ENGINE_LDFLAGS} ${ENGINE_WRITE_LIBS} ddlcleanuputil)

View File

@@ -5,4 +5,4 @@ include_directories(${ENGINE_COMMON_INCLUDES})
set(editem_SRCS editem.cpp)
columnstore_executable(editem ${editem_SRCS})
columnstore_link(editem ${ENGINE_LDFLAGS} ${NETSNMP_LIBRARIES} ${ENGINE_EXEC_LIBS})
columnstore_link(editem ${ENGINE_LDFLAGS} ${ENGINE_EXEC_LIBS})

View File

@@ -1,9 +0,0 @@
include_directories(${ENGINE_COMMON_INCLUDES})
# ########## next target ###############
set(rgprint_SRCS rgprint.cpp)
columnstore_executable(rgprint ${rgprint_SRCS})
columnstore_link(rgprint ${ENGINE_LDFLAGS} ${NETSNMP_LIBRARIES} ${ENGINE_WRITE_LIBS})

View File

@@ -1,95 +0,0 @@
/* Copyright (C) 2021 MariaDB Corporation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <iostream>
#include <utils/rowgroup/rowgroup.h>

// Debug utility: reads an RGData dump file and prints the decoded RowGroup.
// The row-group layout is deserialized from a matching META-p<pid>-t<ptr>
// file when one can be derived from the dump file name, from ./META
// otherwise, or falls back to a hard-coded 5-column layout.
int main(int argc, char* argv[])
{
  if (argc < 2)
  {
    std::cerr << "Usage: " << argv[0] << " <dump file>" << std::endl;
    return 0;
  }
  rowgroup::RowGroup rg;
  // Strip the directory part so the file-name pattern below can match.
  char* p = strrchr(argv[1], '/');
  int rfd = -1;
  if (p == nullptr)
    p = argv[1];
  else
    ++p;  // BUGFIX: step past the '/', otherwise sscanf below never matches
  unsigned pid;
  void* agg;
  auto c = sscanf(p, "Agg-p%u-t%p-", &pid, &agg);
  if (c == 2)
  {
    char fname[1024];
    snprintf(fname, sizeof(fname), "META-p%u-t%p", pid, agg);
    rfd = open(fname, O_RDONLY);
  }
  if (rfd < 0)
    rfd = open("./META", O_RDONLY);
  if (rfd >= 0)
  {
    // Deserialize the RowGroup description from the META file.
    struct stat rst;
    fstat(rfd, &rst);
    messageqcpp::ByteStream rbs;
    rbs.needAtLeast(rst.st_size);
    rbs.restart();
    auto r = read(rfd, rbs.getInputPtr(), rst.st_size);
    if (r != rst.st_size)
      abort();
    rbs.advanceInputPtr(r);
    rg.deserialize(rbs);
    close(rfd);
  }
  else
  {
    // No metadata available: assume a fixed 5-column layout.  The trailing
    // "?" comments mark these values as guesses carried over from the dump.
    std::vector<uint32_t> pos{2, 6, 22, 30, 46, 54};          // ?
    std::vector<uint32_t> oids{3011, 3011, 3011, 3011, 3011}; // ?
    std::vector<uint32_t> keys{1, 1, 1, 1, 1};                // ?
    std::vector<execplan::CalpontSystemCatalog::ColDataType> col_t{
        execplan::CalpontSystemCatalog::INT, execplan::CalpontSystemCatalog::LONGDOUBLE,
        execplan::CalpontSystemCatalog::UBIGINT, execplan::CalpontSystemCatalog::LONGDOUBLE,
        execplan::CalpontSystemCatalog::UBIGINT};
    std::vector<uint32_t> csN{8, 8, 8, 8, 8};
    std::vector<uint32_t> scale{0, 0, 0, 0, 0};
    std::vector<uint32_t> prec{10, 4294967295, 9999, 4294967295, 19};
    rg = rowgroup::RowGroup(5, pos, oids, keys, col_t, csN, scale, prec, 20, false, std::vector<bool>{});
  }
  int fd = open(argv[1], O_RDONLY);
  if (fd < 0)
  {
    // BUGFIX: previously fstat/read ran on an invalid descriptor.
    std::cerr << "Cannot open " << argv[1] << std::endl;
    return 1;
  }
  struct stat st;
  fstat(fd, &st);
  messageqcpp::ByteStream bs;
  bs.needAtLeast(st.st_size);
  bs.restart();
  auto r = read(fd, bs.getInputPtr(), st.st_size);
  if (r != st.st_size)
    abort();
  bs.advanceInputPtr(r);
  rowgroup::RGData rst;
  rst.deserialize(bs);
  rg.setData(&rst);
  close(fd);
  std::cout << "RowGroup data:\n" << rg.toString() << std::endl;
  return 0;
}

View File

@@ -5,4 +5,4 @@ include_directories(${ENGINE_COMMON_INCLUDES})
set(setConfig_SRCS main.cpp)
columnstore_executable(mcsSetConfig ${setConfig_SRCS})
columnstore_link(mcsSetConfig ${ENGINE_LDFLAGS} ${NETSNMP_LIBRARIES} ${ENGINE_EXEC_LIBS})
columnstore_link(mcsSetConfig ${ENGINE_LDFLAGS} ${ENGINE_EXEC_LIBS})

Some files were not shown because too many files have changed in this diff Show More