mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git

Merge branch 'stable-23.10' into feat/MCOL-6072-parallel-scan-4-CES-4
@@ -8,7 +8,7 @@ local servers = {
 };

 local platforms = {
-  [current_branch]: ["rockylinux:8", "rockylinux:9", "debian:12", "ubuntu:22.04", "ubuntu:24.04"],
+  [current_branch]: ["rockylinux:8", "rockylinux:9", "rockylinux:10", "debian:12", "ubuntu:22.04", "ubuntu:24.04"],
 };
@@ -48,6 +48,7 @@ include(configureEngine)
 include(compiler_flags)
 include(misc)
 include(cpack_manage)
+include(selinux_policy)

 if(NOT __msg1_CS_NO_CXX20)
   add_subdirectory(dbcon/mysql)
@@ -23,7 +23,7 @@ COLUMSNTORE_SOURCE_PATH=$(realpath "$SCRIPT_LOCATION"/../)
 DEFAULT_MARIA_BUILD_PATH=$(realpath "$MDB_SOURCE_PATH"/../BuildOf_$(basename "$MDB_SOURCE_PATH"))

 BUILD_TYPE_OPTIONS=("Debug" "RelWithDebInfo")
-DISTRO_OPTIONS=("ubuntu:20.04" "ubuntu:22.04" "ubuntu:24.04" "debian:11" "debian:12" "rockylinux:8" "rockylinux:9")
+DISTRO_OPTIONS=("ubuntu:20.04" "ubuntu:22.04" "ubuntu:24.04" "debian:11" "debian:12" "rockylinux:8" "rockylinux:9" "rocky:10")

 GCC_VERSION="11"
 MDB_CMAKE_FLAGS=()
@@ -124,7 +124,7 @@ install_deps() {
     libjemalloc-dev liblz-dev liblzo2-dev liblzma-dev liblz4-dev libbz2-dev libbenchmark-dev libdistro-info-perl \
     graphviz devscripts ccache equivs eatmydata curl python3"

-  if [[ "$OS" == *"rockylinux:8"* || "$OS" == *"rocky:8"* ]]; then
+  if is_rocky_version $OS 8; then
     command="dnf install -y curl 'dnf-command(config-manager)' && dnf config-manager --set-enabled powertools && \
       dnf install -y libarchive cmake ${RPM_BUILD_DEPS}"
     if [[ $GCC_TOOLSET = false ]]; then
@@ -132,9 +132,8 @@ install_deps() {
     else
       command="$command && dnf install -y gcc-toolset-${GCC_VERSION} && . /opt/rh/gcc-toolset-${GCC_VERSION}/enable"
     fi
-  elif
-    [[ "$OS" == "rockylinux:9"* || "$OS" == "rocky:9"* ]]
-  then
+  elif is_rocky_version_ge $OS 9; then
     command="dnf install -y 'dnf-command(config-manager)' && dnf config-manager --set-enabled crb && \
       dnf install -y pcre2-devel gcc gcc-c++ curl-minimal ${RPM_BUILD_DEPS}"
@@ -145,6 +144,10 @@ install_deps() {
     exit 17
   fi

+  if is_rocky_version_ge $OS 10; then
+    command="${command} && dnf install -y selinux-policy-devel"
+  fi
+
   if [[ $OS == 'ubuntu:22.04' || $OS == 'ubuntu:24.04' ]]; then
     if [ -f /.dockerenv ]; then
       change_ubuntu_mirror us
@@ -46,13 +46,23 @@ install_deps() {

   cd "$COLUMNSTORE_SOURCE_PATH"/cmapi

-  if [[ "$OS" == "rockylinux:9" ]]; then
+  if is_rocky_version $OS 9; then
     retry_eval 5 "dnf install -q -y libxcrypt-compat yum-utils"
     retry_eval 5 "dnf config-manager --set-enabled devel && dnf update -q -y" # to make redhat-lsb-core available for rocky 9
   fi

+  # no redhat-lsb-release for rockylinux >= 10
+  if is_rocky_version $OS && ! is_rocky_version_ge $OS 10; then
+    retry_eval 5 "dnf update -q -y && dnf install -q -y redhat-lsb-core"
+  fi
+
+  if is_rocky_version_ge $OS 10; then
+    retry_eval 5 "dnf update -q -y && dnf install -q -y libxcrypt-compat"
+  fi
+
   if [[ "$PKG_FORMAT" == "rpm" ]]; then
-    retry_eval 5 "dnf update -q -y && dnf install -q -y epel-release wget zstd findutils gcc cmake make rpm-build redhat-lsb-core libarchive"
+    retry_eval 5 "dnf update -q -y && dnf install -q -y epel-release wget zstd findutils gcc cmake make rpm-build libarchive"
   else
     retry_eval 5 "apt-get update -qq -o Dpkg::Use-Pty=0 && apt-get install -qq -o Dpkg::Use-Pty=0 wget zstd findutils gcc cmake make dpkg-dev lsb-release"
   fi
@@ -54,13 +54,13 @@ start_container() {
   if [[ "$CONTAINER_NAME" == *smoke* ]]; then
     docker_run_args+=(--memory 3g)
   elif [[ "$CONTAINER_NAME" == *mtr* ]]; then
-    docker_run_args+=(--shm-size=500m --memory 8g --env MYSQL_TEST_DIR="$MTR_PATH")
+    docker_run_args+=(--shm-size=500m --memory 12g --env MYSQL_TEST_DIR="$MTR_PATH")
   elif [[ "$CONTAINER_NAME" == *cmapi* ]]; then
     docker_run_args+=(--env PYTHONPATH="${PYTHONPATH}")
   elif [[ "$CONTAINER_NAME" == *upgrade* ]]; then
     docker_run_args+=(--env UCF_FORCE_CONFNEW=1 --volume /sys/fs/cgroup:/sys/fs/cgroup:ro)
   elif [[ "$CONTAINER_NAME" == *regression* ]]; then
-    docker_run_args+=(--shm-size=500m --memory 12g)
+    docker_run_args+=(--shm-size=500m --memory 15g)
   else
     echo "Unknown container type: $CONTAINER_NAME"
     exit 1
build/security/columnstore.te (new file, 55 lines)
@@ -0,0 +1,55 @@
module columnstore 1.0;

require {
    type tmpfs_t;
    type var_lib_t;
    type mysqld_var_run_t;
    type mysqld_t;
    type user_home_t;
    type fs_t;
    type ipp_port_t;
    type unreserved_port_t;
    class file { getattr map open read write };
    class filesystem associate;
    class tcp_socket { name_connect bind listen };
}

# Define new types for ColumnStore
type columnstore_t;
type columnstore_db_t;
type columnstore_tmp_t;
type columnstore_exec_t;

# Type attributes
domain_type(columnstore_t)
files_type(columnstore_db_t)
files_type(columnstore_tmp_t)

# ColumnStore permissions
allow columnstore_t columnstore_db_t:file { getattr map open read write };
allow columnstore_t columnstore_tmp_t:file { getattr map open read write };
allow columnstore_t columnstore_tmp_t:filesystem associate;
allow columnstore_t mysqld_var_run_t:file map;
allow columnstore_t self:file map;

# Allow MariaDB to read ColumnStore database files
allow mysqld_t columnstore_db_t:file { open read };

allow mysqld_t tmpfs_t:file { read write open getattr map };
allow mysqld_t var_lib_t:file { read open };
allow mysqld_t user_home_t:file { read open };
allow mysqld_t columnstore_db_t:file { read open };

allow mysqld_t fs_t:filesystem associate;

# MariaDB to ColumnStore network access
allow mysqld_t unreserved_port_t:tcp_socket name_connect;
allow mysqld_t ipp_port_t:tcp_socket { name_connect bind listen };
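To exercise the module outside of packaging, it can be compiled and loaded by hand with the standard SELinux devel Makefile, the same one the CMake rule below invokes (a minimal sketch; requires the selinux-policy-devel package and root for semodule):

cd build/security
make -f /usr/share/selinux/devel/Makefile columnstore.pp   # compile .te -> .pp
sudo semodule -i columnstore.pp                            # install/upgrade the module
semodule -l | grep columnstore                             # confirm it is registered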
build/selinux_policy_rpm_post.sh (new file, 28 lines)
@@ -0,0 +1,28 @@
#!/bin/sh
# Post-install script to load ColumnStore SELinux policy if SELinux is enabled
# This script must not introduce new runtime dependencies; it only uses coreutils and typical SELinux tools if present.

set -e

POLICY_PATH="/usr/share/columnstore/policy/selinux/columnstore.pp"

# If SELinux tooling is not present, or policy file missing, silently exit
command -v getenforce >/dev/null 2>&1 || exit 0
command -v semodule >/dev/null 2>&1 || exit 0

# Only attempt to install when SELinux is enforcing or permissive
MODE=$(getenforce 2>/dev/null || echo Disabled)
case "$MODE" in
    Enforcing|Permissive)
        if [ -r "$POLICY_PATH" ]; then
            # Install or upgrade the module; do not fail the entire package if this fails
            semodule -i "$POLICY_PATH" || true
        fi
        ;;
    *)
        # Disabled or unknown, do nothing
        :
        ;;
esac

exit 0
build/selinux_policy_rpm_postun.sh (new file, 15 lines)
@@ -0,0 +1,15 @@
#!/bin/sh
# Post-uninstall script to remove ColumnStore SELinux policy module if present
# No new runtime dependencies; use SELinux tools only if available.

set -e

# If SELinux tooling is not present, silently exit
command -v semodule >/dev/null 2>&1 || exit 0

# Remove the module if it is installed; do not fail package removal if this fails
if semodule -l 2>/dev/null | grep -q '^columnstore\b'; then
    semodule -r columnstore || true
fi

exit 0
@@ -604,3 +604,58 @@ change_ubuntu_mirror_in_docker() {

   execInnerDocker "$container_name" "$docker_funcs; change_ubuntu_mirror ${region}"
 }

+is_rocky_version() {
+  local image="$1"
+  local version="$2"
+
+  if [[ -z "$image" ]]; then
+    echo "Usage: is_rocky_version <image> [version]"
+    return 1
+  fi
+
+  if [[ "$image" == *"rockylinux"* || "$image" == *"rocky"* ]]; then
+    if [[ -n "$version" ]]; then
+      if [[ "$image" == *":$version"* ]]; then
+        return 0 # matches Rocky Linux with version
+      else
+        return 1 # Rocky Linux but wrong version
+      fi
+    else
+      return 0 # matches Rocky Linux, any version
+    fi
+  fi
+
+  return 1 # not Rocky Linux
+}
+
+is_rocky_version_ge() {
+  local image="$1"
+  local min_version="$2"
+
+  if [[ -z "$image" || -z "$min_version" ]]; then
+    echo "Usage: is_rocky_version_ge <image> <min_version>"
+    return 1
+  fi
+
+  # First check if it's Rocky Linux at all
+  if ! is_rocky_version "$image"; then
+    return 1
+  fi
+
+  # Extract the version from the tag (after colon)
+  local tag="${image##*:}"
+  if [[ "$tag" == "$image" ]]; then
+    return 1 # no tag -> cannot compare
+  fi
+
+  # Major version (before dot if any)
+  local major="${tag%%.*}"
+
+  if [[ "$major" =~ ^[0-9]+$ && "$min_version" =~ ^[0-9]+$ ]]; then
+    ((major >= min_version))
+    return $?
+  fi
+
+  return 1
+}
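A quick sanity check of the two helpers (a sketch; the image strings follow the DISTRO_OPTIONS format used above):

is_rocky_version "rockylinux:8"          # returns 0: Rocky, any version
is_rocky_version "rockylinux:8" 8        # returns 0: Rocky with matching version
is_rocky_version "ubuntu:22.04" 8        # returns 1: not Rocky at all
is_rocky_version_ge "rocky:10" 9         # returns 0: major 10 >= 9
is_rocky_version_ge "rockylinux:8" 9     # returns 1: Rocky, but major 8 < 9
is_rocky_version_ge "rockylinux" 9       # returns 1: no tag, cannot compare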
@@ -128,7 +128,7 @@ with section("markup"):
     # If comment markup is enabled, don't reflow the first comment block in each
     # listfile. Use this to preserve formatting of your copyright/license
     # statements.
-    first_comment_is_literal = False
+    first_comment_is_literal = True

     # If comment markup is enabled, don't reflow any comment block which matches
     # this (regex) pattern. Default is `None` (disabled).
@@ -107,3 +107,46 @@ macro(columnstore_executable executable_name)
   endif()
   columnstore_install_target(${executable_name} ${ENGINE_BINDIR})
 endmacro()

+# Read /etc/os-release and output: ID (lowercase) and VERSION_ID major number
+function(columnstore_detect_os OUT_ID OUT_VER_MAJOR)
+  set(_os_id "")
+  set(_os_version_major "")
+
+  set(_os_release "/etc/os-release")
+  if(EXISTS "${_os_release}")
+    file(READ "${_os_release}" _osr)
+    # Extract ID
+    string(REGEX MATCH "\nID=([^\n]+)" _id_match "\nID=([^\n]+)" ${_osr})
+    if(_id_match)
+      string(REGEX REPLACE ".*\nID=\"?([^\"\n]+)\"?.*" "\\1" _os_id "${_osr}")
+      string(TOLOWER "${_os_id}" _os_id)
+    endif()
+    # Extract VERSION_ID major digits
+    string(REGEX MATCH "\nVERSION_ID=([^\n]+)" _vid_match "\nVERSION_ID=([^\n]+)" ${_osr})
+    if(_vid_match)
+      string(REGEX REPLACE ".*\nVERSION_ID=\"?([0-9]+).*" "\\1" _os_version_major "${_osr}")
+    endif()
+  endif()
+
+  set(${OUT_ID}
+      "${_os_id}"
+      PARENT_SCOPE
+  )
+  set(${OUT_VER_MAJOR}
+      "${_os_version_major}"
+      PARENT_SCOPE
+  )
+endfunction()
+
+# Check whether a given lowercase OS ID is RHEL-like (RHEL/Rocky/Alma/CentOS/RedHat)
+function(columnstore_is_rhel_like OS_ID OUT_BOOL)
+  set(_is_rhel_like FALSE)
+  if(${OS_ID} MATCHES "^(rhel|rocky|almalinux|centos|redhatenterpriseserver|redhatenterprise|redhat)$")
+    set(_is_rhel_like TRUE)
+  endif()
+  set(${OUT_BOOL}
+      "${_is_rhel_like}"
+      PARENT_SCOPE
+  )
+endfunction()
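For reference, a typical /etc/os-release on a RHEL-like 10 system carries the two fields this function reads (values below are illustrative):

ID="rocky"
VERSION_ID="10.0"

With that input, columnstore_detect_os sets its two out-parameters to "rocky" and "10", which columnstore_is_rhel_like then matches against its RHEL-like ID list.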
cmake/selinux_policy.cmake (new file, 100 lines)
@@ -0,0 +1,100 @@
# Build SELinux policy and package it for RPM on RHEL-like systems >= 10 only
# Builds from: storage/columnstore/columnstore/build/security/columnstore.te
# Produces: columnstore.pp packaged under ${ENGINE_SUPPORTDIR}/policy/selinux
# Adds BuildRequires: selinux-policy-devel (RPM, RHEL-like >= 10)

# Detect if we are building an RPM package
if(NOT RPM)
  return()
endif()

columnstore_detect_os(_os_id _os_version_major)
columnstore_is_rhel_like("${_os_id}" _is_rhel_like)

# We only build on RHEL-like >= 10
if(NOT _is_rhel_like
   OR (NOT _os_version_major)
   OR (_os_version_major LESS 10)
)
  message(
    STATUS
      "SELinux policy build skipped: OS '${_os_id}' version '${_os_version_major}' not matching RHEL-like >= 10 or undetected."
  )
  return()
endif()

# Add RPM BuildRequires for the engine component only on matching systems.
# Use the common appender macro to handle comma separation.
columnstore_append_for_cpack(CPACK_RPM_columnstore-engine_PACKAGE_BUILDREQUIRES "selinux-policy-devel")

# Paths
set(SELINUX_SRC_DIR "${CMAKE_CURRENT_LIST_DIR}/../build/security")
set(SELINUX_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/selinux")
set(SELINUX_TE "${SELINUX_SRC_DIR}/columnstore.te")
set(SELINUX_PP "${SELINUX_BUILD_DIR}/columnstore.pp")

file(MAKE_DIRECTORY "${SELINUX_BUILD_DIR}")

# Ensure selinux-policy-devel is available
if(NOT EXISTS "/usr/share/selinux/devel/Makefile")
  message(
    FATAL_ERROR
      "SELinux policy build requires '/usr/share/selinux/devel/Makefile'. Please install 'selinux-policy-devel' (RHEL/Rocky >= 10) and re-run CMake."
  )
endif()

# Custom command to build the .pp from .te using the upstream devel Makefile
add_custom_command(
  OUTPUT "${SELINUX_PP}"
  COMMAND ${CMAKE_COMMAND} -E copy "${SELINUX_TE}" "${SELINUX_BUILD_DIR}/columnstore.te"
  COMMAND make -f /usr/share/selinux/devel/Makefile columnstore.pp
  WORKING_DIRECTORY "${SELINUX_BUILD_DIR}"
  DEPENDS "${SELINUX_TE}"
  COMMENT "Building SELinux policy columnstore.pp from columnstore.te"
  VERBATIM
)

add_custom_target(selinux_policy ALL DEPENDS "${SELINUX_PP}")

# Install the compiled policy into the package (no runtime dep). Post-install will load it conditionally.
install(
  FILES "${SELINUX_PP}"
  DESTINATION "${ENGINE_SUPPORTDIR}/policy/selinux"
  COMPONENT columnstore-engine
)

# Register RPM post-install and post-uninstall scripts for the component
set(_selinux_post "${CMAKE_CURRENT_LIST_DIR}/../build/selinux_policy_rpm_post.sh")
set(_selinux_postun "${CMAKE_CURRENT_LIST_DIR}/../build/selinux_policy_rpm_postun.sh")

# POST_INSTALL: preserve existing script if set by wrapping it
if(EXISTS "${_selinux_post}")
  if(DEFINED CPACK_RPM_columnstore-engine_POST_INSTALL_SCRIPT_FILE
     AND CPACK_RPM_columnstore-engine_POST_INSTALL_SCRIPT_FILE
  )
    set(_orig_post "${CPACK_RPM_columnstore-engine_POST_INSTALL_SCRIPT_FILE}")
    set(_wrap_post "${SELINUX_BUILD_DIR}/post_install_wrapper.sh")
    file(WRITE "${_wrap_post}" "#!/bin/sh\n\n'${_orig_post}' \"$@\" || true\n'${_selinux_post}' \"$@\" || true\n")
    execute_process(COMMAND ${CMAKE_COMMAND} -E chmod +x "${_wrap_post}")
    set(CPACK_RPM_columnstore-engine_POST_INSTALL_SCRIPT_FILE "${_wrap_post}")
  else()
    set(CPACK_RPM_columnstore-engine_POST_INSTALL_SCRIPT_FILE "${_selinux_post}")
  endif()
endif()

# POST_UNINSTALL: preserve existing script if set by wrapping it
if(EXISTS "${_selinux_postun}")
  if(DEFINED CPACK_RPM_columnstore-engine_POST_UNINSTALL_SCRIPT_FILE
     AND CPACK_RPM_columnstore-engine_POST_UNINSTALL_SCRIPT_FILE
  )
    set(_orig_postun "${CPACK_RPM_columnstore-engine_POST_UNINSTALL_SCRIPT_FILE}")
    set(_wrap_postun "${SELINUX_BUILD_DIR}/post_uninstall_wrapper.sh")
    file(WRITE "${_wrap_postun}"
         "#!/bin/sh\n\n'${_orig_postun}' \"$@\" || true\n'${_selinux_postun}' \"$@\" || true\n"
    )
    execute_process(COMMAND ${CMAKE_COMMAND} -E chmod +x "${_wrap_postun}")
    set(CPACK_RPM_columnstore-engine_POST_UNINSTALL_SCRIPT_FILE "${_wrap_postun}")
  else()
    set(CPACK_RPM_columnstore-engine_POST_UNINSTALL_SCRIPT_FILE "${_selinux_postun}")
  endif()
endif()
@@ -152,11 +152,11 @@ if(RPM)
   string(REPLACE "-" "_" SERVER_VERSION ${SERVER_VERSION})
   get_linux_lsb_release_information()

-  string(REGEX MATCH "^." OS_VERSION_MAJOR "${LSB_RELEASE_VERSION_SHORT}")
+  string(REGEX MATCH "^[0-9]+" OS_VERSION_MAJOR "${LSB_RELEASE_VERSION_SHORT}")
   message(STATUS ${OS_VERSION_MAJOR})
   if(LSB_RELEASE_ID_SHORT MATCHES "centos|rocky|rhel|alma|RedHatEnterprise")
     set(OS_NAME_SHORT "el")
-    if(OS_VERSION_MAJOR MATCHES "9")
+    if(OS_VERSION_MAJOR GREATER_EQUAL 9)
       set(CPACK_RPM_PACKAGE_REQUIRES "libxcrypt-compat")
     endif()
   else()
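The old pattern "^." kept only the first character of the release string, so a two-digit release such as "10.0" was reduced to "1"; "^[0-9]+" captures the full run of digits. A quick shell illustration of the difference:

echo "10.0" | grep -oE '^.'        # old behavior: "1"
echo "10.0" | grep -oE '^[0-9]+'   # new behavior: "10"

Pairing this with GREATER_EQUAL 9 instead of MATCHES "9" makes the libxcrypt-compat requirement apply to el9, el10, and later.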
@@ -2,13 +2,13 @@
 [](https://ci.columnstore.mariadb.net/mariadb-corporation/mariadb-columnstore-cmapi)

 ## Overview
-This RESTfull server enables multi-node setups for MCS.
+This RESTful server enables multi-node setups for MCS.

 ## Requirements

 See requirements.txt file.

-All the Python packages prerequisits are shipped with a pre-built Python enterpreter.
+All the Python packages prerequisites are shipped with a pre-built Python interpreter.

 ## Usage
@@ -1,64 +1,115 @@
 # Read value for a variable from VERSION.
-MACRO(MYSQL_GET_CONFIG_VALUE keyword var)
-  IF(NOT ${var})
-    FILE(STRINGS ${SERVER_SOURCE_DIR}/VERSION str REGEX "^[ ]*${keyword}=")
-    IF(str)
-      STRING(REPLACE "${keyword}=" "" str ${str})
-      STRING(REGEX REPLACE "[ ].*" "" str "${str}")
-      SET(${var} ${str})
-    ENDIF()
-  ENDIF()
-ENDMACRO()
+macro(MYSQL_GET_CONFIG_VALUE keyword var)
+  if(NOT ${var})
+    file(STRINGS ${SERVER_SOURCE_DIR}/VERSION str REGEX "^[ ]*${keyword}=")
+    if(str)
+      string(REPLACE "${keyword}=" "" str ${str})
+      string(REGEX REPLACE "[ ].*" "" str "${str}")
+      set(${var} ${str})
+    endif()
+  endif()
+endmacro()

 function(get_linux_lsb_release_information)
   # made like in autobake script but for rhel too, cause aws linux gives
   # wrong CMAKE_SYSTEM for eg: Linux-5.4.0-1029-aws
-  FIND_PROGRAM(LSB_RELEASE_EXEC lsb_release)
-  if(NOT LSB_RELEASE_EXEC)
-    MESSAGE(FATAL_ERROR "Could not detect lsb_release executable, can not gather required information")
+  # Try lsb_release first
+  find_program(LSB_RELEASE_EXEC lsb_release)
+
+  if(LSB_RELEASE_EXEC)
+    execute_process(
+      COMMAND "${LSB_RELEASE_EXEC}" --short --id
+      OUTPUT_VARIABLE LSB_RELEASE_ID_SHORT
+      OUTPUT_STRIP_TRAILING_WHITESPACE
+    )
+    string(TOLOWER "${LSB_RELEASE_ID_SHORT}" LSB_RELEASE_ID_SHORT)
+
+    execute_process(
+      COMMAND "${LSB_RELEASE_EXEC}" --short --release
+      OUTPUT_VARIABLE LSB_RELEASE_VERSION_SHORT
+      OUTPUT_STRIP_TRAILING_WHITESPACE
+    )
+
+    execute_process(
+      COMMAND "${LSB_RELEASE_EXEC}" --short --codename
+      OUTPUT_VARIABLE LSB_RELEASE_CODENAME_SHORT
+      OUTPUT_STRIP_TRAILING_WHITESPACE
+    )
+  else()
+    # Fallback: parse /etc/os-release
+    if(EXISTS "/etc/os-release")
+      file(READ "/etc/os-release" OS_RELEASE_CONTENT)
+
+      string(REGEX MATCH "ID=([^\n]*)" _match "${OS_RELEASE_CONTENT}")
+      set(LSB_RELEASE_ID_SHORT "${CMAKE_MATCH_1}")
+      # Remove quotes if present
+      string(REGEX REPLACE "^\"(.*)\"$" "\\1" LSB_RELEASE_ID_SHORT "${LSB_RELEASE_ID_SHORT}")
+      string(TOLOWER "${LSB_RELEASE_ID_SHORT}" LSB_RELEASE_ID_SHORT)
+
+      string(REGEX MATCH "VERSION_ID=([^\n]*)" _match "${OS_RELEASE_CONTENT}")
+      set(LSB_RELEASE_VERSION_SHORT "${CMAKE_MATCH_1}")
+      # Remove quotes if present
+      string(REGEX REPLACE "^\"(.*)\"$" "\\1" LSB_RELEASE_VERSION_SHORT "${LSB_RELEASE_VERSION_SHORT}")
+
+      string(REGEX MATCH "VERSION_CODENAME=([^\n]*)" _match "${OS_RELEASE_CONTENT}")
+      if(CMAKE_MATCH_1)
+        set(LSB_RELEASE_CODENAME_SHORT "${CMAKE_MATCH_1}")
+        # Remove quotes if present
+        string(REGEX REPLACE "^\"(.*)\"$" "\\1" LSB_RELEASE_CODENAME_SHORT "${LSB_RELEASE_CODENAME_SHORT}")
+      else()
+        set(LSB_RELEASE_CODENAME_SHORT "")
+      endif()
+    else()
+      message(FATAL_ERROR "Could not detect lsb_release or /etc/os-release, cannot gather required information")
+    endif()
   endif()

-  EXECUTE_PROCESS(COMMAND "${LSB_RELEASE_EXEC}" --short --id OUTPUT_VARIABLE LSB_RELEASE_ID_SHORT OUTPUT_STRIP_TRAILING_WHITESPACE)
-  MESSAGE(STATUS "LSB_RELEASE_ID_SHORT ${LSB_RELEASE_ID_SHORT}")
-  STRING(TOLOWER ${LSB_RELEASE_ID_SHORT} LSB_RELEASE_ID_SHORT)
-  MESSAGE(STATUS "LSB_RELEASE_ID_SHORT ${LSB_RELEASE_ID_SHORT}")
+  message(STATUS "LSB_RELEASE_ID_SHORT ${LSB_RELEASE_ID_SHORT}")
+  message(STATUS "LSB_RELEASE_VERSION_SHORT ${LSB_RELEASE_VERSION_SHORT}")
+  message(STATUS "LSB_RELEASE_CODENAME_SHORT ${LSB_RELEASE_CODENAME_SHORT}")

-  EXECUTE_PROCESS(COMMAND "${LSB_RELEASE_EXEC}" --short --release OUTPUT_VARIABLE LSB_RELEASE_VERSION_SHORT OUTPUT_STRIP_TRAILING_WHITESPACE)
-  MESSAGE(STATUS "LSB_RELEASE_VERSION_SHORT ${LSB_RELEASE_VERSION_SHORT}")
-  EXECUTE_PROCESS(COMMAND "${LSB_RELEASE_EXEC}" --short --codename OUTPUT_VARIABLE LSB_RELEASE_CODENAME_SHORT OUTPUT_STRIP_TRAILING_WHITESPACE)
-
-  SET(LSB_RELEASE_ID_SHORT "${LSB_RELEASE_ID_SHORT}" PARENT_SCOPE)
-  SET(LSB_RELEASE_VERSION_SHORT "${LSB_RELEASE_VERSION_SHORT}" PARENT_SCOPE)
+  set(LSB_RELEASE_ID_SHORT
+      "${LSB_RELEASE_ID_SHORT}"
+      PARENT_SCOPE
+  )
+  set(LSB_RELEASE_VERSION_SHORT
+      "${LSB_RELEASE_VERSION_SHORT}"
+      PARENT_SCOPE
+  )
+  set(LSB_RELEASE_CODENAME_SHORT
+      "${LSB_RELEASE_CODENAME_SHORT}"
+      PARENT_SCOPE
+  )
 endfunction()

 # Read mysql version for configure script
-MACRO(GET_MYSQL_VERSION)
-  MYSQL_GET_CONFIG_VALUE("MYSQL_VERSION_MAJOR" MAJOR_VERSION)
-  MYSQL_GET_CONFIG_VALUE("MYSQL_VERSION_MINOR" MINOR_VERSION)
-  MYSQL_GET_CONFIG_VALUE("MYSQL_VERSION_PATCH" PATCH_VERSION)
-  MYSQL_GET_CONFIG_VALUE("MYSQL_VERSION_EXTRA" EXTRA_VERSION)
-  MYSQL_GET_CONFIG_VALUE("SERVER_MATURITY" SERVER_MATURITY)
+macro(GET_MYSQL_VERSION)
+  mysql_get_config_value("MYSQL_VERSION_MAJOR" MAJOR_VERSION)
+  mysql_get_config_value("MYSQL_VERSION_MINOR" MINOR_VERSION)
+  mysql_get_config_value("MYSQL_VERSION_PATCH" PATCH_VERSION)
+  mysql_get_config_value("MYSQL_VERSION_EXTRA" EXTRA_VERSION)
+  mysql_get_config_value("SERVER_MATURITY" SERVER_MATURITY)

-  IF(NOT "${MAJOR_VERSION}" MATCHES "[0-9]+" OR
-     NOT "${MINOR_VERSION}" MATCHES "[0-9]+" OR
-     NOT "${PATCH_VERSION}" MATCHES "[0-9]+")
-    MESSAGE(FATAL_ERROR "VERSION file cannot be parsed.")
-  ENDIF()
-  IF((NOT TINY_VERSION) AND (EXTRA_VERSION MATCHES "[\\-][0-9]+"))
-    STRING(REPLACE "-" "" TINY_VERSION "${EXTRA_VERSION}")
-  ELSE()
-    SET(TINY_VERSION "0")
-  ENDIF()
-  SET(VERSION "${MAJOR_VERSION}.${MINOR_VERSION}.${PATCH_VERSION}${EXTRA_VERSION}")
-  SET(SERVER_VERSION ${VERSION})
-  MESSAGE(STATUS "MariaDB ${VERSION}")
-  SET(MYSQL_BASE_VERSION "${MAJOR_VERSION}.${MINOR_VERSION}" CACHE INTERNAL "MySQL Base version")
-  SET(MYSQL_NO_DASH_VERSION "${MAJOR_VERSION}.${MINOR_VERSION}.${PATCH_VERSION}")
-  MATH(EXPR MYSQL_VERSION_ID "10000*${MAJOR_VERSION} + 100*${MINOR_VERSION} + ${PATCH_VERSION}")
-  MARK_AS_ADVANCED(VERSION MYSQL_VERSION_ID MYSQL_BASE_VERSION)
-  SET(CPACK_PACKAGE_VERSION_MAJOR ${MAJOR_VERSION})
-  SET(CPACK_PACKAGE_VERSION_MINOR ${MINOR_VERSION})
-  SET(CPACK_PACKAGE_VERSION_PATCH ${PATCH_VERSION}${EXTRA_VERSION})
-ENDMACRO()
+  if(NOT "${MAJOR_VERSION}" MATCHES "[0-9]+"
+     OR NOT "${MINOR_VERSION}" MATCHES "[0-9]+"
+     OR NOT "${PATCH_VERSION}" MATCHES "[0-9]+"
+  )
+    message(FATAL_ERROR "VERSION file cannot be parsed.")
+  endif()
+  if((NOT TINY_VERSION) AND (EXTRA_VERSION MATCHES "[\\-][0-9]+"))
+    string(REPLACE "-" "" TINY_VERSION "${EXTRA_VERSION}")
+  else()
+    set(TINY_VERSION "0")
+  endif()
+  set(VERSION "${MAJOR_VERSION}.${MINOR_VERSION}.${PATCH_VERSION}${EXTRA_VERSION}")
+  set(SERVER_VERSION ${VERSION})
+  message(STATUS "MariaDB ${VERSION}")
+  set(MYSQL_BASE_VERSION
+      "${MAJOR_VERSION}.${MINOR_VERSION}"
+      CACHE INTERNAL "MySQL Base version"
+  )
+  set(MYSQL_NO_DASH_VERSION "${MAJOR_VERSION}.${MINOR_VERSION}.${PATCH_VERSION}")
+  math(EXPR MYSQL_VERSION_ID "10000*${MAJOR_VERSION} + 100*${MINOR_VERSION} + ${PATCH_VERSION}")
+  mark_as_advanced(VERSION MYSQL_VERSION_ID MYSQL_BASE_VERSION)
+  set(CPACK_PACKAGE_VERSION_MAJOR ${MAJOR_VERSION})
+  set(CPACK_PACKAGE_VERSION_MINOR ${MINOR_VERSION})
+  set(CPACK_PACKAGE_VERSION_PATCH ${PATCH_VERSION}${EXTRA_VERSION})
+endmacro()
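When lsb_release is available, the three probes above are simple invocations; illustrative output for a Rocky Linux 10 host (exact strings vary by distribution):

lsb_release --short --id        # e.g. "Rocky", lowercased to "rocky" by the function
lsb_release --short --release   # e.g. "10.0"
lsb_release --short --codename  # may be empty or "n/a"; the os-release fallback stores "" in that case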
@@ -16,6 +16,7 @@ from cherrypy.process import plugins
 # TODO: fix dispatcher choose logic because code executing in endpoints.py
 # while import process, this cause module logger misconfiguration
 from cmapi_server.logging_management import config_cmapi_server_logging
+from cmapi_server.sentry import maybe_init_sentry, register_sentry_cherrypy_tool
 config_cmapi_server_logging()

 from cmapi_server import helpers
@@ -140,15 +141,24 @@ if __name__ == '__main__':
     # TODO: read cmapi config filepath as an argument
     helpers.cmapi_config_check()

+    # Init Sentry if DSN is present
+    sentry_active = maybe_init_sentry()
+    if sentry_active:
+        register_sentry_cherrypy_tool()
+
     CertificateManager.create_self_signed_certificate_if_not_exist()
     CertificateManager.renew_certificate()

     app = cherrypy.tree.mount(root=None, config=CMAPI_CONF_PATH)
+    root_config = {
+        "request.dispatch": dispatcher,
+        "error_page.default": jsonify_error,
+    }
+    if sentry_active:
+        root_config["tools.sentry.on"] = True
+
     app.config.update({
-        '/': {
-            'request.dispatch': dispatcher,
-            'error_page.default': jsonify_error,
-        },
+        '/': root_config,
         'config': {
             'path': CMAPI_CONF_PATH,
         },
cmapi/cmapi_server/sentry.py (new file, 197 lines)
@@ -0,0 +1,197 @@
import logging
import socket

import cherrypy
import sentry_sdk
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.logging import LoggingIntegration

from cmapi_server import helpers
from cmapi_server.constants import CMAPI_CONF_PATH

SENTRY_ACTIVE = False

logger = logging.getLogger(__name__)


def maybe_init_sentry() -> bool:
    """Initialize Sentry from CMAPI configuration.

    Reads config and initializes Sentry only if a dsn parameter is present
    in the corresponding section. The initialization enables the following
    integrations:
    - LoggingIntegration: capture warning-level logs as Sentry events and use
      lower-level logs as breadcrumbs.
    - AioHttpIntegration: propagate trace headers for outbound requests made
      with `aiohttp`.

    The function is a no-op if the DSN is missing.

    Returns: True if Sentry is initialized, False otherwise.
    """
    global SENTRY_ACTIVE
    try:
        cfg_parser = helpers.get_config_parser(CMAPI_CONF_PATH)
        dsn = helpers.dequote(
            cfg_parser.get('Sentry', 'dsn', fallback='').strip()
        )
        if not dsn:
            return False

        environment = helpers.dequote(
            cfg_parser.get('Sentry', 'environment', fallback='development').strip()
        )
        traces_sample_rate_str = helpers.dequote(
            cfg_parser.get('Sentry', 'traces_sample_rate', fallback='1.0').strip()
        )
    except Exception:
        logger.exception('Failed to initialize Sentry.')
        return False

    try:
        sentry_logging = LoggingIntegration(
            level=logging.INFO,
            event_level=logging.WARNING,
        )

        try:
            traces_sample_rate = float(traces_sample_rate_str)
        except ValueError:
            logger.error('Invalid traces_sample_rate: %s', traces_sample_rate_str)
            traces_sample_rate = 1.0

        sentry_sdk.init(
            dsn=dsn,
            environment=environment,
            traces_sample_rate=traces_sample_rate,
            integrations=[sentry_logging, AioHttpIntegration()],
        )
        SENTRY_ACTIVE = True
        logger.info('Sentry initialized for CMAPI via config.')
    except Exception:
        logger.exception('Failed to initialize Sentry.')
        return False

    logger.info('Sentry successfully initialized.')
    return True


def _sentry_on_start_resource():
    """Start or continue a Sentry transaction for the current CherryPy request.

    - Continues an incoming distributed trace using Sentry trace headers if
      present; otherwise starts a new transaction with `op='http.server'`.
    - Pushes the transaction into the current Sentry scope and attaches useful
      request metadata as tags and context (HTTP method, path, client IP,
      hostname, request ID, and a filtered subset of headers).
    - Stores the transaction on the CherryPy request object for later finishing
      in `_sentry_on_end_request`.
    """
    if not SENTRY_ACTIVE:
        return
    try:
        request = cherrypy.request
        headers = dict(getattr(request, 'headers', {}) or {})
        name = f"{request.method} {request.path_info}"
        transaction = sentry_sdk.start_transaction(
            op='http.server', name=name, continue_from_headers=headers
        )
        sentry_sdk.Hub.current.scope.set_span(transaction)

        # Add request-level context/tags
        scope = sentry_sdk.Hub.current.scope
        scope.set_tag('http.method', request.method)
        scope.set_tag('http.path', request.path_info)
        scope.set_tag('client.ip', getattr(request.remote, 'ip', ''))
        scope.set_tag('instance.hostname', socket.gethostname())
        request_id = getattr(request, 'unique_id', None)
        if request_id:
            scope.set_tag('request.id', request_id)
        # Optionally add headers as context without sensitive values
        safe_headers = {k: v for k, v in headers.items()
                        if k.lower() not in {'authorization', 'x-api-key'}}
        scope.set_context('headers', safe_headers)

        request.sentry_transaction = transaction
    except Exception:
        logger.exception('Failed to start Sentry transaction.')


def _sentry_before_error_response():
    """Capture the current exception (if any) to Sentry before error response.

    This hook runs when CherryPy prepares an error response. If an exception is
    available in the current context, it will be sent to Sentry.
    """
    if not SENTRY_ACTIVE:
        return
    try:
        sentry_sdk.capture_exception()
    except Exception:
        logger.exception('Failed to capture exception to Sentry.')


def _sentry_on_end_request():
    """Finish the Sentry transaction for the current CherryPy request.

    Attempts to set the HTTP status code on the active transaction and then
    finishes it. If no transaction was started on this request, the function is
    a no-op.
    """
    if not SENTRY_ACTIVE:
        return
    try:
        request = cherrypy.request
        transaction = getattr(request, 'sentry_transaction', None)
        if transaction is None:
            return
        status = cherrypy.response.status
        try:
            status_code = int(str(status).split()[0])
        except Exception:
            status_code = None
        try:
            if status_code is not None and hasattr(transaction, 'set_http_status'):
                transaction.set_http_status(status_code)
        except Exception:
            logger.exception('Failed to set HTTP status code on Sentry transaction.')
        transaction.finish()
    except Exception:
        logger.exception('Failed to finish Sentry transaction.')


class SentryTool(cherrypy.Tool):
    """CherryPy Tool that wires Sentry request lifecycle hooks.

    The tool attaches handlers for `on_start_resource`, `before_error_response`,
    and `on_end_request` in order to manage Sentry transactions and error
    capture across the request lifecycle.
    """
    def __init__(self):
        cherrypy.Tool.__init__(self, 'on_start_resource', self._tool_callback, priority=50)

    @staticmethod
    def _tool_callback():
        """Attach Sentry lifecycle callbacks to the current CherryPy request."""
        cherrypy.request.hooks.attach(
            'on_start_resource', _sentry_on_start_resource, priority=50
        )
        cherrypy.request.hooks.attach(
            'before_error_response', _sentry_before_error_response, priority=60
        )
        cherrypy.request.hooks.attach(
            'on_end_request', _sentry_on_end_request, priority=70
        )


def register_sentry_cherrypy_tool() -> None:
    """Register the Sentry CherryPy tool under `tools.sentry`.

    This function is safe to call multiple times; failures are silently ignored
    to avoid impacting the application startup.
    """
    if not SENTRY_ACTIVE:
        return

    try:
        cherrypy.tools.sentry = SentryTool()
    except Exception:
        logger.exception('Failed to register Sentry CherryPy tool.')
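maybe_init_sentry reads a [Sentry] section from the CMAPI config file. A sketch of what that section could look like, assuming the config lives at /etc/columnstore/cmapi_server.conf (the actual path comes from CMAPI_CONF_PATH) and using a placeholder DSN:

[Sentry]
dsn = 'https://examplePublicKey@o0.ingest.sentry.io/0'
environment = 'production'
traces_sample_rate = '0.5'

Values may be quoted; helpers.dequote() strips the quotes before use, and an empty or missing dsn leaves Sentry disabled.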
cmapi/dev_tools/piptools.sh (new file, 70 lines, executable)
@@ -0,0 +1,70 @@
#!/usr/bin/env bash
set -euo pipefail

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cmapi_dir="$(realpath "${script_dir}/..")"

export CUSTOM_COMPILE_COMMAND="dev_tools/piptools.sh compile-all"

ensure_piptools() {
    if ! command -v pip-compile >/dev/null 2>&1; then
        echo "Installing pip-tools..."
        python3 -m pip install --upgrade pip
        python3 -m pip install pip-tools
    fi
}

compile_runtime() {
    ensure_piptools
    cd "${cmapi_dir}"
    pip-compile --quiet --resolver=backtracking --output-file=requirements.txt requirements.in
}

compile_dev() {
    ensure_piptools
    cd "${cmapi_dir}"
    pip-compile --quiet --resolver=backtracking --output-file=requirements-dev.txt requirements-dev.in
}

compile_all() {
    compile_runtime
    compile_dev
}

sync_runtime() {
    ensure_piptools
    cd "${cmapi_dir}"
    pip-sync requirements.txt
}

sync_dev() {
    ensure_piptools
    cd "${cmapi_dir}"
    pip-sync requirements.txt requirements-dev.txt
}

usage() {
    cat <<EOF
Usage: dev_tools/piptools.sh <command>

Commands:
  compile-runtime   Compile requirements.in -> requirements.txt
  compile-dev       Compile requirements-dev.in -> requirements-dev.txt
  compile-all       Compile both runtime and dev requirements (default)
  sync-runtime      pip-sync runtime requirements only
  sync-dev          pip-sync runtime + dev requirements
  help              Show this help
EOF
}

cmd="${1:-compile-all}"
case "${cmd}" in
    compile-runtime) compile_runtime ;;
    compile-dev) compile_dev ;;
    compile-all) compile_all ;;
    sync-runtime) sync_runtime ;;
    sync-dev) sync_dev ;;
    help|--help|-h) usage ;;
    *) echo "Unknown command: ${cmd}" >&2; usage; exit 1 ;;
esac
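A typical workflow after editing one of the .in files (a sketch; run from the cmapi directory):

./dev_tools/piptools.sh compile-all    # regenerate requirements.txt and requirements-dev.txt
./dev_tools/piptools.sh sync-dev       # install the pinned runtime + dev set into the active environment

Because CUSTOM_COMPILE_COMMAND is exported, the header of each generated file records dev_tools/piptools.sh compile-all rather than the raw pip-compile invocation.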
cmapi/requirements-dev.in (new file, 8 lines)
@@ -0,0 +1,8 @@
# Direct, top-level development/testing dependencies
# Compile with: pip-compile --output-file=requirements-dev.txt requirements-dev.in

pytest==8.3.5
fabric==3.2.2
# Tooling
pip-tools
@@ -1,37 +1,69 @@
-# For integration tests
-pytest==8.3.5
-fabric==3.2.2
-
-# This frozen part is autogenerated by pip-compile: pip-compile requirements-dev.txt
+#
+# This file is autogenerated by pip-compile with Python 3.9
+# by the following command:
+#
+#    dev_tools/piptools.sh compile-all
+#
 bcrypt==4.3.0
     # via paramiko
+build==1.3.0
+    # via pip-tools
 cffi==1.17.1
     # via
     #   cryptography
     #   pynacl
+click==8.1.8
+    # via pip-tools
 cryptography==45.0.5
     # via paramiko
 decorator==5.2.1
     # via fabric
 deprecated==1.2.18
     # via fabric
 exceptiongroup==1.3.0
     # via pytest
 fabric==3.2.2
-    # via -r requirements-dev.txt
+    # via -r requirements-dev.in
+importlib-metadata==8.7.0
+    # via build
 iniconfig==2.1.0
     # via pytest
 invoke==2.2.0
     # via fabric
 packaging==25.0
-    # via pytest
+    # via
+    #   build
+    #   pytest
 paramiko==3.5.1
     # via fabric
+pip-tools==7.5.0
+    # via -r requirements-dev.in
 pluggy==1.6.0
     # via pytest
 pycparser==2.22
     # via cffi
 pynacl==1.5.0
     # via paramiko
+pyproject-hooks==1.2.0
+    # via
+    #   build
+    #   pip-tools
 pytest==8.3.5
-    # via -r requirements-dev.txt
+    # via -r requirements-dev.in
 tomli==2.2.1
     # via
     #   build
     #   pip-tools
     #   pytest
 typing-extensions==4.14.1
     # via exceptiongroup
+wheel==0.45.1
+    # via pip-tools
 wrapt==1.17.2
     # via deprecated
+zipp==3.23.0
+    # via importlib-metadata

 # The following packages are considered to be unsafe in a requirements file:
 # pip
 # setuptools
cmapi/requirements.in (new file, 19 lines)
@@ -0,0 +1,19 @@
# Direct, top-level runtime dependencies for cmapi
# Compile with: pip-compile --output-file=requirements.txt requirements.in

aiohttp==3.11.16
awscli==1.38.28
CherryPy==18.10.0
cryptography==43.0.3
furl==2.1.4
gsutil==5.33
lxml==5.3.2
psutil==7.0.0
pyotp==2.9.0
requests==2.32.3
# required for CherryPy RoutesDispatcher,
# but CherryPy itself has no such dependency
Routes==2.5.1
typer==0.15.2
sentry-sdk==2.34.1
@@ -1,78 +1,231 @@
aiohttp==3.11.16
awscli==1.38.28
CherryPy==18.10.0
cryptography==43.0.3
furl==2.1.4
gsutil==5.33
lxml==5.3.2
psutil==7.0.0
pyotp==2.9.0
requests==2.32.3
# required for CherryPy RoutesDispatcher,
# but CherryPy itself has no such a dependency
Routes==2.5.1
typer==0.15.2

# indirect dependencies
#
# This file is autogenerated by pip-compile with Python 3.9
# by the following command:
#
#    dev_tools/piptools.sh compile-all
#
aiohappyeyeballs==2.6.1
    # via aiohttp
aiohttp==3.11.16
    # via
    #   -r requirements.in
    #   google-auth
aiosignal==1.3.2
    # via aiohttp
argcomplete==3.6.2
    # via gsutil
async-timeout==5.0.1
    # via aiohttp
attrs==25.3.0
    # via aiohttp
autocommand==2.2.2
backports.tarfile==1.2.0
    # via jaraco-text
awscli==1.38.28
    # via -r requirements.in
backports-tarfile==1.2.0
    # via jaraco-context
boto==2.49.0
    # via gcs-oauth2-boto-plugin
botocore==1.37.28
    # via
    #   awscli
    #   s3transfer
cachetools==5.5.2
    # via google-auth
certifi==2025.1.31
    # via
    #   requests
    #   sentry-sdk
cffi==1.17.1
    # via cryptography
charset-normalizer==3.4.1
    # via requests
cheroot==10.0.1
    # via cherrypy
cherrypy==18.10.0
    # via -r requirements.in
click==8.1.8
    # via typer
colorama==0.4.6
    # via awscli
crcmod==1.7
    # via gsutil
cryptography==43.0.3
    # via
    #   -r requirements.in
    #   pyopenssl
docutils==0.16
    # via awscli
fasteners==0.19
    # via
    #   google-apitools
    #   gsutil
frozenlist==1.5.0
    # via
    #   aiohttp
    #   aiosignal
furl==2.1.4
    # via -r requirements.in
gcs-oauth2-boto-plugin==3.2
    # via gsutil
google-apitools==0.5.32
google-auth==2.17.0
    # via gsutil
google-auth[aiohttp]==2.17.0
    # via
    #   gcs-oauth2-boto-plugin
    #   google-auth-httplib2
    #   gsutil
google-auth-httplib2==0.2.0
    # via
    #   gcs-oauth2-boto-plugin
    #   gsutil
google-reauth==0.1.1
    # via
    #   gcs-oauth2-boto-plugin
    #   gsutil
gsutil==5.33
    # via -r requirements.in
httplib2==0.20.4
    # via
    #   gcs-oauth2-boto-plugin
    #   google-apitools
    #   google-auth-httplib2
    #   gsutil
    #   oauth2client
idna==3.10
jaraco.collections==5.1.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jaraco.text==4.0.0
    # via
    #   requests
    #   yarl
jaraco-collections==5.1.0
    # via cherrypy
jaraco-context==6.0.1
    # via jaraco-text
jaraco-functools==4.1.0
    # via
    #   cheroot
    #   jaraco-text
    #   tempora
jaraco-text==4.0.0
    # via jaraco-collections
jmespath==1.0.1
    # via botocore
lxml==5.3.2
    # via -r requirements.in
markdown-it-py==3.0.0
    # via rich
mdurl==0.1.2
    # via markdown-it-py
monotonic==1.6
    # via gsutil
more-itertools==10.6.0
    # via
    #   cheroot
    #   cherrypy
    #   jaraco-functools
    #   jaraco-text
multidict==6.3.2
    # via
    #   aiohttp
    #   yarl
oauth2client==4.1.3
    # via
    #   gcs-oauth2-boto-plugin
    #   google-apitools
orderedmultidict==1.0.1
    # via furl
portend==3.2.0
    # via cherrypy
propcache==0.3.1
    # via
    #   aiohttp
    #   yarl
psutil==7.0.0
    # via -r requirements.in
pyasn1==0.6.1
pyasn1_modules==0.4.2
    # via
    #   oauth2client
    #   pyasn1-modules
    #   rsa
pyasn1-modules==0.4.2
    # via
    #   google-auth
    #   oauth2client
pycparser==2.22
Pygments==2.19.1
pyOpenSSL==24.2.1
    # via cffi
pygments==2.19.1
    # via rich
pyopenssl==24.2.1
    # via
    #   gcs-oauth2-boto-plugin
    #   gsutil
pyotp==2.9.0
    # via -r requirements.in
pyparsing==3.2.3
    # via httplib2
python-dateutil==2.9.0.post0
    # via
    #   botocore
    #   tempora
pyu2f==0.1.5
PyYAML==6.0.2
repoze.lru==0.7
    # via google-reauth
pyyaml==6.0.2
    # via awscli
repoze-lru==0.7
    # via routes
requests==2.32.3
    # via
    #   -r requirements.in
    #   google-auth
retry-decorator==1.1.1
    # via
    #   gcs-oauth2-boto-plugin
    #   gsutil
rich==14.0.0
    # via typer
routes==2.5.1
    # via -r requirements.in
rsa==4.7.2
    # via
    #   awscli
    #   gcs-oauth2-boto-plugin
    #   google-auth
    #   oauth2client
s3transfer==0.11.4
    # via awscli
sentry-sdk==2.34.1
    # via -r requirements.in
shellingham==1.5.4
    # via typer
six==1.17.0
    # via
    #   furl
    #   gcs-oauth2-boto-plugin
    #   google-apitools
    #   google-auth
    #   gsutil
    #   oauth2client
    #   orderedmultidict
    #   python-dateutil
    #   pyu2f
    #   routes
tempora==5.8.0
typing_extensions==4.13.1
    # via portend
typer==0.15.2
    # via -r requirements.in
typing-extensions==4.13.1
    # via
    #   multidict
    #   rich
    #   typer
urllib3==1.26.20
    # via
    #   botocore
    #   requests
    #   sentry-sdk
yarl==1.19.0
zc.lockfile==3.0.post1
    # via aiohttp
zc-lockfile==3.0.post1
    # via cherrypy

# The following packages are considered to be unsafe in a requirements file:
# setuptools
@@ -13,7 +13,7 @@
 #
 ########################################################################
 # Documentation: bash mcs_backup_manager.sh help
-# Version: 3.15
+# Version: 3.17
 #
 # Backup Example
 #   LocalStorage: sudo ./mcs_backup_manager.sh backup
@@ -26,7 +26,7 @@
 #   S3:           sudo ./mcs_backup_manager.sh restore -bb s3://my-cs-backups -l <date>
 #
 ########################################################################
-mcs_bk_manager_version="3.15"
+mcs_bk_manager_version="3.17"
 start=$(date +%s)
 action=$1
@@ -200,6 +200,7 @@ load_default_backup_variables() {

   # Number of DBroots
   # Integer usually 1 or 3
-  DBROOT_COUNT=1
+  DBROOT_COUNT=$(xmllint --xpath "string(//DBRootCount)" $CS_CONFIGS_PATH/Columnstore.xml)
+  ASSIGNED_DBROOT=$(xmllint --xpath "string(//ModuleDBRootID$PM_NUMBER-1-3)" $CS_CONFIGS_PATH/Columnstore.xml)
 }
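xmllint pulls both values straight out of Columnstore.xml; on a hypothetical single-node install with one DBRoot, the two queries above resolve as follows (illustrative output):

xmllint --xpath "string(//DBRootCount)" /etc/columnstore/Columnstore.xml         # -> 1
xmllint --xpath "string(//ModuleDBRootID1-1-3)" /etc/columnstore/Columnstore.xml  # -> 1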
@@ -715,7 +716,7 @@ validation_prechecks_for_backup() {
   if [ ! -d $backup_location ]; then
     handle_early_exit_on_backup "[X] Backup directory ($backup_location) DOES NOT exist ( -bl <directory> ) \n\n" true;
   fi
-  echo "today::::: $today"
+
   if [ "$today" == "auto_most_recent" ]; then
     auto_select_most_recent_backup_for_incremental
   fi
@@ -1062,7 +1063,8 @@ issue_write_locks()
   printf " - Issuing read-only lock to Columnstore Engine ... ";
   if ! $columnstore_online; then
     printf "Skip since offline\n";
-  elif [ $DBROOT_COUNT == "1" ] && [[ -n "$startreadonly_exists" ]]; then
+  elif [[ -n "$startreadonly_exists" ]]; then
     if dbrmctl startreadonly ; then
       if ! $skip_polls; then
         cs_read_only_wait_loop
@@ -1147,7 +1149,7 @@ poll_check_no_active_cpimports() {
       break
     else
       printf "."
-      if ! $quiet; then printf "\n$active_cpimports"; fi;
+      if ! $quiet; then printf "\n$active_cpimports\n - Waiting for cpimports to finish ..."; fi;
       sleep "$poll_interval"
       ((attempts++))
     fi
@@ -1310,7 +1312,6 @@ clear_read_lock() {
   fi

   if [ $pm == "pm1" ]; then
-
     # Clear CS Lock
     printf " - Clearing read-only lock on Columnstore Engine ... ";
     if ! $columnstore_online; then
@@ -1362,6 +1363,21 @@ handle_ctrl_c_backup() {
   handle_early_exit_on_backup
 }

+handle_ctrl_c_dbrm_backup() {
+  echo "Ctrl+C captured. handle_early_exit_on_dbrm_backup..."
+  handle_early_exit_on_dbrm_backup
+}
+
+# $1 is the error message
+# $2 is false by default, meaning the read lock still needs to be cleared; true means locks don't exist or don't need to be cleared
+handle_early_exit_on_dbrm_backup() {
+  skip_clear_locks=${2:-false}
+  if ! $skip_clear_locks; then clear_read_lock; fi;
+  printf "\nDBRM Backup Failed: $1\n"
+  alert "$1"
+  exit 1;
+}
+
 alert() {
   # echo "Not implemented yet"
   # slack, email, webhook curl endpoint etc.
@@ -1398,8 +1414,66 @@ wait_on_rsync()
   done
 }

+# Retries rsync for any transfers that were recorded as failed in the attempt log
+# @param $1 - retry depth/attempt
+retry_failed_rsync() {
+
+  local next_depth=$(( $1 + 1 ))
+  local retry_rsync_log="mcs-failed-rsync-attempt-$1.log"
+  local new_retry_rsync_log="mcs-failed-rsync-attempt-$next_depth.log"
+
+  # confirm retry depth
+  if [ -z "$1" ]; then
+    echo "No retry depth specified, exiting"
+    return 1
+  fi
+
+  # Check if max retry depth reached
+  if [ "$1" -gt "$max_retries" ]; then
+    echo "Max retry depth of $max_retries reached ... exiting"
+    return 1
+  fi
+
+  # Check if retry log exists
+  if [ -f "$retry_rsync_log" ]; then
+    printf "\n%-s\n" "Retrying failed rsyncs... Attempt #$1"
+    while IFS= read -r line; do
+      source=$(echo "$line" | sed -n 's/.*Failed Source: \(.*\) Copy to Location:.*/\1/p')
+      target=$(echo "$line" | sed -n 's/.*Copy to Location: \(.*\)$/\1/p')
+      # Remove folder from target, else rsync will nest folders improperly
+      target_base=$(dirname "$target")
+
+      # additional_rsync_flags
+      printf " - rsync $parallel_rsync_flags %-s %-s ..." "$source" "$target_base"
+      if rsync $parallel_rsync_flags "$source" "$target_base"; then
+        printf " Done\n"
+      else
+        echo "Failed Source: $source Copy to Location: $target" >> "$new_retry_rsync_log"
+        printf " Failed\n"
+      fi
+
+    done < "$retry_rsync_log"
+  else
+    echo "Retry log ($retry_rsync_log) does not exist ... returning error"
+    return 1
+  fi
+
+  # If any rsync failed, retry again
+  if [ -f "$new_retry_rsync_log" ]; then
+    retry_failed_rsync $next_depth
+  fi
+
+  # Cleanup
+  if [ -f $retry_rsync_log ]; then rm $retry_rsync_log; fi;
+}
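Each failure is recorded as one line that the retry pass re-parses with the same sed expressions used above; a sketch with illustrative paths:

line='Failed Source: /var/lib/columnstore/data1/000.dir Copy to Location: /tmp/backups/01-01-2025/data1/000.dir'
echo "$line" | sed -n 's/.*Failed Source: \(.*\) Copy to Location:.*/\1/p'   # -> /var/lib/columnstore/data1/000.dir
echo "$line" | sed -n 's/.*Copy to Location: \(.*\)$/\1/p'                   # -> /tmp/backups/01-01-2025/data1/000.dir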
initiate_rsyncs() {
  local dbrootToSync=$1
  retry_rsync_log="mcs-failed-rsync-attempt-1.log"
  max_retries=10
  if [ -f $retry_rsync_log ]; then rm -rf $retry_rsync_log; fi;

  # Separate flags for parallel rsync from additional_rsync_flags - never have verbose rsync for parallel
  parallel_rsync_flags=" -a "
  if $incremental ; then parallel_rsync_flags+=" --inplace --no-whole-file --delete"; fi;
@@ -1408,6 +1482,31 @@ initiate_rsyncs() {
   #jobs
   wait_on_rsync true 2 0 $dbrootToSync
   wait
+
+  # Check if any rsync failed and retry rsync for that folder
+  if [ -f $retry_rsync_log ]; then
+    retry_failed_rsync 1
+    if [ $? -eq 0 ]; then
+      if [ -f $retry_rsync_log ]; then rm $retry_rsync_log; fi;
+    fi
+
+    # if a file with mcs-failed-rsync-attempt-*.log exists, report it
+    if ls mcs-failed-rsync-attempt-*.log 1> /dev/null 2>&1; then
+      printf "\n%-s\n" "Could not rsync some directories, please manually triage ... $(date)"
+      # Print the file name and the contents of the log files
+      for file in mcs-failed-rsync-attempt-*.log; do
+        mv -f $file $file.$today
+        printf "%-s\n" "File: $file.$today"
+        while IFS= read -r line; do
+          source=$(echo "$line" | sed -n 's/.*Failed Source: \(.*\) Copy to Location:.*/\1/p')
+          target=$(echo "$line" | sed -n 's/.*Copy to Location: \(.*\)$/\1/p')
+          printf "  %-s: Source Size: %-s Failed Target Size: %-s \n" "$(date)" "$(du -sb $source)" "$(du -sb $target)"
+        done < "$file.$today"
+      done
+      handle_early_exit_on_backup "\n\n[!] Rsync failed for some directories, please check the logs above\nConsider an incremental backup to continue where it left off: --incremental auto_most_recent \n"
+    fi
+  fi
 }
# A recursive function that increments depthCurrent by 1 for each directory level it descends, issuing rsync on each directory remaining at the target depth
@@ -1442,8 +1541,8 @@ deepParallelRsync() {
     if ls $fullFilePath | xargs -P $PARALLEL_THREADS -I {} rsync $parallel_rsync_flags $fullFilePath/{} $backup_location$today/$relativePath/$fileName ; then
       echo "  + Completed: $backup_location$today/$relativePath/$fileName"
     else
-      echo "Failed: $backup_location$today/$relativePath/$fileName"
-      exit 1;
+      echo "Failed Source: $fullFilePath Copy to Location: $backup_location$today/$relativePath/$fileName" >> $retry_rsync_log
+      echo "Failed Source: $fullFilePath Copy to Location: $backup_location$today/$relativePath/$fileName ... added to $retry_rsync_log"
     fi

   else
@@ -1500,6 +1599,7 @@ human_readable_time() {
 }

 run_backup() {
+  trap handle_ctrl_c_backup SIGINT
   backup_start=$(date +%s)
   if [ $storage == "LocalStorage" ]; then
     if [ $backup_destination == "Local" ]; then
@@ -1537,7 +1637,7 @@ run_backup() {
       printf " - Parallel Rsync CS Data$i... \n"
       initiate_rsyncs $i
       columnstore_backup_end=$(date +%s)
-      printf " Done $(human_readable_time $((columnstore_backup_end-columnstore_backup_start))) \n"
+      printf " - Parallel Rsync CS Data$i Done $(human_readable_time $((columnstore_backup_end-columnstore_backup_start))) \n"
     else
       printf " - Syncing Columnstore Data$i... "
       eval "rsync $additional_rsync_flags /var/lib/columnstore/data$i/* $backup_location$today/data$i/ $extra_cmd_args";
@@ -1770,7 +1870,6 @@ run_backup() {

   printf "\nS3 Backup\n"
   # consistency check - wait for assigned journal dir to be empty
-  trap handle_ctrl_c_backup SIGINT
   i=1
   j_counts=$(find $cs_journal/data$ASSIGNED_DBROOT/* -type f 2>/dev/null | wc -l)
   max_wait=180
@@ -2325,7 +2424,7 @@ print_restore_variables()
   echo "Scp: $scp"
   echo "Storage: $storage"
   echo "Load Date: $load_date"
-  echo "timestamp: $(date +%m-%d-%Y-%H%M%S)"
+  echo "timestamp: $(date)"
   echo "DB Root Count: $DBROOT_COUNT"
   echo "PM: $pm"
   echo "PM Number: $pm_number"
@@ -2334,7 +2433,7 @@ print_restore_variables()
   echo "Backup Location: $backup_location"
   echo "Storage: $storage"
   echo "Load Date: $load_date"
-  echo "timestamp: $(date +%m-%d-%Y-%H%M%S)"
+  echo "timestamp: $(date)"
   echo "PM: $pm"
   echo "PM Number: $pm_number"
   echo "Active bucket: $( grep -m 1 "^bucket =" $STORAGEMANGER_CNF)"
@@ -2994,37 +3093,72 @@ run_restore()
|
||||
}
|
||||
|
||||
load_default_dbrm_variables() {
    # Fixed Paths
    CS_CONFIGS_PATH="/etc/columnstore"
    DBRM_PATH="/var/lib/columnstore/data1/systemFiles/dbrm"
    STORAGEMANAGER_PATH="/var/lib/columnstore/storagemanager"
    STORAGEMANGER_CNF="$CS_CONFIGS_PATH/storagemanager.cnf"

    # Default variables
    backup_base_name="dbrm_backup"
    backup_interval_minutes=90
    retention_days=0
    backup_location=/tmp/dbrm_backups
    STORAGEMANGER_CNF="/etc/columnstore/storagemanager.cnf"
    storage=$(grep -m 1 "^service = " $STORAGEMANGER_CNF | awk '{print $3}')
    skip_storage_manager=false
    mode="once"
    dbrm_backup_mode="once"
    quiet=false

    skip_save_brm=false
    skip_locks=false
    skip_polls=false
    poll_interval=3
    poll_max_wait=60;
    list_dbrm_backups=false
    dbrm_dir="/var/lib/columnstore/data1/systemFiles/dbrm"
    if [ "$storage" == "S3" ]; then
        dbrm_dir="/var/lib/columnstore/storagemanager"
        DBRM_PATH="/var/lib/columnstore/storagemanager"
    fi
    today=$(date +%m-%d-%Y)

    # PM and PM number
    if [ ! -f /var/lib/columnstore/local/module ]; then
        pm="pm1";
    else
        pm=$(cat /var/lib/columnstore/local/module);
    fi;
    PM_NUMBER=$(echo "$pm" | tr -dc '0-9')
    if [[ -z $PM_NUMBER ]]; then PM_NUMBER=1; fi;

    # Tracks if flush read lock has been run
    read_lock=false
    columnstore_online=false
    confirm_xmllint_installed
    # Number of DBRoots - an integer, usually 1 or 3
    DBROOT_COUNT=$(xmllint --xpath "string(//DBRootCount)" $CS_CONFIGS_PATH/Columnstore.xml)
    ASSIGNED_DBROOT=$(xmllint --xpath "string(//ModuleDBRootID$PM_NUMBER-1-3)" $CS_CONFIGS_PATH/Columnstore.xml)

    # Defaults that affect issue_write_locks but don't mean anything for dbrm_backup
    skip_mdb=false
    mode="direct"
}

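Two of the probes above are useful on their own when inspecting a node; both commands below are lifted directly from the function, with the default paths substituted in:

# Which storage backend is this node using? Prints e.g. LocalStorage or S3.
grep -m 1 "^service = " /etc/columnstore/storagemanager.cnf | awk '{print $3}'

# How many DBRoots does Columnstore.xml declare? Typically 1 or 3.
xmllint --xpath "string(//DBRootCount)" /etc/columnstore/Columnstore.xml
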
load_default_dbrm_restore_variables() {
    # Fixed Paths
    CS_CONFIGS_PATH="/etc/columnstore"
    DBRM_PATH="/var/lib/columnstore/data1/systemFiles/dbrm"
    STORAGEMANAGER_PATH="/var/lib/columnstore/storagemanager"
    STORAGEMANGER_CNF="$CS_CONFIGS_PATH/storagemanager.cnf"

    auto_start=true
    backup_location="/tmp/dbrm_backups"
    STORAGEMANGER_CNF="/etc/columnstore/storagemanager.cnf"
    storage=$(grep -m 1 "^service = " $STORAGEMANGER_CNF | awk '{print $3}')
    backup_folder_to_restore=""
    skip_dbrm_backup=false
    skip_storage_manager=false
    list_dbrm_backups=false

    dbrm_dir="/var/lib/columnstore/data1/systemFiles/dbrm"
    if [ "$storage" == "S3" ]; then
        dbrm_dir="/var/lib/columnstore/storagemanager"
        DBRM_PATH="/var/lib/columnstore/storagemanager"
    fi
}

@@ -3032,13 +3166,19 @@ print_dbrm_backup_help_text() {
    echo "
    Columnstore DBRM Backup

        -m    | --mode                  ['loop','once']; Determines if this script runs in a forever loop sleeping -i minutes or just once
        -i    | --interval              Number of minutes to sleep when --mode loop
        -r    | --retention-days        Retain dbrm backups created within the last X days, the rest are deleted
        -bl   | --backup-location       Path of where to save the dbrm backups on disk
        -nb   | --name-backup           Define the prefix of the backup - default: dbrm_backup+date +%Y%m%d_%H%M%S
        -ssm  | --skip-storage-manager  skip backing up storagemanager directory
        -li   | --list                  List available dbrm backups in the backup location
        -m    | --mode                  ['loop','once']; Determines if this script runs in a forever loop sleeping -i minutes or just once
        -i    | --interval              Number of minutes to sleep when --mode loop
        -r    | --retention-days        Retain dbrm backups created within the last X days, the rest are deleted
        -bl   | --backup-location       Path of where to save the dbrm backups on disk
        -nb   | --name-backup           Define the prefix of the backup - default: dbrm_backup+date +%Y%m%d_%H%M%S
        -ssm  | --skip-storage-manager  Skip backing up storagemanager directory
        -sbrm | --skip-save-brm         Skip saving brm prior to running a dbrm backup - ideal for dirty backups
        -slock| --skip-locks            Skip issuing flush read locks to dbrms
        -spoll| --skip-polls            Skip polling to confirm locks are released
        -pi   | --poll-interval         Number of seconds to wait between polls to confirm
        -pmw  | --poll-max-wait         Max number of minutes for polling checks for writes to wait before exiting as a failed dbrm backup attempt
        -q    | --quiet                 Suppress non-error output
        -li   | --list                  List available dbrm backups in the backup location

    Default: ./$0 dbrm_backup -m once --retention-days 0 --backup-location /tmp/dbrm_backups

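A few illustrative invocations assembled from the flags documented above (the script name mcs_backup_manager.sh is an assumption; substitute however the script is installed on your system):

# One-off backup, keep 7 days of dbrm backups, custom location:
./mcs_backup_manager.sh dbrm_backup -m once -r 7 -bl /mnt/dbrm_backups

# Loop forever, sleeping 90 minutes between dirty (lock- and save-free) backups:
./mcs_backup_manager.sh dbrm_backup -m loop -i 90 -sbrm -slock -spoll

# List the backups already in a location:
./mcs_backup_manager.sh dbrm_backup --list -bl /mnt/dbrm_backups
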
@@ -3099,7 +3239,7 @@ parse_dbrms_variables() {
        shift # past value
        ;;
    -m|--mode)
        mode="$2"
        dbrm_backup_mode="$2"
        shift # past argument
        shift # past value
        ;;
@@ -3116,6 +3256,28 @@ parse_dbrms_variables() {
        quiet=true
        shift # past argument
        ;;
    -sbrm| --skip-save-brm)
        skip_save_brm=true
        shift # past argument
        ;;
    -slock| --skip-locks)
        skip_locks=true
        shift # past argument
        ;;
    -spoll| --skip-polls)
        skip_polls=true
        shift # past argument
        ;;
    -pi| --poll-interval)
        poll_interval="$2"
        shift # past argument
        shift # past value
        ;;
    -pmw| --poll-max-wait)
        poll_max_wait="$2"
        shift # past argument
        shift # past value
        ;;
    -li | --list)
        list_dbrm_backups=true
        shift # past argument
@@ -3132,6 +3294,9 @@ parse_dbrms_variables() {
    done

    confirm_integer_else_fail "$retention_days" "Retention"


}

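Each new flag follows the script's manual argument loop: one shift consumes the flag itself and a second consumes its value when the flag takes one. A standalone sketch of the pattern, reduced to two of the flags above:

# Minimal form of the while/case/shift parser used throughout this script.
while [[ $# -gt 0 ]]; do
    case "$1" in
        -pi|--poll-interval)
            poll_interval="$2"
            shift # past argument
            shift # past value
            ;;
        -q|--quiet)
            quiet=true
            shift # past argument
            ;;
        *)
            echo "unknown flag: $1"; exit 1 ;;
    esac
done
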
parse_dbrm_restore_variables() {
@@ -3215,19 +3380,19 @@ confirm_numerical_or_decimal_else_fail() {
}

validation_prechecks_for_dbrm_backup() {

echo "Prechecks"
|
||||
# Confirm storage not empty
|
||||
if [ -z "$storage" ]; then printf "[!] Empty storage: \ncheck: grep -m 1 \"^service = \" \$STORAGEMANGER_CNF | awk '{print \$3}' \n\n"; fi;
|
||||
|
||||
# Check mode type
|
||||
errors=""
|
||||
case $mode in
|
||||
case $dbrm_backup_mode in
|
||||
once)
|
||||
errors+="" ;;
|
||||
loop)
|
||||
errors+="" ;;
|
||||
*) # unknown option
|
||||
printf "\nunknown mode: $mode\n"
|
||||
printf "\nunknown mode: $dbrm_backup_mode\n"
|
||||
printf "Only 'once' & 'loop' allowed\n\n"
|
||||
print_dbrm_backup_help_text
|
||||
exit 2;
|
||||
@@ -3236,6 +3401,24 @@ validation_prechecks_for_dbrm_backup() {
|
||||
# Check numbers
|
||||
confirm_numerical_or_decimal_else_fail "$backup_interval_minutes" "Interval"
|
||||
|
||||
# Poll Variable Checks
|
||||
confirm_integer_else_fail "$poll_interval" "poll_interval"
|
||||
confirm_integer_else_fail "$poll_max_wait" "poll_max_wait"
|
||||
max_poll_attempts=$((poll_max_wait * 60 / poll_interval))
|
||||
if [ "$max_poll_attempts" -lt 1 ]; then max_poll_attempts=1; fi;
|
||||
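
Worked through with the defaults loaded earlier (poll_interval=3, poll_max_wait=60; the help text calls poll_max_wait minutes, which matches the * 60 here):

# 60 minutes * 60 seconds / 3-second polls = 1200 attempts before giving up.
poll_interval=3
poll_max_wait=60
max_poll_attempts=$((poll_max_wait * 60 / poll_interval))   # -> 1200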

    # Detect if columnstore online
    if [ "$mode" != "direct" ] && [ "$mode" != "indirect" ] ; then printf "\n[!!!] Invalid field --mode: $mode\n"; exit 1; fi
    if [ $mode == "direct" ]; then
        if [ -z $(pidof PrimProc) ] || [ -z $(pidof WriteEngineServer) ]; then
            printf " - Columnstore is OFFLINE \n";
            export columnstore_online=false;
        else
            printf " - Columnstore is ONLINE - safer if offline (but not required to be offline) \n";
            export columnstore_online=true;
        fi
    fi;

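The same liveness test can be run by hand; PrimProc and WriteEngineServer are the two daemons the script checks for:

# Mirrors the check above: both daemons present means ColumnStore is online.
if pidof PrimProc >/dev/null && pidof WriteEngineServer >/dev/null; then
    echo "Columnstore is ONLINE"
else
    echo "Columnstore is OFFLINE"
fi
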
    # Check backup location exists
    if [ ! -d $backup_location ]; then
        echo "Created: $backup_location"
@@ -3396,23 +3579,28 @@ process_dbrm_backup() {

    load_default_dbrm_variables
    parse_dbrms_variables "$@";

    handle_list_dbrm_backups


    if ! $quiet ; then

        printf "\nDBRM Backup\n";
        echo "--------------------------------------------------------------------------"
        echo "Skips: save_brm($skip_save_brm) Locks($skip_locks) Polls($skip_polls) "
        if [ "$storage" == "S3" ]; then echo "Skips: Storagemanager($skip_storage_manager)"; fi;
        echo "--------------------------------------------------------------------------"
        printf "CS Storage: $storage\n";
        printf "Source: $dbrm_dir\n";
        printf "Backups: $backup_location\n";
        printf "CS Storage: $storage\n";
        printf "Source: $DBRM_PATH\n";
        printf "Backups: $backup_location\n";
        if [ "$mode" == "loop" ]; then
            printf "Interval: $backup_interval_minutes minutes\n";
            printf "Interval: $backup_interval_minutes minutes\n";
        fi;
        printf "Retention: $retention_days day(s)\n"
        printf "Mode: $mode\n"
        printf "Retention: $retention_days day(s)\n"
        printf "Mode: $dbrm_backup_mode\n"
        if ! $skip_polls && ! $skip_locks ; then
            printf "Poll Interval: $poll_interval seconds\n"
            printf "Poll Max Wait: $poll_max_wait seconds\n"
        fi;

        echo "--------------------------------------------------------------------------"
    fi;

@@ -3424,17 +3612,22 @@ process_dbrm_backup() {
    timestamp=$(date +%Y%m%d_%H%M%S)
    backup_folder="$backup_location/${backup_base_name}_${timestamp}"
    mkdir -p "$backup_folder"

    issue_write_locks;
    run_save_brm
    trap handle_ctrl_c_dbrm_backup SIGINT

    printf "\nDBRM Backup\n"
    # Copy files to the backup directory
    if [[ $skip_storage_manager == false || $storage == "LocalStorage" ]]; then
        print_if_not_quiet " - copying $dbrm_dir ...";
        cp -arp "$dbrm_dir"/* "$backup_folder"
        print_if_not_quiet " - Copying $DBRM_PATH ...";
        cp -arp "$DBRM_PATH"/* "$backup_folder"
        print_if_not_quiet " Done\n";
    fi

    if [ "$storage" == "S3" ]; then
        # smcat em files to disk
        print_if_not_quiet " - copying DBRMs from bucket ...";
        print_if_not_quiet " - Copying DBRMs from bucket ...";
        mkdir $backup_folder/dbrms/
        smls /data1/systemFiles/dbrm 2>/dev/null > $backup_folder/dbrms/dbrms.txt
        smcat /data1/systemFiles/dbrm/BRM_saves_current 2>/dev/null > $backup_folder/dbrms/BRM_saves_current
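
Only the first smcat survives before the hunk cuts off; the backup presumably repeats it for each BRM file listed by smls. A hedged sketch of that repetition (the exact file list in the elided lines is an assumption; smls/smcat are the StorageManager utilities already used above):

# Dump the remaining BRM files from the bucket next to BRM_saves_current.
for f in BRM_saves_em BRM_saves_vbbm BRM_saves_vss; do
    smcat /data1/systemFiles/dbrm/$f 2>/dev/null > "$backup_folder/dbrms/$f"
done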
@@ -3462,9 +3655,10 @@ process_dbrm_backup() {
        print_if_not_quiet " Done\n";
    fi;

    printf "Created: $backup_folder\n"

    if [ "$mode" == "once" ]; then
    clear_read_lock
    printf "\nCreated: $backup_folder\n"

    if [ "$dbrm_backup_mode" == "once" ]; then
        end=$(date +%s)
        runtime=$((end-start))
        if ! $quiet; then printf "\nRuntime: $runtime\n"; fi;
@@ -3837,12 +4031,12 @@ process_s3_dbrm_restore() {

printf "\nPreparing\n"
|
||||
printf "%-${printf_offset}s ..." " - Clearing storagemanager caches"
|
||||
if [ ! -d "$dbrm_dir/cache" ]; then
|
||||
echo "Directory $dbrm_dir/cache does not exist."
|
||||
if [ ! -d "$DBRM_PATH/cache" ]; then
|
||||
echo "Directory $DBRM_PATH/cache does not exist."
|
||||
exit 1
|
||||
fi
|
||||
for cache_dir in "${dbrm_dir}/cache"/*; do
|
||||
if [ -d "${dbrm_dir}/cache/${cache_dir}" ]; then
|
||||
for cache_dir in "${DBRM_PATH}/cache"/*; do
|
||||
if [ -d "${DBRM_PATH}/cache/${cache_dir}" ]; then
|
||||
echo " - Removing Cache: $cache_dir"
|
||||
else
|
||||
printf "."
|
||||
@@ -3945,32 +4139,32 @@ process_localstorage_dbrm_restore() {
|
||||
|
||||
printf "\nBefore DBRMs Restore\n"
|
||||
echo "--------------------------------------------------------------------------"
|
||||
ls -la "${dbrm_dir}" | grep -E "BRM_saves_em|BRM_saves_vbbm|BRM_saves_vss|BRM_saves_journal|BRM_saves_current"
|
||||
ls -la "${DBRM_PATH}" | grep -E "BRM_saves_em|BRM_saves_vbbm|BRM_saves_vss|BRM_saves_journal|BRM_saves_current"
|
||||
printf " - Clearing active DBRMs ... "
|
||||
if rm -rf $dbrm_dir ; then
|
||||
if rm -rf $DBRM_PATH ; then
|
||||
printf "Done\n"
|
||||
else
|
||||
echo "Failed to delete files in $dbrm_dir "
|
||||
echo "Failed to delete files in $DBRM_PATH "
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
printf "\nRestoring DBRMs\n"
|
||||
echo "--------------------------------------------------------------------------"
|
||||
printf " - Desired EM: $em_file_full_path\n"
|
||||
printf " - Copying DBRMs: \"${backup_location}/${backup_folder_to_restore_dbrms}\" -> \"$dbrm_dir\" \n"
|
||||
cp -arp "${backup_location}/${backup_folder_to_restore_dbrms}" $dbrm_dir
|
||||
printf " - Copying DBRMs: \"${backup_location}/${backup_folder_to_restore_dbrms}\" -> \"$DBRM_PATH\" \n"
|
||||
cp -arp "${backup_location}/${backup_folder_to_restore_dbrms}" $DBRM_PATH
|
||||
|
||||
|
||||
if [ "$prefix" != "BRM_saves" ]; then
|
||||
printf " - Restoring Prefix: $prefix \n"
|
||||
vbbm_name="${prefix}_vbbm"
|
||||
vss_name="${prefix}_vss"
|
||||
cp -arpf "${dbrm_dir}/$em_file_name" "${dbrm_dir}/BRM_saves_em"
|
||||
cp -arpf "${dbrm_dir}/$vbbm_name" "${dbrm_dir}/BRM_saves_vbbm"
|
||||
cp -arpf "${dbrm_dir}/$vss_name" "${dbrm_dir}/BRM_saves_vss"
|
||||
cp -arpf "${DBRM_PATH}/$em_file_name" "${DBRM_PATH}/BRM_saves_em"
|
||||
cp -arpf "${DBRM_PATH}/$vbbm_name" "${DBRM_PATH}/BRM_saves_vbbm"
|
||||
cp -arpf "${DBRM_PATH}/$vss_name" "${DBRM_PATH}/BRM_saves_vss"
|
||||
fi
|
||||
echo "BRM_saves" > "${dbrm_dir}/BRM_saves_current"
|
||||
chown -R mysql:mysql "${dbrm_dir}"
|
||||
echo "BRM_saves" > "${DBRM_PATH}/BRM_saves_current"
|
||||
chown -R mysql:mysql "${DBRM_PATH}"
|
||||
clearShm
|
||||
sleep 2
|
||||
|
||||
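A worked example of the prefix handling above, assuming the chosen extent map was saved under the alternate prefix BRM_saves2 (the value is illustrative):

# With prefix="BRM_saves2", the cp -arpf lines materialize that set as the
# active one, and BRM_saves_current is reset so the engine loads it:
#   BRM_saves2_em   -> BRM_saves_em
#   BRM_saves2_vbbm -> BRM_saves_vbbm
#   BRM_saves2_vss  -> BRM_saves_vss
prefix="BRM_saves2"
em_file_name="${prefix}_em"
vbbm_name="${prefix}_vbbm"
vss_name="${prefix}_vss"
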
@@ -3978,7 +4172,7 @@ process_localstorage_dbrm_restore() {

    printf "\nAfter DBRM Restore\n"
    echo "--------------------------------------------------------------------------"
    ls -la "${dbrm_dir}" | grep -E "BRM_saves_em|BRM_saves_vbbm|BRM_saves_vss|BRM_saves_journal|BRM_saves_current"
    ls -la "${DBRM_PATH}" | grep -E "BRM_saves_em|BRM_saves_vbbm|BRM_saves_vss|BRM_saves_journal|BRM_saves_current"

    if $auto_start; then
        printf "\nStartup\n"

@@ -1,5 +1,7 @@
[Unit]
Description=${CPACK_PACKAGE_DESCRIPTION_SUMMARY}
After=network.target network-online.target
Wants=network-online.target

[Service]
Environment=PYTHONPATH=${CMAPI_DIR}/deps

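The added After=/Wants= pair delays startup until the network is actually online rather than merely configured. Once the rendered unit is installed, the relationship can be confirmed with systemctl (the unit name below is an assumption; use whatever name this template installs as):

# Show the ordering and dependency the two new lines create.
systemctl show mariadb-columnstore-cmapi.service -p After -p Wants | grep network-online
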
@@ -815,6 +815,12 @@ void gp_walk(const Item* item, void* arg)
    }
  }

  // Clean up allocated objects if a fatal parse error occurred
  if (gwip->fatalParseError)
  {
    clearDeleteStacks(*gwip);
  }

  return;
}
