From c11cadeef5d97874d09d24cf2f75a1c2e1a3d8d4 Mon Sep 17 00:00:00 2001 From: Alan Mologorsky <89034356+mariadb-AlanMologorsky@users.noreply.github.com> Date: Thu, 27 Jun 2024 16:23:13 +0300 Subject: [PATCH] MCOL-5618-related: Duplicate to stable 23.10 (#3223) * feat(package_manager): improved cs_package_manager functionality for better UX - add apt support for community and enterprise install - add enterprise_staging install - filter check results down to os/arch - add --token flag for enterprise (#3002) * Update cs package manager dev ubuntu (#3060) * cs_package_manager.sh - fix ubuntu dev installs * cs_package_manager.sh - remove some useless comments * debian/ubuntu dev install to use drone repo + remove wget dependency * + exits if repo files missing * auto install aws cli for dev builds * removed commented section from cs_package_manager * feat(tooling): add rhel support in cs_package_manager.sh (#3105) * added dbrm_restore for LocalStorage & S3 * small comment cleanup * resolved PR comments for cs_package_manager.sh & mcs_backup_manager.sh * added dbrm_restore for LocalStorage & S3 * Added backup retention flag -r and incremental auto_most_recent option to mcs_backup_manager.sh * resolved PR comments for cs_package_manager.sh & mcs_backup_manager.sh * add comment to auto_select_most_recent_backup_for_incremental in mcs_backup_manager.sh * feat(cmapi,cli): MCOL-5618: Add backup, restore, backup-dbrm, restore-dbrm commands for mcs cli tool. (#3130) * MCOL-5618: Add backup, restore, backup-dbrm, restore-dbrm commands for mcs cli tool. [add] mcs_cluster_tool/helpers.py file with cook_sh_arg function inside [add] MCS_BACKUP_MANAGER_SH to mcs_cluster_tool/constants.py [add] backup and restore commands to "mcs" Typer APP * MCOL-5618: Move mcs_backup_manager.sh to cmapi/scripts. * MCOL-5618: Install mcs_backup_manager.sh with CMAPI package. * MCOL-5618: Testing fix + script minor fixes. 
- [fix] wrong cooking of .sh invoke string - [fix] restore_dbrm -> dbrm_restore, backup_dbrm -> dbrm_backup to use same naming in both python wrapper and mcs_backup_manager.sh - [fix] method of checking cmapi online or not in mcs_backup_manager.sh * MCOL-5618: Minor improvements to sync with PR 3144. [add] -ns option for dbrm_restore. * MCOL-5618: Minor fixes to sync after #3147 [add] -r option for backup command * MCOL-5618: Minor fixes to sync after PR #1398. [add] -ssm argument for dbrm backup [add] -sdbk -ssm arguments for dbrm restore [add] silence annoying pylint warning unused argument for backup_commands.py and restore_commands.py * update to the latest mcs_backup_manager + cs_package_manager * Intermediate commit to move changes from old filepath to the new one. * remove old file * MCOL-5618: Fix packaging. [mv] extra/cs_package_manager.sh -> cmapi/scripts/cs_package_manager.sh to pack and deliver the script with cmapi package [add] cs_package_manager.sh to cmapi package [fix] download paths for mcs_backup_manager.sh and cs_package_manager.sh * MCOL-5618: Fix after testing [add] -P to restore help in mcs_backup_restore.sh [fix] -f argument default value in both backup and restore wrapper commands [fix] -i argument behaviour and type + default value [fix] -P argument position and help for both backup and restore wrapper commands [fix] disable some pylint warning for both backup and restore wrapper files --------- Co-authored-by: Allen Herrera <82840027+mariadb-AllenHerrera@users.noreply.github.com> Co-authored-by: Allen Herrera Co-authored-by: Allen Herrera Co-authored-by: Leonid Fedorov <79837786+mariadb-LeonidFedorov@users.noreply.github.com> --- cmapi/CMakeLists.txt | 3 + cmapi/mcs_cluster_tool/__main__.py | 14 +- cmapi/mcs_cluster_tool/backup_commands.py | 353 +++ cmapi/mcs_cluster_tool/constants.py | 5 + cmapi/mcs_cluster_tool/helpers.py | 29 + cmapi/mcs_cluster_tool/restore_commands.py | 324 +++ cmapi/scripts/cs_package_manager.sh | 2095 
+++++++++++++++++ .../scripts}/mcs_backup_manager.sh | 1813 +++++++++----- extra/cs_package_manager.sh | 839 ------- 9 files changed, 4053 insertions(+), 1422 deletions(-) create mode 100644 cmapi/mcs_cluster_tool/backup_commands.py create mode 100644 cmapi/mcs_cluster_tool/helpers.py create mode 100644 cmapi/mcs_cluster_tool/restore_commands.py create mode 100644 cmapi/scripts/cs_package_manager.sh rename {extra => cmapi/scripts}/mcs_backup_manager.sh (71%) delete mode 100644 extra/cs_package_manager.sh diff --git a/cmapi/CMakeLists.txt b/cmapi/CMakeLists.txt index e88f68d32..b546560ed 100644 --- a/cmapi/CMakeLists.txt +++ b/cmapi/CMakeLists.txt @@ -84,6 +84,9 @@ INSTALL(FILES mcs_aws INSTALL(FILES mcs_gsutil PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ DESTINATION ${BIN_DIR}) +INSTALL(FILES scripts/mcs_backup_manager.sh scripts/cs_package_manager.sh + PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ + DESTINATION ${BIN_DIR}) OPTION(RPM "Build an RPM" OFF) IF(RPM) diff --git a/cmapi/mcs_cluster_tool/__main__.py b/cmapi/mcs_cluster_tool/__main__.py index 3b433ecfc..a246bc304 100644 --- a/cmapi/mcs_cluster_tool/__main__.py +++ b/cmapi/mcs_cluster_tool/__main__.py @@ -4,7 +4,9 @@ import sys import typer from cmapi_server.logging_management import dict_config, add_logging_level -from mcs_cluster_tool import cluster_app, cmapi_app +from mcs_cluster_tool import ( + cluster_app, cmapi_app, backup_commands, restore_commands +) from mcs_cluster_tool.constants import MCS_CLI_LOG_CONF_PATH @@ -16,11 +18,15 @@ app = typer.Typer( 'MCS services' ), ) -app.add_typer(cluster_app.app, name="cluster") -app.add_typer(cmapi_app.app, name="cmapi") +app.add_typer(cluster_app.app, name='cluster') +app.add_typer(cmapi_app.app, name='cmapi') +app.command()(backup_commands.backup) +app.command('backup-dbrm')(backup_commands.dbrm_backup) +app.command()(restore_commands.restore) +app.command('restore-dbrm')(restore_commands.dbrm_restore) -if __name__ == "__main__": +if __name__ == 
'__main__': add_logging_level('TRACE', 5) #TODO: remove when stadalone mode added. dict_config(MCS_CLI_LOG_CONF_PATH) logger = logging.getLogger('mcs_cli') diff --git a/cmapi/mcs_cluster_tool/backup_commands.py b/cmapi/mcs_cluster_tool/backup_commands.py new file mode 100644 index 000000000..c0679ded5 --- /dev/null +++ b/cmapi/mcs_cluster_tool/backup_commands.py @@ -0,0 +1,353 @@ +"""Typer application for backup Columnstore data.""" +import logging +import sys +from datetime import datetime +from typing_extensions import Annotated + +import typer + +from cmapi_server.process_dispatchers.base import BaseDispatcher +from mcs_cluster_tool.constants import MCS_BACKUP_MANAGER_SH +from mcs_cluster_tool.decorators import handle_output +from mcs_cluster_tool.helpers import cook_sh_arg + + +logger = logging.getLogger('mcs_cli') +# pylint: disable=unused-argument, too-many-arguments, too-many-locals +# pylint: disable=invalid-name, line-too-long + + +@handle_output +def backup( + bl: Annotated[ + str, + typer.Option( + '-bl', '--backup-location', + help=( + 'What directory to store the backups on this machine or the target machine.\n' + 'Consider write permissions of the scp user and the user running this script.\n' + 'Mariadb-backup will use this location as a tmp dir for S3 and remote backups temporarily.\n' + 'Example: /mnt/backups/' + ) + ) + ] = '/tmp/backups/', + bd: Annotated[ + str, + typer.Option( + '-bd', '--backup-destination', + help=( + 'Are the backups going to be stored on the same machine this ' + 'script is running on or another server - if Remote you need ' + 'to setup scp=' + 'Options: "Local" or "Remote"' + ) + ) + ] = 'Local', + scp: Annotated[ + str, + typer.Option( + '-scp', + help=( + 'Used only if --backup-destination="Remote".\n' + 'The user/credentials that will be used to scp the backup ' + 'files\n' + 'Example: "centos@10.14.51.62"' + ) + ) + ] = '', + bb: Annotated[ + str, + typer.Option( + '-bb', '--backup-bucket', + help=( + 'Only used if 
--storage=S3\n' + 'Name of the bucket to store the columnstore backups.\n' + 'Example: "s3://my-cs-backups"' + ) + ) + ] = '', + url: Annotated[ + str, + typer.Option( + '-url', '--endpoint-url', + help=( + 'Used by on premise S3 vendors.\n' + 'Example: "http://127.0.0.1:8000"' + ) + ) + ] = '', + nv_ssl: Annotated[ + bool, + typer.Option( + '-nv-ssl/-v-ssl','--no-verify-ssl/--verify-ssl', + help='Skips verifying ssl certs, useful for onpremise s3 storage.' + ) + ] = False, + s: Annotated[ + str, + typer.Option( + '-s', '--storage', + help=( + 'What storage topogoly is being used by Columnstore - found ' + 'in /etc/columnstore/storagemanager.cnf.\n' + 'Options: "LocalStorage" or "S3"' + ) + ) + ] = 'LocalStorage', + i: Annotated[ + str, + typer.Option( + '-i', '--incremental', + help=( + 'Adds columnstore deltas to an existing full backup. ' + 'Backup folder to apply increment could be a value or ' + '"auto_most_recent" - the incremental backup applies to ' + 'last full backup.' + ), + show_default=False + ) + ] = '', + ha: Annotated[ + bool, + typer.Option( + '-ha/-no-ha', '--highavilability/--no-highavilability', + help=( + 'Hint wether shared storage is attached @ below on all nodes ' + 'to see all data\n' + ' HA LocalStorage ( /var/lib/columnstore/dataX/ )\n' + ' HA S3 ( /var/lib/columnstore/storagemanager/ )' + ) + ) + ] = False, + f: Annotated[ + str, + typer.Option( + '-f', '--config-file', + help='Path to backup configuration file to load variables from.', + show_default=False + ) + ] = '', + sbrm: Annotated[ + bool, + typer.Option( + '-sbrm/-no-sbrm', '--skip-save-brm/--no-skip-save-brm', + help=( + 'Skip saving brm prior to running a backup - ' + 'ideal for dirty backups.' + ) + ) + ] = False, + spoll: Annotated[ + bool, + typer.Option( + '-spoll/-no-spoll', '--skip-polls/--no-skip-polls', + help='Skip sql checks confirming no write/cpimports running.' 
+ ) + ] = False, + slock: Annotated[ + bool, + typer.Option( + '-slock/-no-slock', '--skip-locks/--no-skip-locks', + help='Skip issuing write locks - ideal for dirty backups.' + ) + ] = False, + smdb: Annotated[ + bool, + typer.Option( + '-smdb/-no-smdb', '--skip-mariadb-backup/--no-skip-mariadb-backup', + help=( + 'Skip running a mariadb-backup for innodb data - ideal for ' + 'incremental dirty backups.' + ) + ) + ] = False, + sb: Annotated[ + bool, + typer.Option( + '-sb/-no-sb', '--skip-bucket-data/--no-skip-bucket-data', + help='Skip taking a copy of the columnstore data in the bucket.' + ) + ] = False, + pi: Annotated[ + int, + typer.Option( + '-pi', '--poll-interval', + help=( + 'Number of seconds between poll checks for active writes & ' + 'cpimports.' + ) + ) + ] = 5, + pmw: Annotated[ + int, + typer.Option( + '-pmw', '--poll-max-wait', + help=( + 'Max number of minutes for polling checks for writes to wait ' + 'before exiting as a failed backup attempt.' + ) + ) + ] = 60, + q: Annotated[ + bool, + typer.Option( + '-q/-no-q', '--quiet/--no-quiet', + help='Silence verbose copy command outputs.' + ) + ] = False, + c: Annotated[ + str, + typer.Option( + '-c', '--compress', + help='Compress backup in X format - Options: [ pigz ].', + show_default=False + ) + ] = '', + P: Annotated[ + int, + typer.Option( + '-P', '--parallel', + help=( + 'Determines if columnstore data directories will have ' + 'multiple rsync running at the same time for different ' + 'subfolders to parallelize writes. ' + 'Ignored if "-c/--compress" argument not set.' + ) + ) + ] = 4, + nb: Annotated[ + str, + typer.Option( + '-nb', '--name-backup', + help='Define the name of the backup - default: $(date +%m-%d-%Y)' + ) + ] = datetime.now().strftime('%m-%d-%Y'), + m: Annotated[ + str, + typer.Option( + '-m', '--mode', + help=( + 'Modes ["direct","indirect"] - direct backups run on the ' + 'columnstore nodes themselves. 
indirect run on another ' + 'machine that has read-only mounts associated with ' + 'columnstore/mariadb\n' + ), + hidden=True + ) + ] = 'direct', + r: Annotated[ + int, + typer.Option( + '-r', '--retention-days', + help=( + 'Retain backups created within the last X days, ' + 'default 0 == keep all backups.' + ) + ) + ] = 0, +): + """Backup Columnstore and/or MariDB data.""" + + # Local Storage Examples: + # ./$0 backup -bl /tmp/backups/ -bd Local -s LocalStorage + # ./$0 backup -bl /tmp/backups/ -bd Local -s LocalStorage -P 8 + # ./$0 backup -bl /tmp/backups/ -bd Local -s LocalStorage --incremental 02-18-2022 + # ./$0 backup -bl /tmp/backups/ -bd Remote -scp root@172.31.6.163 -s LocalStorage + + # S3 Examples: + # ./$0 backup -bb s3://my-cs-backups -s S3 + # ./$0 backup -bb s3://my-cs-backups -c pigz --quiet -sb + # ./$0 backup -bb gs://my-cs-backups -s S3 --incremental 02-18-2022 + # ./$0 backup -bb s3://my-onpremise-bucket -s S3 -url http://127.0.0.1:8000 + + # Cron Example: + # */60 */24 * * * root bash /root/$0 -bb s3://my-cs-backups -s S3 >> /root/csBackup.log 2>&1 + + arguments = [] + for arg_name, value in locals().items(): + sh_arg = cook_sh_arg(arg_name, value) + if sh_arg is None: + continue + arguments.append(sh_arg) + cmd = f'{MCS_BACKUP_MANAGER_SH} backup {" ".join(arguments)}' + success, _ = BaseDispatcher.exec_command(cmd, stdout=sys.stdout) + return {'success': success} + + +@handle_output +def dbrm_backup( + m: Annotated[ + str, + typer.Option( + '-m', '--mode', + help=( + '"loop" or "once" ; Determines if this script runs in a ' + 'forever loop sleeping -i minutes or just once.' + ), + ) + ] = 'once', + i: Annotated[ + int, + typer.Option( + '-i', '--interval', + help='Number of minutes to sleep when --mode=loop.' 
+ ) + ] = 90, + r: Annotated[ + int, + typer.Option( + '-r', '--retention-days', + help=( + 'Retain dbrm backups created within the last X days, ' + 'the rest are deleted' + ) + ) + ] = 7, + p: Annotated[ + str, + typer.Option( + '-p', '--path', + help='Path of where to save the dbrm backups on disk.' + ) + ] = '/tmp/dbrm_backups', + nb: Annotated[ + str, + typer.Option( + '-nb', '--name-backup', + help='Custom name to prefex dbrm backups with.' + ) + ] = 'dbrm_backup', + q: Annotated[ + bool, + typer.Option( + '-q/-no-q', '--quiet/--no-quiet', + help='Silence verbose copy command outputs.' + ) + ] = False, + ssm: Annotated[ + bool, + typer.Option( + '-ssm/-no-ssm', '--skip-storage-manager/--no-skip-storage-manager', + help='Skip backing up storagemanager directory.' + ) + ] = False, +): + """Columnstore DBRM Backup.""" + + # Default: ./$0 dbrm_backup -m once --retention-days 7 --path /tmp/dbrm_backups + + # Examples: + # ./$0 dbrm_backup --mode loop --interval 90 --retention-days 7 --path /mnt/dbrm_backups + # ./$0 dbrm_backup --mode once --retention-days 7 --path /mnt/dbrm_backups -nb my-one-off-backup + + # Cron Example: + # */60 */3 * * * root bash /root/$0 dbrm_backup -m once --retention-days 7 --path /tmp/dbrm_backups >> /tmp/dbrm_backups/cs_backup.log 2>&1 + arguments = [] + for arg_name, value in locals().items(): + sh_arg = cook_sh_arg(arg_name, value) + if sh_arg is None: + continue + arguments.append(sh_arg) + cmd = f'{MCS_BACKUP_MANAGER_SH} dbrm_backup {" ".join(arguments)}' + success, _ = BaseDispatcher.exec_command(cmd, stdout=sys.stdout) + return {'success': success} diff --git a/cmapi/mcs_cluster_tool/constants.py b/cmapi/mcs_cluster_tool/constants.py index 796259ff5..ec988175c 100644 --- a/cmapi/mcs_cluster_tool/constants.py +++ b/cmapi/mcs_cluster_tool/constants.py @@ -1,4 +1,9 @@ import os +from cmapi_server.constants import MCS_INSTALL_BIN + + MCS_CLI_ROOT_PATH = os.path.dirname(__file__) MCS_CLI_LOG_CONF_PATH = os.path.join(MCS_CLI_ROOT_PATH, 
'mcs_cli_log.conf') + +MCS_BACKUP_MANAGER_SH = os.path.join(MCS_INSTALL_BIN, 'mcs_backup_manager.sh') diff --git a/cmapi/mcs_cluster_tool/helpers.py b/cmapi/mcs_cluster_tool/helpers.py new file mode 100644 index 000000000..5aceba756 --- /dev/null +++ b/cmapi/mcs_cluster_tool/helpers.py @@ -0,0 +1,29 @@ +"""Module with helper functions for mcs cli tool.""" +from typing import Union + + +def cook_sh_arg(arg_name: str, value:Union[str, int, bool]) -> str: + """Convert argument and and value from function locals to bash argument. + + :param arg_name: function argument name + :type arg_name: str + :param value: function argument value + :type value: Union[str, int, bool] + :return: bash argument string + :rtype: str + """ + # skip "arguments" list and Typer ctx variables from local scope + if arg_name in ('arguments', 'ctx'): + return None + # skip args that have empty string as value + if value == '': + return None + if '_' in arg_name: + arg_name = arg_name.replace('_', '-') + # skip boolean args that have False value + if isinstance(value, bool): + if not value: + return None + # if True value presented just pass only arg name without value + value = '' + return f'-{arg_name} {value}' if value else f'-{arg_name}' diff --git a/cmapi/mcs_cluster_tool/restore_commands.py b/cmapi/mcs_cluster_tool/restore_commands.py new file mode 100644 index 000000000..e4881f917 --- /dev/null +++ b/cmapi/mcs_cluster_tool/restore_commands.py @@ -0,0 +1,324 @@ +"""Typer application for restore Columnstore data.""" +import logging +import sys +from typing_extensions import Annotated + +import typer + +from cmapi_server.process_dispatchers.base import BaseDispatcher +from mcs_cluster_tool.constants import MCS_BACKUP_MANAGER_SH +from mcs_cluster_tool.decorators import handle_output +from mcs_cluster_tool.helpers import cook_sh_arg + + +logger = logging.getLogger('mcs_cli') +# pylint: disable=unused-argument, too-many-arguments, too-many-locals +# pylint: disable=invalid-name, line-too-long + 
+ +@handle_output +def restore( + l: Annotated[ + str, + typer.Option( + '-l', '--load', + help='What date folder to load from the backup_location.' + ) + ] = '', + bl: Annotated[ + str, + typer.Option( + '-bl', '--backup-location', + help=( + 'Where the backup to load is found.\n' + 'Example: /mnt/backups/' + ) + ) + ] = '/tmp/backups/', + bd: Annotated[ + str, + typer.Option( + '-bd', '--backup_destination', + help=( + 'Is this backup on the same or remote server compared to ' + 'where this script is running.\n' + 'Options: "Local" or "Remote"' + ) + ) + ] = 'Local', + scp: Annotated[ + str, + typer.Option( + '-scp', '--secure-copy-protocol', + help=( + 'Used only if --backup-destination=Remote' + 'The user/credentials that will be used to scp the backup files.' + 'Example: "centos@10.14.51.62"' + ) + ) + ] = '', + bb: Annotated[ + str, + typer.Option( + '-bb', '--backup-bucket', + help=( + 'Only used if --storage=S3\n' + 'Name of the bucket to store the columnstore backups.\n' + 'Example: "s3://my-cs-backups"' + ) + ) + ] = '', + url: Annotated[ + str, + typer.Option( + '-url', '--endpoint-url', + help=( + 'Used by on premise S3 vendors.\n' + 'Example: "http://127.0.0.1:8000"' + ) + ) + ] = '', + s: Annotated[ + str, + typer.Option( + '-s', '--storage', + help=( + 'What storage topogoly is being used by Columnstore - found ' + 'in /etc/columnstore/storagemanager.cnf.\n' + 'Options: "LocalStorage" or "S3"' + ) + ) + ] = 'LocalStorage', + dbs: Annotated[ + int, + typer.Option( + '-dbs', '--dbroots', + help='Number of database roots in the backup.' + ) + ] = 1, + pm: Annotated[ + str, + typer.Option( + '-pm', '--nodeid', + help=( + 'Forces the handling of the restore as this node as opposed ' + 'to whats detected on disk.' + ) + ) + ] = '', + nb: Annotated[ + str, + typer.Option( + '-nb', '--new-bucket', + help=( + 'Defines the new bucket to copy the s3 data to from the ' + 'backup bucket. 
Use -nb if the new restored cluster should ' + 'use a different bucket than the backup bucket itself.' + ) + ) + ] = '', + nr: Annotated[ + str, + typer.Option( + '-nr', '--new-region', + help=( + 'Defines the region of the new bucket to copy the s3 data to ' + 'from the backup bucket.' + ) + ) + ] = '', + nk: Annotated[ + str, + typer.Option( + '-nk', '--new-key', + help='Defines the aws key to connect to the new_bucket.' + ) + ] = '', + ns: Annotated[ + str, + typer.Option( + '-ns', '--new-secret', + help=( + 'Defines the aws secret of the aws key to connect to the ' + 'new_bucket.' + ) + ) + ] = '', + ha: Annotated[ + bool, + typer.Option( + '-ha/-no-ha', '--highavilability/--no-highavilability', + help=( + 'Flag for high available systems (meaning shared storage ' + 'exists supporting the topology so that each node sees ' + 'all data)' + ) + ) + ] = False, + cont: Annotated[ + bool, + typer.Option( + '-cont/-no-cont', '--continue/--no-continue', + help=( + 'This acknowledges data in your --new_bucket is ok to delete ' + 'when restoring S3. When set to true skips the enforcement ' + 'that new_bucket should be empty prior to starting a restore.' + ) + ) + ] = False, + f: Annotated[ + str, + typer.Option( + '-f', '--config-file', + help='Path to backup configuration file to load variables from.', + show_default=False + ) + ] = '', + smdb: Annotated[ + bool, + typer.Option( + '-smdb/-no-smdb', '--skip-mariadb-backup/--no-skip-mariadb-backup', + help=( + 'Skip restoring mariadb server via mariadb-backup - ideal for ' + 'only restoring columnstore.' + ) + ) + ] = False, + sb: Annotated[ + bool, + typer.Option( + '-sb/-no-sb', '--skip-bucket-data/--no-skip-bucket-data', + help=( + 'Skip restoring columnstore data in the bucket - ideal if ' + 'looking to only restore mariadb server.' + ) + ) + ] = False, + m: Annotated[ + str, + typer.Option( + '-m', '--mode', + help=( + 'Modes ["direct","indirect"] - direct backups run on the ' + 'columnstore nodes themselves. 
indirect run on another ' + 'machine that has read-only mounts associated with ' + 'columnstore/mariadb\n' + ), + hidden=True + ) + ] = 'direct', + c: Annotated[ + str, + typer.Option( + '-c', '--compress', + help=( + 'Hint that the backup is compressed in X format. ' + 'Options: [ pigz ].' + ), + show_default=False + ) + ] = '', + P: Annotated[ + int, + typer.Option( + '-P', '--parallel', + help=( + 'Determines number of decompression and mdbstream threads. ' + 'Ignored if "-c/--compress" argument not set.' + ) + ) + ] = 4, + q: Annotated[ + bool, + typer.Option( + '-q/-no-q', '--quiet/--no-quiet', + help='Silence verbose copy command outputs.' + ) + ] = False, + nv_ssl: Annotated[ + bool, + typer.Option( + '-nv-ssl/-v-ssl','--no-verify-ssl/--verify-ssl', + help='Skips verifying ssl certs, useful for onpremise s3 storage.' + ) + ] = False, +): + """Restore Columnstore (and/or MariaDB) data.""" + + # Local Storage Examples: + # ./$0 restore -s LocalStorage -bl /tmp/backups/ -bd Local -l 12-29-2021 + # ./$0 restore -s LocalStorage -bl /tmp/backups/ -bd Remote -scp root@172.31.6.163 -l 12-29-2021 + + # S3 Storage Examples: + # ./$0 restore -s S3 -bb s3://my-cs-backups -l 12-29-2021 + # ./$0 restore -s S3 -bb gs://on-premise-bucket -l 12-29-2021 -url http://127.0.0.1:8000 + # ./$0 restore -s S3 -bb s3://my-cs-backups -l 08-16-2022 -nb s3://new-data-bucket -nr us-east-1 -nk AKIAxxxxxxx3FHCADF -ns GGGuxxxxxxxxxxnqa72csk5 -ha + arguments = [] + for arg_name, value in locals().items(): + sh_arg = cook_sh_arg(arg_name, value) + if sh_arg is None: + continue + arguments.append(sh_arg) + cmd = f'{MCS_BACKUP_MANAGER_SH} restore {" ".join(arguments)}' + success, _ = BaseDispatcher.exec_command(cmd, stdout=sys.stdout) + return {'success': success} + + +@handle_output +def dbrm_restore( + p: Annotated[ + str, + typer.Option( + '-p', '--path', + help='Path of where dbrm backups stored on disk.' 
+ ) + ] = '/tmp/dbrm_backups', + d: Annotated[ + str, + typer.Option( + '-d', '--directory', + help='Date or directory chose to restore from.' + ) + ] = '', + ns: Annotated[ + bool, + typer.Option( + '-ns', '--no-start', + help=( + 'Do not attempt columnstore startup post dbrm_restore.' + ) + ) + ] = False, + sdbk: Annotated[ + bool, + typer.Option( + '-sdbk/-no-sdbk', '--skip-dbrm-backup/--no-skip-dbrm-backup', + help=( + 'Skip backing up dbrms before restoring.' + ) + ) + ] = True, + ssm: Annotated[ + bool, + typer.Option( + '-ssm/-no-ssm', '--skip-storage-manager/--no-skip-storage-manager', + help='Skip backing up storagemanager directory.' + ) + ] = True, +): + """Restore Columnstore DBRM data.""" + + # Default: ./$0 dbrm_restore --path /tmp/dbrm_backups + + # Examples: + # ./$0 dbrm_restore --path /tmp/dbrm_backups --directory dbrm_backup_20240318_172842 + # ./$0 dbrm_restore --path /tmp/dbrm_backups --directory dbrm_backup_20240318_172842 --no-start + arguments = [] + for arg_name, value in locals().items(): + sh_arg = cook_sh_arg(arg_name, value) + if sh_arg is None: + continue + arguments.append(sh_arg) + cmd = f'{MCS_BACKUP_MANAGER_SH} dbrm_restore {" ".join(arguments)}' + success, _ = BaseDispatcher.exec_command(cmd, stdout=sys.stdout) + return {'success': success} diff --git a/cmapi/scripts/cs_package_manager.sh b/cmapi/scripts/cs_package_manager.sh new file mode 100644 index 000000000..fd164ee7e --- /dev/null +++ b/cmapi/scripts/cs_package_manager.sh @@ -0,0 +1,2095 @@ +#!/bin/bash +# Documentation: bash cs_package_manager.sh help + +# Variables +enterprise_token="" +dev_drone_key="" +cs_pkg_manager_version="3.3" +if [ ! 
-f /var/lib/columnstore/local/module ]; then pm="pm1"; else pm=$(cat /var/lib/columnstore/local/module); fi; +pm_number=$(echo "$pm" | tr -dc '0-9') +action=$1 + +print_help_text() { + echo "Version $cs_pkg_manager_version + +Example Remove: + bash $0 remove + bash $0 remove all + +Example Install: + bash $0 install [enterprise|community|dev] [version|branch] [build num] --token xxxxxxx + bash $0 install enterprise 10.6.12-8 --token xxxxxx + bash $0 install community 11.1 + bash $0 install dev develop cron/8629 + bash $0 install dev develop-23.02 pull_request/7256 + +Example Check: + bash $0 check community + bash $0 check enterprise +" +} + +wait_cs_down() { + retries=0 + max_number_of_retries=20 + + if ! command -v pgrep &> /dev/null; then + printf "\n[!] pgrep not found. Please install pgrep\n\n" + exit 1; + fi + + # Loop until the maximum number of retries is reached + # printf " - Checking Columnstore Offline ..."; + while [ $retries -lt $max_number_of_retries ]; do + # If columnstore is offline, return + cs_processlist=$(pgrep -f "PrimProc|ExeMgr|DMLProc|DDLProc|WriteEngineServer|StorageManager|controllernode|workernode|save_brm|mcs-loadbrm.py") + if [ -z "$cs_processlist" ]; then + # printf " Done \n"; + mcs_offine=true + return 0 + else + printf "\n[!] Columnstore is ONLINE - waiting 5s to retry, attempt: $retries...\n"; + if (( retries % 5 == 0 )); then + echo "PID List: $cs_processlist" + echo "$(ps aux | grep -E "PrimProc|ExeMgr|DMLProc|DDLProc|WriteEngineServer|StorageManager|controllernode|workernode|save_brm|mcs-loadbrm.py" | grep -v grep) " + fi + sleep 5 + ((retries++)) + fi + done + + # If maximum retries reached, exit with an error + echo "PID List: $cs_processlist" + printf "\n[!!!] Columnstore is still online after $max_number_of_retries retries ... exiting \n\n" + exit 2 +} + +print_and_copy() { + printf " - %-35s ..." "$1" + cp -p $1 $2 + printf " Done\n" +} + +print_and_delete() { + printf " - %-25s ..." 
"$1" + rm -rf $1 + printf " Done\n" +} + +init_cs_down() { + mcs_offine=false + if [ "$pm_number" == "1" ]; then + if [ -z $(pidof PrimProc) ]; then + # printf "\n[+] Columnstore offline already"; + mcs_offine=true + else + + if is_cmapi_installed ; then + confirm_cmapi_online_and_configured + + # Stop columnstore + printf "%-35s ... " " - Stopping Columnstore Engine" + if command -v mcs &> /dev/null; then + if ! mcs_output=$(mcs cluster stop); then + echo "[!] Failed stopping via mcs ... trying cmapi curl" + stop_cs_cmapi_via_curl + fi + printf "Done - $(date)\n" + + # Handle Errors with exit 0 code + if [ ! -z "$(echo $mcs_output | grep "Internal Server Error")" ];then + stop_cs_via_systemctl_override + fi + else + stop_cs_cmapi_via_curl + fi + else + stop_cs_via_systemctl + fi + fi + fi +} + +init_cs_up(){ + + if [ "$pm_number" == "1" ]; then + if [ -n "$(pidof PrimProc)" ]; then + + num_cs_processlist=$(pgrep -f "PrimProc|ExeMgr|DMLProc|DDLProc|WriteEngineServer|StorageManager|controllernode|workernode|save_brm|mcs-loadbrm.py" | wc -l) + if [ $num_cs_processlist -gt "0" ]; then + printf "%-35s ... $num_cs_processlist processes \n" " - Columnstore Engine Online" + fi + + else + + # Check cmapi installed + if is_cmapi_installed ; then + confirm_cmapi_online_and_configured + + # Start columnstore + printf "%-35s ..." " - Starting Columnstore Engine" + if command -v mcs &> /dev/null; then + if ! mcs_output=$(mcs cluster start); then + echo "[!] Failed starting via mcs ... 
trying cmapi curl" + start_cs_cmapi_via_curl + fi + printf " Done - $(date)\n" + + else + start_cs_cmapi_via_curl + fi + else + start_cs_via_systemctl + fi + fi + fi +} + +stop_columnstore() { + init_cs_down + wait_cs_down +} + +is_cmapi_installed() { + + if [ -z "$package_manager" ]; then + echo "package_manager: $package_manager" + echo "package_manager is not defined" + exit 1; + fi + + cmapi_installed_command="" + case $package_manager in + yum ) + cmapi_installed_command="yum list installed MariaDB-columnstore-cmapi &> /dev/null;"; + ;; + apt ) + cmapi_installed_command="dpkg-query -s mariadb-columnstore-cmapi &> /dev/null;"; + ;; + *) # unknown option + echo "\npackage manager not implemented: $package_manager\n" + exit 2; + esac + + if eval $cmapi_installed_command ; then + return 0 + else + return 1 + fi +} + +start_cmapi() { + + if ! command -v systemctl &> /dev/null ; then + printf "[!!] shutdown_mariadb_and_cmapi: Cant access systemctl\n\n" + exit 1; + fi + + if is_cmapi_installed ; then + printf "%-35s ..." " - Starting CMAPI" + if systemctl start mariadb-columnstore-cmapi ; then + printf " Done\n" + else + echo "[!!] Failed to start CMAPI" + exit 1; + fi; + fi +} + +stop_cmapi() { + + if ! command -v systemctl &> /dev/null ; then + printf "[!!] shutdown_mariadb_and_cmapi: Cant access systemctl\n\n" + exit 1; + fi + + if is_cmapi_installed ; then + printf "%-35s ..." " - Stopping CMAPI" + if systemctl stop mariadb-columnstore-cmapi ; then + printf " Done\n" + else + echo "[!!] Failed to stop CMAPI" + exit 1; + fi; + fi +} + +# Inputs: +# $1 = version already installed +# $2 = version desired to install +# Returns 0 if $2 is greater, else exit +compare_versions() { + local version1="$1" + local version2="$2" + local exit_message="\n[!] 
The desired upgrade version: $2 \nis NOT greater than the current installed version of $1\n\n" + + # Split version strings into arrays & remove leading 0 + IFS='._-' read -ra v1_nums <<< "${version1//.0/.}" + IFS='._-' read -ra v2_nums <<< "${version2//.0/.}" + + # Compare each segment of the version numbers + for (( i = 0; i < ${#v1_nums[@]}; i++ )); do + + v1=${v1_nums[i]} + v2=${v2_nums[i]} + #echo "Comparing $v1 & $v2" + if (( v1 > v2 )); then + #echo "Installed version is newer: $version1" + echo -e $exit_message + exit 1 + elif (( v1 < v2 )); then + #echo "Desired version is newer: $version2" + return 0 + fi + done + + # If all segments are equal, versions are the same + #echo "Both versions are the same: $version1" + echo -e $exit_message + exit 1 +} + +is_mariadb_installed() { + + if [ -z "$package_manager" ]; then + echo "package_manager: $package_manager" + echo "package_manager is not defined" + exit 1; + fi + + mariadb_installed_command="" + case $package_manager in + yum ) + mariadb_installed_command="yum list installed MariaDB-server &>/dev/null" + ;; + apt ) + mariadb_installed_command="dpkg-query -s mariadb-server &> /dev/null;" + ;; + *) # unknown option + echo "\nshutdown_mariadb_and_cmapi - package manager not implemented: $package_manager\n" + exit 2; + esac + + if eval $mariadb_installed_command ; then + return 0 + else + return 1 + fi + +} + +start_mariadb() { + if ! command -v systemctl &> /dev/null ; then + printf "[!!] start_mariadb: Cant access systemctl\n\n" + exit 1; + fi + + # Start MariaDB + if is_mariadb_installed ; then + printf "%-35s ..." " - Starting MariaDB Server" + if systemctl start mariadb ; then + printf " Done\n" + else + echo "[!!] Failed to start MariaDB Server" + exit 1; + fi; + fi +} + +stop_mariadb() { + if ! command -v systemctl &> /dev/null ; then + printf "[!!] stop_mariadb: Cant access systemctl\n\n" + exit 1; + fi + + # Stop MariaDB + if is_mariadb_installed ; then + printf "%-35s ..." 
" - Stopping MariaDB Server" + if systemctl stop mariadb ; then + printf " Done\n" + else + echo "[!!] Failed to stop MariaDB Server" + exit 1; + fi; + fi +} + +do_yum_remove() { + + if ! command -v yum &> /dev/null ; then + printf "[!!] Cant access yum\n" + exit 1; + fi + + printf "Prechecks\n" + init_cs_down + wait_cs_down + stop_mariadb + stop_cmapi + + if command -v clearShm &> /dev/null ; then + clearShm + fi + + printf "\nRemoving packages \n" + + # remove any mdb rpms on disk + if ls MariaDB-*.rpm &>/dev/null; then + print_and_delete "MariaDB-*.rpm" + fi + + # remove all current MDB packages + if yum list installed MariaDB-* &>/dev/null; then + yum remove MariaDB-* -y + fi + + # remove offical & custom yum repos + printf "\nRemoving\n" + print_and_delete "/etc/yum.repos.d/mariadb.repo" + print_and_delete "/etc/yum.repos.d/drone.repo" + + if [ "$2" == "all" ]; then + print_and_delete "/var/lib/mysql/" + print_and_delete "/var/lib/columnstore/" + print_and_delete "/etc/my.cnf.d/*" + print_and_delete "/etc/columnstore/*" + fi; +} + +do_apt_remove() { + + if ! command -v apt &> /dev/null ; then + printf "[!!] Cant access apt\n" + exit 1; + fi + + if ! command -v dpkg-query &> /dev/null ; then + printf "[!!] Cant access dpkg-query\n" + exit 1; + fi + + printf "\n[+] Prechecks \n" + init_cs_down + wait_cs_down + stop_mariadb + stop_cmapi + + if command -v clearShm &> /dev/null ; then + clearShm + fi + + printf "\n[+] Removing packages - $(date) ... 
\n" + # remove any mdb rpms on disk + if ls mariadb*.deb &>/dev/null; then + print_and_delete "mariadb*.deb" + fi + + # Delete columnstores post uninstall script for debian/ubuntu as it doesnt work + print_and_delete "/var/lib/dpkg/info/mariadb-plugin-columnstore.postrm" + + # remove all current MDB packages + if [ "$(apt list --installed mariadb-* 2>/dev/null | wc -l)" -gt 1 ]; then + if [ "$2" == "all" ]; then + DEBIAN_FRONTEND=noninteractive apt remove mariadb-plugin-columnstore mariadb-columnstore-cmapi --purge -y + DEBIAN_FRONTEND=noninteractive apt remove --purge -y mariadb-* + else + if ! apt remove mariadb-columnstore-cmapi --purge -y; then + printf "[!!] Failed to remove columnstore \n" + fi + + if ! apt remove mariadb-* -y; then + printf "[!!] Failed to remove the rest of mariadb \n\n" + fi + fi + fi + + if [ "$(apt list --installed mysql-common 2>/dev/null | wc -l)" -gt 1 ]; then + if ! apt remove mysql-common -y; then + printf "[!!] Failed to remove mysql-common \n" + fi + fi + + printf "\n[+] Removing all columnstore files & dirs\n" + if [ "$2" == "all" ]; then + print_and_delete "/var/lib/mysql" + print_and_delete "/var/lib/columnstore" + print_and_delete "/etc/columnstore" + print_and_delete "/etc/mysql/mariadb.conf.d/columnstore.cnf.rpmsave" + fi + # remove offical & custom yum repos + print_and_delete "/lib/systemd/system/mariadb.service" + print_and_delete "/lib/systemd/system/mariadb.service.d" + print_and_delete "/etc/apt/sources.list.d/mariadb.list*" + print_and_delete "/etc/apt/sources.list.d/drone.list" + systemctl daemon-reload + +} + +do_remove() { + + check_operating_system + check_package_managers + + case $distro_info in + centos | rhel | rocky ) + do_yum_remove "$@" + ;; + + ubuntu | debian ) + do_apt_remove "$@" + ;; + *) # unknown option + echo "\ndo_remove: os & version not implemented: $distro_info\n" + exit 2; + esac + + printf "\nUninstall Complete\n\n" +} + +check_package_managers() { + + package_manager=''; + if command -v apt &> 
/dev/null ; then + if ! command -v dpkg-query &> /dev/null ; then + printf "[!!] Cant access dpkg-query\n" + exit 1; + fi + package_manager="apt"; + fi + + if command -v yum &> /dev/null ; then + package_manager="yum"; + fi + + if [ "$package_manager" == '' ]; then + echo "[!!] No package manager found: yum or apt must be installed" + exit 1; + fi; +} + + +# Confirms mac have critical binaries to run this script +# As of 3/2024 supports cs_package_manager.sh check +check_mac_dependencies() { + + # Install ggrep if not exists + if ! which ggrep >/dev/null 2>&1; then + echo "Attempting Auto install of ggrep" + + if ! which brew >/dev/null 2>&1; then + echo "Attempting Auto install of brew" + bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" + fi + brew install grep + fi + + # Exit if ggrep still doesnt exist + if ! which ggrep >/dev/null 2>&1; then + echo "Failed to install ggrep" + echo "which ggrep" + echo "" + exit 1; + fi + +} + +check_operating_system() { + + if [[ $OSTYPE == 'darwin'* ]]; then + echo "Running on macOS" + check_mac_dependencies + + # on action=check - these values are used as triggers to prompt the user to select what OS/version they want to check against + distro_info="mac" + distro="mac" + version_id_exact=$(grep -A1 ProductVersion "/System/Library/CoreServices/SystemVersion.plist" | sed -n 's/.*\(.*\)<\/string>.*/\1/p') + version_id=$( echo "$version_id_exact" | awk -F. '{print $1}') + distro_short="${distro_info:0:3}${version_id}" + return + fi; + + distro_info=$(awk -F= '/^ID=/{gsub(/"/, "", $2); print $2}' /etc/os-release) + version_id_exact=$( grep 'VERSION_ID=' /etc/os-release | awk -F= '{gsub(/"/, "", $2); print $2}') + version_id=$( echo "$version_id_exact" | awk -F. 
'{print $1}') + + echo "Distro: $distro_info" + echo "Version: $version_id" + + # distros=(centos7 debian11 debian12 rockylinux8 rockylinux9 ubuntu20.04 ubuntu22.04) + case $distro_info in + centos | rhel ) + distro="${distro_info}${version_id}" + ;; + debian ) + distro="${distro_info}${version_id_exact}" + ;; + rocky ) + distro="rockylinux${version_id}" + ;; + ubuntu ) + distro="${distro_info}${version_id_exact}" + ;; + *) # unknown option + printf "\ncheck_operating_system: unknown os & version: $distro_info\n" + exit 2; + esac + distro_short="${distro_info:0:3}${version_id}" +} + +check_cpu_architecture() { + + architecture=$(uname -m) + echo "CPU: $architecture" + + # arch=(amd64 arm64) + case $architecture in + x86_64 ) + arch="amd64" + ;; + aarch64 ) + arch="arm64" + ;; + *) # unknown option + echo "Error: Unsupported architecture ($architecture)" + esac +} + +check_mdb_installed() { + packages="" + current_mariadb_version="" + case $package_manager in + yum ) + packages=$(yum list installed | grep -i mariadb) + current_mariadb_version=$(rpm -q --queryformat '%{VERSION}\n' MariaDB-server 2>/dev/null) + ;; + apt ) + packages=$(apt list --installed mariadb-* 2>/dev/null | grep -i mariadb); + current_mariadb_version=$(dpkg-query -f '${Version}\n' -W mariadb-server 2>/dev/null) + # remove prefix 1: & +maria~deb1 - example: current_mariadb_version="1:10.6.16.11+maria~deb1" + current_mariadb_version="${current_mariadb_version#*:}" + current_mariadb_version="${current_mariadb_version%%+*}" + ;; + *) # unknown option + printf "\ncheck_no_mdb_installed: package manager not implemented - $package_manager\n" + exit 2; + esac + + if [ -z "$packages" ]; then + printf "\nMariaDB packages are NOT installed. 
Please install them before continuing.\n" + echo $packages; + printf "Example: bash $0 install [community|enterprise] [version] \n\n" + exit 2; + fi; + + +} + +check_no_mdb_installed() { + + packages="" + case $distro_info in + centos | rhel | rocky ) + packages=$(yum list installed | grep -i mariadb) + ;; + ubuntu | debian ) + packages=$(apt list --installed mariadb-* 2>/dev/null | grep -i mariadb); + ;; + *) # unknown option + printf "\ncheck_no_mdb_installed: os & version not implemented: $distro_info\n" + exit 2; + esac + + if [ -n "$packages" ]; then + printf "\nMariaDB packages are installed. Please uninstall them before continuing.\n" + echo $packages; + printf "Example: bash $0 remove\n\n" + exit 2; + fi; +} + +check_aws_cli_installed() { + + if ! command -v aws &> /dev/null ; then + echo "[!] aws cli - binary could not be found" + echo "[+] Installing aws cli ..." + + cli_url="https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" + case $architecture in + x86_64 ) + cli_url="https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" + ;; + aarch64 ) + cli_url="https://awscli.amazonaws.com/awscli-exe-linux-aarch64.zip" + ;; + *) # unknown option + echo "Error: Unsupported architecture ($architecture)" + esac + + case $distro_info in + centos | rhel | rocky ) + rm -rf aws awscliv2.zip + yum install unzip -y; + curl "$cli_url" -o "awscliv2.zip"; + unzip -q awscliv2.zip; + sudo ./aws/install; + mv /usr/local/bin/aws /usr/bin/aws; + aws configure set default.s3.max_concurrent_requests 70 + ;; + ubuntu | debian ) + rm -rf aws awscliv2.zip + if ! sudo apt install unzip -y; then + echo "[!!] 
Installing Unzip Failed: Trying update" + sudo apt update -y; + sudo apt install unzip -y; + fi + curl "$cli_url" -o "awscliv2.zip"; + unzip -q awscliv2.zip; + sudo ./aws/install; + mv /usr/local/bin/aws /usr/bin/aws; + aws configure set default.s3.max_concurrent_requests 70 + ;; + *) # unknown option + printf "\nos & version not implemented: $distro_info\n" + exit 2; + esac + + + fi +} + +check_dev_build_exists() { + + if ! aws s3 ls $s3_path --no-sign-request &> /dev/null; then + printf "[!] Defined dev build doesnt exist in aws\n\n" + exit 2; + fi; +} + +do_enterprise_apt_install() { + + # Install MariaDB + apt-get clean + if ! apt install mariadb-server -y --quiet; then + printf "\n[!] Failed to install mariadb-server \n\n" + exit 1; + fi + sleep 2 + systemctl daemon-reload + systemctl enable mariadb + systemctl start mariadb + + # Install Columnstore + if ! apt install mariadb-plugin-columnstore -y --quiet; then + printf "\n[!] Failed to install columnstore \n\n" + exit 1; + fi + + # Somes cmapi is installed with columnstore - double check + if ! is_cmapi_installed ; then + if ! apt install mariadb-columnstore-cmapi jq -y --quiet; then + printf "\n[!] Failed to install cmapi\n\n" + mariadb -e "show status like '%Columnstore%';" + fi; + fi + + if is_cmapi_installed ; then + systemctl daemon-reload + systemctl enable mariadb-columnstore-cmapi + systemctl start mariadb-columnstore-cmapi + mariadb -e "show status like '%Columnstore%';" + sleep 2 + + confirm_cmapi_online_and_configured + init_cs_up + fi + +} + +do_enterprise_yum_install() { + + # Install MariaDB + yum clean all + yum install MariaDB-server -y + sleep 2 + systemctl enable mariadb + systemctl start mariadb + + # Install Columnstore + if ! yum install MariaDB-columnstore-engine -y; then + printf "\n[!] Failed to install columnstore\n\n" + exit 1; + fi + + # Install Cmapi + if ! yum install MariaDB-columnstore-cmapi jq -y; then + printf "\n[!] 
Failed to install cmapi\n\n" + + else + systemctl enable mariadb-columnstore-cmapi + systemctl start mariadb-columnstore-cmapi + mariadb -e "show status like '%Columnstore%';" + sleep 1; + + confirm_cmapi_online_and_configured + init_cs_up + fi + +} + +enterprise_install() { + + version=$3 + check_set_es_token "$@" + + if [ -z $version ]; then + printf "\n[!] Version empty: $version\n\n" + exit 1; + fi; + + echo "Token: $enterprise_token" + echo "MariaDB Version: $version" + echo "-----------------------------------------------" + + url="https://dlm.mariadb.com/enterprise-release-helpers/mariadb_es_repo_setup" + if $enterprise_staging; then + url="https://dlm.mariadb.com/$enterprise_token/enterprise-release-helpers-staging/mariadb_es_repo_setup" + fi + + # Download Repo setup script + rm -rf mariadb_es_repo_setup + curl -L "$url" -o mariadb_es_repo_setup; + chmod +x mariadb_es_repo_setup; + if ! bash mariadb_es_repo_setup --token="$enterprise_token" --apply --mariadb-server-version="$version"; then + printf "\n[!] Failed to apply mariadb_es_repo_setup...\n\n" + exit 2; + fi; + + case $distro_info in + centos | rhel | rocky ) + + if [ ! -f "/etc/yum.repos.d/mariadb.repo" ]; then printf "\n[!] Expected to find mariadb.repo in /etc/yum.repos.d \n\n"; exit 1; fi; + + if $enterprise_staging; then + sed -i 's/mariadb-es-main/mariadb-es-staging/g' /etc/yum.repos.d/mariadb.repo + sed -i 's/mariadb-enterprise-server/mariadb-enterprise-staging/g' /etc/yum.repos.d/mariadb.repo + printf "\n\n[+] Adjusted mariadb.repo to: mariadb-enterprise-staging\n\n" + fi; + + do_enterprise_yum_install "$@" + ;; + ubuntu | debian ) + + if [ ! -f "/etc/apt/sources.list.d/mariadb.list" ]; then printf "\n[!] 
Expected to find mariadb.list in /etc/apt/sources.list.d \n\n"; exit 1; fi; + + if $enterprise_staging; then + sed -i 's/mariadb-enterprise-server/mariadb-enterprise-staging/g' /etc/apt/sources.list.d/mariadb.list + apt update + printf "\n\n[+] Adjusted mariadb.list to: mariadb-enterprise-staging\n\n" + fi; + + do_enterprise_apt_install "$@" + ;; + *) # unknown option + printf "\nenterprise_install: os & version not implemented: $distro_info\n" + exit 2; + esac +} + +community_install() { + + version=$3 + if [ -z $version ]; then + printf "Version empty: $version\n" + + exit 1; + fi; + + echo "MariaDB Community Version: $version" + echo "-----------------------------------------------" + + # Download Repo setup + rm -rf mariadb_repo_setup + + if ! curl -sS https://downloads.mariadb.com/MariaDB/mariadb_repo_setup | bash -s -- --mariadb-server-version=mariadb-$version ; then + echo "version bad or mariadb_repo_setup unavailable. exiting ..." + exit 2; + fi; + + case $distro_info in + centos | rhel | rocky ) + do_community_yum_install "$@" + ;; + ubuntu | debian ) + do_community_apt_install "$@" + ;; + *) # unknown option + printf "\ncommunity_install: os & version not implemented: $distro_info\n" + exit 2; + esac + +} + +do_community_yum_install() { + + # Install MariaDB then Columnstore + yum clean all + if ! yum install MariaDB-server -y; then + printf "\n[!] Failed to install MariaDB-server \n\n" + exit 1; + fi + sleep 2; + systemctl enable mariadb + systemctl start mariadb + + # Install Columnstore + if ! yum install MariaDB-columnstore-engine -y; then + printf "\n[!] Failed to install columnstore \n\n" + exit 1; + fi + + cmapi_installable=$(yum list | grep MariaDB-columnstore-cmapi) + if [ -n "$cmapi_installable" ]; then + # Install Cmapi + if ! yum install MariaDB-columnstore-cmapi jq -y; then + printf "\n[!] 
Failed to install cmapi\n\n" + exit 1; + else + systemctl enable mariadb-columnstore-cmapi + systemctl start mariadb-columnstore-cmapi + mariadb -e "show status like '%Columnstore%';" + sleep 2 + + printf "\nPost Install\n" + confirm_cmapi_online_and_configured + init_cs_up + fi + fi +} + +do_community_apt_install() { + + # Install MariaDB + apt-get clean + if ! apt install mariadb-server -y --quiet; then + printf "\n[!] Failed to install mariadb-server \n\n" + exit 1; + fi + sleep 2 + systemctl daemon-reload + systemctl enable mariadb + systemctl start mariadb + + # Install Columnstore + if ! apt install mariadb-plugin-columnstore -y --quiet; then + printf "\n[!] Failed to install columnstore \n\n" + exit 1; + fi; + + if ! apt install mariadb-columnstore-cmapi jq -y --quiet ; then + printf "\n[!] Failed to install cmapi \n\n" + mariadb -e "show status like '%Columnstore%';" + else + systemctl daemon-reload + systemctl enable mariadb-columnstore-cmapi + systemctl start mariadb-columnstore-cmapi + mariadb -e "show status like '%Columnstore%';" + sleep 2 + + confirm_cmapi_online_and_configured + init_cs_up + fi +} + +get_set_cmapi_key() { + + CMAPI_CNF="/etc/columnstore/cmapi_server.conf" + + if [ ! -f $CMAPI_CNF ]; then + echo "[!!] No cmapi config file found" + exit 1; + fi; + + # Add API Key if missing + if [ -z "$(grep ^x-api-key $CMAPI_CNF)" ]; then + + if ! command -v openssl &> /dev/null ; then + api_key="19bb89d77cb8edfe0864e05228318e3dfa58e8f45435fbd9bd12c462a522a1e9" + else + api_key=$(openssl rand -hex 32) + fi + + printf "%-35s ..." 
" - Setting API Key:" + if cmapi_output=$( curl -s https://127.0.0.1:8640/cmapi/0.4.0/cluster/status \ + --header 'Content-Type:application/json' \ + --header "x-api-key:$api_key" -k ) ; then + printf " Done - $( echo $cmapi_output | jq -r tostring ) \n" + sleep 2; + else + printf " Failed to set API key\n\n" + exit 1; + fi + else + api_key=$(grep ^x-api-key $CMAPI_CNF | cut -d "=" -f 2 | tr -d " ") + fi +} + +add_node_cmapi_via_curl() { + + node_ip=$1 + if [ -z $api_key ]; then get_set_cmapi_key; fi; + + # Add Node + printf "%-35s ..." " - Adding primary node via curl" + if cmapi_output=$( curl -k -s -X PUT https://127.0.0.1:8640/cmapi/0.4.0/cluster/node \ + --header 'Content-Type:application/json' \ + --header "x-api-key:$api_key" \ + --data "{\"timeout\": 120, \"node\": \"$node_ip\"}" ); then + printf " Done - $(echo $cmapi_output | jq -r tostring )\n" + else + echo "Failed adding node" + exit 1; + fi + +} + +start_cs_via_systemctl() { + if systemctl start mariadb-columnstore ; then + echo " - Started Columnstore" + else + echo "[!!] Failed to start columnstore via systemctl" + exit 1; + fi; +} + +start_cs_cmapi_via_curl() { + + if [ -z $api_key ]; then get_set_cmapi_key; fi; + + if curl -k -s -X PUT https://127.0.0.1:8640/cmapi/0.4.0/cluster/start \ + --header 'Content-Type:application/json' \ + --header "x-api-key:$api_key" \ + --data '{"timeout":20}'; then + echo " - Started Columnstore" + else + echo " - [!] Failed to start columnstore via cmapi curl" + echo " - Trying via systemctl ..." 
+ start_cs_via_systemctl + fi; +} + +stop_cs_via_systemctl_override() { + systemctl stop mariadb-columnstore-cmapi; + systemctl stop mcs-ddlproc; + systemctl stop mcs-dmlproc; + systemctl stop mcs-workernode@1; + systemctl stop mcs-workernode@2; + systemctl stop mcs-controllernode; + systemctl stop mcs-storagemanager; + systemctl stop mcs-primproc; + systemctl stop mcs-writeengineserver; +} + +stop_cs_via_systemctl() { + printf " - Trying to stop columnstore via systemctl" + if systemctl stop mariadb-columnstore ; then + printf " Done\n" + else + printf "\n[!!] Failed to stop columnstore\n" + exit 1; + fi; +} + +stop_cs_cmapi_via_curl() { + + if [ -z $api_key ]; then get_set_cmapi_key; fi; + + if curl -k -s -X PUT https://127.0.0.1:8640/cmapi/0.4.0/cluster/shutdown \ + --header 'Content-Type:application/json' \ + --header "x-api-key:$api_key" \ + --data '{"timeout":20}'; then + echo " - Stopped Columnstore via curl" + else + printf "\n[!] Failed to stop columnstore via cmapi\n" + stop_cs_via_systemctl + fi; +} + +add_primary_node_cmapi() { + + primary_ip="127.0.0.1" + if [ -z $api_key ]; then get_set_cmapi_key; fi; + + if command -v mcs &> /dev/null ; then + # Only add 127.0.0.1 if no nodes are configured in cmapi + if [ "$(mcs cluster status | jq -r '.num_nodes')" == "0" ]; then + printf "%-35s ..." " - Adding primary node" + if mcs_output=$( timeout 30s mcs cluster node add --node $primary_ip ); then + echo " Done - $( echo $mcs_output | jq -r tostring )" + else + echo "[!] Failed ... trying cmapi curl" + echo "$mcs_output" + add_node_cmapi_via_curl $primary_ip + fi; + fi; + + else + echo "mcs - binary could not be found" + add_node_cmapi_via_curl $primary_ip + printf "%-35s ..." 
" - Starting Columnstore Engine" + start_cs_cmapi_via_curl + fi +} + + +dev_install() { + + if [ -z $dev_drone_key ]; then printf "Missing dev_drone_key: \n"; exit; fi; + check_aws_cli_installed + + echo "Branch: $3" + echo "Build: $4" + dronePath="s3://$dev_drone_key" + branch="$3" + build="$4" + product="10.6-enterprise" + if [ -z "$branch" ]; then printf "Missing branch: $branch\n"; exit 2; fi; + if [ -z "$build" ]; then printf "Missing build: $branch\n"; exit 2; fi; + + # Construct URLs + s3_path="$dronePath/$branch/$build/$product/$arch" + drone_http=$(echo "$s3_path" | sed "s|s3://$dev_drone_key/|https://${dev_drone_key}.s3.amazonaws.com/|") + echo "Locations:" + echo "Bucket: $s3_path" + echo "Drone: $drone_http" + echo "###################################" + + check_dev_build_exists + + case $distro_info in + centos | rhel | rocky ) + s3_path="${s3_path}/$distro" + drone_http="${drone_http}/$distro" + do_dev_yum_install "$@" + ;; + ubuntu | debian ) + do_dev_apt_install "$@" + ;; + *) # unknown option + printf "\ndev_install: os & version not implemented: $distro_info\n" + exit 2; + esac + + + confirm_cmapi_online_and_configured + init_cs_up +} + +do_dev_yum_install() { + + echo "[drone] +name=Drone Repository +baseurl="$drone_http" +gpgcheck=0 +enabled=1 + " > /etc/yum.repos.d/drone.repo + yum clean all + # yum makecache + # yum list --disablerepo="*" --enablerepo="drone" + + # ALL RPMS: aws s3 cp $s3_path/ . --recursive --exclude "debuginfo" --include "*.rpm" + aws s3 cp $s3_path/ . --recursive --exclude "*" --include "MariaDB-server*" --exclude "*debug*" --no-sign-request + + # Confirm Downloaded server rpm + if ! ls MariaDB-server-*.rpm 1> /dev/null 2>&1; then + echo "Error: No MariaDB-server RPMs were found." + exit 1 + fi + + # Install MariaDB Server + if ! yum install MariaDB-server-*.rpm -y; then + printf "\n[!] Failed to install MariaDB-server \n\n" + exit 1; + fi + + # Install Columnstore + if ! 
yum install MariaDB-columnstore-engine -y; then + printf "\n[!] Failed to install columnstore \n\n" + exit 1; + fi + + # Install Cmapi + if ! yum install MariaDB-columnstore-cmapi jq -y; then + printf "\n[!] Failed to install cmapi\n\n" + exit 1; + else + systemctl start mariadb + systemctl enable mariadb-columnstore-cmapi + systemctl start mariadb-columnstore-cmapi + mariadb -e "show status like '%Columnstore%';" + sleep 2 + + confirm_cmapi_online_and_configured + init_cs_up + fi +} + +do_dev_apt_install() { + + echo "deb [trusted=yes] ${drone_http} ${distro}/" > /etc/apt/sources.list.d/repo.list + cat << EOF > /etc/apt/preferences +Package: * +Pin: origin cspkg.s3.amazonaws.com +Pin-Priority: 1700 +EOF + + # Install MariaDB + apt-get clean + apt-get update + if ! apt install mariadb-server -y --quiet; then + printf "\n[!] Failed to install mariadb-server \n\n" + exit 1; + fi + sleep 2 + systemctl daemon-reload + systemctl enable mariadb + systemctl start mariadb + + # Install Columnstore + if ! apt install mariadb-plugin-columnstore -y --quiet; then + printf "\n[!] Failed to install columnstore \n\n" + exit 1; + fi; + + if ! apt install mariadb-columnstore-cmapi jq -y --quiet ; then + printf "\n[!] 
Failed to install cmapi \n\n" + mariadb -e "show status like '%Columnstore%';" + else + systemctl daemon-reload + systemctl enable mariadb-columnstore-cmapi + systemctl start mariadb-columnstore-cmapi + mariadb -e "show status like '%Columnstore%';" + sleep 2 + + confirm_cmapi_online_and_configured + init_cs_up + fi + +} + +do_install() { + + check_operating_system + check_cpu_architecture + check_no_mdb_installed + check_package_managers + + repo=$2 + enterprise_staging=false + echo "Repository: $repo" + case $repo in + enterprise ) + # pull from enterprise repo + enterprise_install "$@" ; + ;; + enterprise_staging ) + enterprise_staging=true + enterprise_install "$@" ; + ;; + community ) + # pull from public community repo + community_install "$@" ; + ;; + dev ) + # pull from dev repo - requires dev_drone_key + dev_install "$@" ; + ;; + *) # unknown option + echo "Unknown repo: $repo\n" + exit 2; + esac + + printf "\nInstall Complete\n\n" +} + +# Small augmentation of https://github.com/mariadb-corporation/mariadb-columnstore-engine/blob/develop/cmapi/check_ready.sh +cmapi_check_ready() { + SEC_TO_WAIT=15 + cmapi_success=false + for i in $(seq 1 $SEC_TO_WAIT); do + printf "." + if ! $(curl -k -s --output /dev/null --fail https://127.0.0.1:8640/cmapi/ready); then + sleep 1 + else + cmapi_success=true + break + fi + done + + if $cmapi_success; then + return 0; + else + printf "\nCMAPI not ready after waiting $SEC_TO_WAIT seconds. Check log file for further details.\n\n" + exit 1; + fi +} + +confirm_cmapi_online_and_configured() { + + if command -v mcs &> /dev/null; then + cmapi_current_status=$(mcs cmapi is-ready 2> /dev/null); + if [ $? -ne 0 ]; then + + # if cmapi is not online - check systemd is running and start cmapi + if [ "$(ps -p 1 -o comm=)" = "systemd" ]; then + + printf "%-35s .." " - Checking CMAPI Online" + if systemctl start mariadb-columnstore-cmapi; then + cmapi_check_ready + printf " Pass\n" + else + echo "[!!] 
Failed to start CMAPI" + exit 1; + fi + else + printf "systemd is not running - cant start cmapi\n\n" + exit 1; + fi + else + + # Check if the JSON string is in the expected format + if ! echo "$cmapi_current_status" | jq -e '.started | type == "boolean"' >/dev/null; then + echo "Error: CMAPI JSON string response is not in the expected format" + exit 1 + fi + + # Check if 'started' is true + if ! echo "$cmapi_current_status" | jq -e '.started == true' >/dev/null; then + echo "Error: CMAPI 'started' is not true" + echo "mcs cmapi is-ready" + exit 1 + fi + fi + else + printf "%-35s ..." " - Checking CMAPI online" + cmapi_check_ready + printf " Done\n" + fi; + + + confirm_nodes_configured +} + +# currently supports singlenode only +confirm_nodes_configured() { + # If the first run after install will set cmapi key for 'mcs cluster status' to work + if [ -z $api_key ]; then get_set_cmapi_key; fi; + + # Check for edge case of cmapi not configured + if command -v mcs &> /dev/null; then + if [ "$(mcs cluster status | jq -r '.num_nodes')" == "0" ]; then + add_primary_node_cmapi + sleep 1; + fi + else + + if [ "$(curl -k -s https://127.0.0.1:8640/cmapi/0.4.0/cluster/status \ + --header 'Content-Type:application/json' \ + --header "x-api-key:$api_key" | jq -r '.num_nodes')" == "0" ] ; then + echo " - Stopped Columnstore via curl" + else + add_primary_node_cmapi + sleep 1; + fi; + fi +} + +# For future release +do_dev_upgrade() { + echo "fsadfa" +} + +# For future release +dev_upgrade() { + + # Variables + if [ -z $dev_drone_key ]; then printf "[!] 
Missing dev_drone_key \nvi $0\n"; exit; fi; + check_aws_cli_installed + echo "Branch: $3" + echo "Build: $4" + dronePath="s3://$dev_drone_key" + branch="$3" + build="$4" + product="10.6-enterprise" + if [ -z "$branch" ]; then printf "Missing branch: $branch\n"; exit 2; fi; + if [ -z "$build" ]; then printf "Missing build: $branch\n"; exit 2; fi; + + # Construct URLs + s3_path="$dronePath/$branch/$build/$product/$arch" + drone_http=$(echo "$s3_path" | sed "s|s3://$dev_drone_key/|https://${dev_drone_key}.s3.amazonaws.com/|") + echo "Upgrade Version" + echo "Bucket: $s3_path" + echo "Drone: $drone_http" + echo "-----------------------------------------------" + + # Prechecks + printf "\nPrechecks\n" + check_dev_build_exists + check_gtid_strict_mode + + # Stop All + init_cs_down + wait_cs_down + stop_mariadb + stop_cmapi + + # Make backups of configurations, dbrms + pre_upgrade_dbrm_backup + pre_upgrade_configuration_backup + + # Upgrade + do_dev_upgrade + + # Start All + printf "\nStartup\n" + start_mariadb + start_cmapi + init_cs_up + + # Post Upgrade + confirm_dbrmctl_ok + run_mariadb_upgrade +} + +do_community_upgrade () { + + # Download Repo setup + printf "\nDownloading Repo Setup\n" + rm -rf mariadb_repo_setup + if ! curl -sS https://downloads.mariadb.com/MariaDB/mariadb_repo_setup | bash -s -- --mariadb-server-version=mariadb-$version ; then + printf "\n[!] Failed to apply mariadb_repo_setup...\n\n" + exit 2; + fi; + + case $package_manager in + yum ) + if [ ! -f "/etc/yum.repos.d/mariadb.repo" ]; then printf "\n[!] enterprise_upgrade: Expected to find mariadb.repo in /etc/yum.repos.d \n\n"; exit 1; fi; + + # Run the YUM update + printf "\nBeginning Update\n" + if yum update "MariaDB-*" "MariaDB-columnstore-engine" "MariaDB-columnstore-cmapi"; then + printf " - Success Update\n" + else + printf "[!!] Failed to update - exit code: $? \n" + printf "Check messages above if uninstall/reinstall of new version required\n\n" + exit 1; + fi + ;; + apt ) + if [ ! 
-f "/etc/apt/sources.list.d/mariadb.list" ]; then printf "\n[!] enterprise_upgrade: Expected to find mariadb.list in /etc/apt/sources.list.d \n\n"; exit 1; fi; + + # Run the APT update + printf "\nBeginning Update\n" + apt-get clean + if apt update; then + echo " - Success Update" + else + echo "[!!] Failed to update " + exit 1; + fi + + if apt install --only-upgrade '?upgradable ?name(mariadb.*)'; then + echo " - Success Update mariadb.*" + else + echo "[!!] Failed to update " + exit 1; + fi + systemctl daemon-reload + ;; + *) # unknown option + printf "\ndo_community_upgrade: os & version not implemented: $distro_info\n" + exit 2; + esac +} + +community_upgrade() { + + version=$3 + if [ -z $version ]; then + printf "\n[!] Version empty: $version\n\n" + exit 1; + fi; + + echo "Current MariaDB Verison: $current_mariadb_version" + echo "Upgrade To MariaDB Version: $version" + echo "-----------------------------------------------" + + # Prechecks + printf "\nPrechecks\n" + check_gtid_strict_mode + check_mariadb_versions + + # Stop All + init_cs_down + wait_cs_down + stop_mariadb + stop_cmapi + + # Make backups of configurations, dbrms + pre_upgrade_dbrm_backup + pre_upgrade_configuration_backup + + # Upgrade + do_community_upgrade + + # Start All + printf "\nStartup\n" + start_mariadb + start_cmapi + init_cs_up + + # Post Upgrade + confirm_dbrmctl_ok + run_mariadb_upgrade + +} + +confirm_dbrmctl_ok() { + retry_limit=1800 + retry_counter=0 + printf "%-35s ... " " - Checking DBRM Status" + good_dbrm_status="OK. (and the system is ready)" + current_status=$(dbrmctl -v status); + while [ "$current_status" != "$good_dbrm_status" ]; do + sleep 1 + printf "." + current_status=$(dbrmctl -v status); + if [ $? -ne 0 ]; then + printf "\n[!] Failed to get dbrmctl -v status\n\n" + exit 1 + fi + if [ $retry_counter -ge $retry_limit ]; then + printf "\n[!] 
Set columnstore readonly wait retry limit exceeded: $retry_counter \n\n" + exit 1 + fi + + ((retry_counter++)) + done + printf "$current_status \n" +} + +pre_upgrade_dbrm_backup() { + + if [ ! -f "mcs_backup_manager.sh" ]; then + wget https://raw.githubusercontent.com/mariadb-corporation/mariadb-columnstore-engine/develop/cmapi/scripts/mcs_backup_manager.sh; chmod +x mcs_backup_manager.sh; + fi; + if ! source mcs_backup_manager.sh source ;then + printf "\n[!!] Failed to source mcs_backup_manager.sh\n\n" + exit 1; + else + echo " - Sourced mcs_backup_manager.sh" + fi + # Confirm the function exists and the source of mcs_backup_manager.sh worked + if command -v process_dbrm_backup &> /dev/null; then + # Take an automated backup + if ! process_dbrm_backup -r 9999 -nb preupgrade_dbrm_backup --quiet ; then + echo "[!!] Failed to take a DBRM backup before restoring" + echo "exiting ..." + exit 1; + fi; + else + echo "Error: 'process_dbrm_backup' function not found via mcs_backup_manager.sh"; + exit 1; + fi + + +} + +pre_upgrade_configuration_backup() { + pre_upgrade_config_directory="/tmp/preupgrade-configurations-$(date +%m-%d-%Y-%H%M)" + case $distro_info in + centos | rhel | rocky ) + printf "[+] Created: $pre_upgrade_config_directory \n" + mkdir -p $pre_upgrade_config_directory + print_and_copy "/etc/columnstore/Columnstore.xml" "$pre_upgrade_config_directory" + print_and_copy "/etc/columnstore/storagemanager.cnf" "$pre_upgrade_config_directory" + print_and_copy "/etc/columnstore/cmapi_server.conf" "$pre_upgrade_config_directory" + print_and_copy "/etc/my.cnf.d/server.cnf" "$pre_upgrade_config_directory" + # convert server.cnf to a find incase mysql dir not standard + ;; + ubuntu | debian ) + printf "[+] Created: $pre_upgrade_config_directory \n" + mkdir -p $pre_upgrade_config_directory + print_and_copy "/etc/columnstore/Columnstore.xml" "$pre_upgrade_config_directory" + print_and_copy "/etc/columnstore/storagemanager.cnf" "$pre_upgrade_config_directory" + 
print_and_copy "/etc/columnstore/cmapi_server.conf" "$pre_upgrade_config_directory" + print_and_copy "/etc/mysql/mariadb.conf.d/*server.cnf" "$pre_upgrade_config_directory" + ;; + *) # unknown option + printf "\npre_upgrade_configuration_backup: os & version not implemented: $distro_info\n" + exit 2; + esac +} + + +check_mariadb_versions() { + + if [ -z "$current_mariadb_version" ]; then + printf "[!] No current current_mariadb_version detected" + exit 2; + fi + + if [ -z "$version" ]; then + printf "[!] No current upgrade version detected" + exit 2; + fi + + printf "%-35s ..." " - Checking MariaDB Version Newer" + compare_versions "$current_mariadb_version" "$version" + printf " Done\n" +} + +check_gtid_strict_mode() { + if ! command -v my_print_defaults &> /dev/null; then + printf "\n[!] my_print_defaults not found. Ensure gtid_strict_mode=0 \n" + else + printf "%-35s ..." " - Checking gtid_strict_mode" + strict_mode=$(my_print_defaults --mysqld 2>/dev/null | grep "gtid[-_]strict[-_]mode") + if [ -n "$strict_mode" ] && [ $strict_mode == "--gtid_strict_mode=1" ]; then + echo "my_print_defaults --mysqld | grep gtid[-_]strict[-_]mode Result: $strict_mode" + printf "Disable gtid_strict_mode before trying again\n\n" + exit 1; + else + printf " Done\n" + fi + fi +} + +run_mariadb_upgrade() { + if ! command -v mariadb-upgrade &> /dev/null; then + printf "\n[!] mariadb-upgrade not found. Please install mariadb-upgrade\n\n" + exit 1; + fi + + if [ "$pm_number" == "1" ]; then + printf "\nMariaDB Upgrade\n" + if ! mariadb-upgrade --write-binlog ; then + printf "[!!] 
Failed to complete mariadb-upgrade \n" + exit 1; + fi + fi +} + +do_enterprise_upgrade() { + + # Download Repo setup script & run it + printf "\nDownloading Repo Setup\n" + rm -rf mariadb_es_repo_setup + url="https://dlm.mariadb.com/enterprise-release-helpers/mariadb_es_repo_setup" + if $enterprise_staging; then + url="https://dlm.mariadb.com/$enterprise_token/enterprise-release-helpers-staging/mariadb_es_repo_setup" + fi + curl -LO "$url" -o mariadb_es_repo_setup; + chmod +x mariadb_es_repo_setup; + if ! bash mariadb_es_repo_setup --token="$enterprise_token" --apply --mariadb-server-version="$version"; then + printf "\n[!] Failed to apply mariadb_es_repo_setup...\n\n" + exit 2; + fi; + + case $package_manager in + yum ) + if [ ! -f "/etc/yum.repos.d/mariadb.repo" ]; then printf "\n[!] enterprise_upgrade: Expected to find mariadb.repo in /etc/yum.repos.d \n\n"; exit 1; fi; + + if $enterprise_staging; then + sed -i 's/mariadb-es-main/mariadb-es-staging/g' /etc/yum.repos.d/mariadb.repo + sed -i 's/mariadb-enterprise-server/mariadb-enterprise-staging/g' /etc/yum.repos.d/mariadb.repo + printf "\n\n[+] Adjusted mariadb.repo to: mariadb-enterprise-staging\n\n" + fi; + + # Run the YUM update + printf "\nBeginning Update\n" + if yum update "MariaDB-*" "MariaDB-columnstore-engine" "MariaDB-columnstore-cmapi"; then + echo " - Success Update" + else + echo "[!!] Failed to update " + exit 1; + fi + ;; + apt ) + if [ ! -f "/etc/apt/sources.list.d/mariadb.list" ]; then printf "\n[!] enterprise_upgrade: Expected to find mariadb.list in /etc/apt/sources.list.d \n\n"; exit 1; fi; + + if $enterprise_staging; then + sed -i 's/mariadb-enterprise-server/mariadb-enterprise-staging/g' /etc/apt/sources.list.d/mariadb.list + apt update + printf "\n\n[+] Adjusted mariadb.list to: mariadb-enterprise-staging\n\n" + fi; + + # Run the APT update + printf "\nBeginning Update\n" + apt-get clean + if apt update; then + echo " - Success Update" + else + echo "[!!] 
Failed to update " + exit 1; + fi + + if apt install --only-upgrade '?upgradable ?name(mariadb.*)'; then + echo " - Success Update mariadb.*" + else + echo "[!!] Failed to update " + exit 1; + fi + systemctl daemon-reload + ;; + *) # unknown option + printf "\nenterprise_upgrade: os & version not implemented: $distro_info\n" + exit 2; + esac +} + +enterprise_upgrade() { + + # Variables + check_set_es_token "$@" + version=$3 + if [ -z $version ]; then + printf "\n[!] Version empty: $version\n\n" + exit 1; + fi; + echo "Token: $enterprise_token" + echo "Current MariaDB Verison: $current_mariadb_version" + echo "Upgrade To MariaDB Version: $version" + echo "-----------------------------------------------" + + # Prechecks + printf "\nPrechecks\n" + check_gtid_strict_mode + check_mariadb_versions + + # Stop All + init_cs_down + wait_cs_down + stop_mariadb + stop_cmapi + + # Make backups of configurations, dbrms + pre_upgrade_dbrm_backup + pre_upgrade_configuration_backup + + # Upgrade + do_enterprise_upgrade + + # Start All + printf "\nStartup\n" + start_mariadb + start_cmapi + init_cs_up + + # Post Upgrade + confirm_dbrmctl_ok + run_mariadb_upgrade +} + +do_upgrade() { + + check_operating_system + check_cpu_architecture + check_package_managers + check_mdb_installed + + repo=$2 + echo "Repository: $repo" + enterprise_staging=false + case $repo in + enterprise ) + enterprise_upgrade "$@" ; + ;; + enterprise_staging ) + enterprise_staging=true + enterprise_upgrade "$@" ; + ;; + community ) + community_upgrade "$@" ; + ;; + dev ) + # For future release + # dev_upgrade "$@" ; + ;; + *) # unknown option + echo "do_upgrade - Unknown repo: $repo\n" + exit 2; + esac + + printf "\nUpgrade Complete\n\n" + +} + +# A quick way when a mac user runs "cs_package_manager.sh check" +# since theres no /etc/os-release to auto detect what OS & version to search the mariadb repos on mac +prompt_user_for_os() { + + # Prompt the user to select an operating system + echo "Please select an 
operating system to search for:" + os_options=("centos" "rhel" "rocky" "ubuntu" "debian") + select opt in "${os_options[@]}"; do + case $opt in + "centos" | "rhel" | "rocky" ) + distro_info=$opt + echo "What major version of $distro_info:" + short_options=("7" "8" "9") + select short in "${short_options[@]}"; do + case $short in + "7" | "8" | "9") + version_id=$short + distro_short="${distro_info:0:3}${version_id}" + break + ;; + + *) + echo "Invalid option, please try again." + ;; + esac + done + break + ;; + "ubuntu") + distro_info=$opt + echo "What major version of $distro_info:" + short_options=("20.04" "22.04" "23.04" "23.10") + select short in "${short_options[@]}"; do + case $short in + "20.04" | "22.04" | "23.04" | "23.10") + version_id=${short//./} + #version_id=$short + distro_short="${distro_info:0:3}${version_id}" + break + ;; + + *) + echo "Invalid option, please try again." + ;; + esac + done + break + ;; + + *) + echo "Invalid option, please try again." + ;; + esac + done + + echo "Distro: $distro_info" + echo "Version: $version_id" + +} + +# A quick way for mac users to select an OS when running "cs_package_manager.sh check" +# since theres no /etc/os-release to auto detect what OS & version to search the mariadb repos on mac +prompt_user_for_os() { + + # Prompt the user to select an operating system + echo "Please select an operating system to search for:" + os_options=("centos" "rhel" "rocky" "ubuntu" "debian") + select opt in "${os_options[@]}"; do + case $opt in + "centos" | "rhel" | "rocky" ) + distro_info=$opt + echo "What major version of $distro_info:" + short_options=("7" "8" "9") + select short in "${short_options[@]}"; do + case $short in + "7" | "8" | "9") + version_id=$short + distro_short="${distro_info:0:3}${version_id}" + break + ;; + + *) + echo "Invalid option, please try again." 
+ ;; + esac + done + break + ;; + "ubuntu") + distro_info=$opt + echo "What major version of $distro_info:" + short_options=("20.04" "22.04" "23.04" "23.10") + select short in "${short_options[@]}"; do + case $short in + "20.04" | "22.04" | "23.04" | "23.10") + version_id=${short//./} + #version_id=$short + distro_short="${distro_info:0:3}${version_id}" + break + ;; + + *) + echo "Invalid option, please try again." + ;; + esac + done + break + ;; + + *) + echo "Invalid option, please try again." + ;; + esac + done + + echo "Distro: $distro_info" + echo "Version: $version_id" + +} + +do_check() { + + check_operating_system + check_cpu_architecture + + repo=$2 + dbm_tmp_file="mdb-tmp.html" + grep=$(which grep) + if [ $distro_info == "mac" ]; then + grep=$(which ggrep) + + prompt_user_for_os + fi + + echo "Repository: $repo" + case $repo in + enterprise ) + check_set_es_token "$@" + + url_base="https://dlm.mariadb.com" + url_page="/browse/$enterprise_token/mariadb_enterprise_server/" + # aaaa + ignore="/login" + at_least_one=false + curl -s "$url_base$url_page" > $dbm_tmp_file + if [ $? -ne 0 ]; then + printf "\n[!] Failed to access $url_base$url_page\n\n" + exit 1 + fi + if grep -q "404 - Page Not Found" $dbm_tmp_file; then + printf "\n[!] 
404 - Failed to access $url_base$url_page\n" + printf "Confirm your ES token works\n" + printf "See: https://customers.mariadb.com/downloads/token/ \n\n" + exit 1 + fi + major_version_links=$($grep -oP 'href="\K[^"]+' $dbm_tmp_file | grep $url_page | grep -v $ignore ) + #echo $major_version_links + for major_link in ${major_version_links[@]} + do + #echo "Major: $major_link" + curl -s "$url_base$major_link" > $dbm_tmp_file + minor_version_links=$($grep -oP 'href="\K[^"]+' $dbm_tmp_file | grep $url_page | grep -v $ignore ) + for minor_link in ${minor_version_links[@]} + do + if [ "$minor_link" != "$url_page" ]; then + #echo " Minor: $minor_link" + case $distro_info in + centos | rhel | rocky ) + path="rpm/rhel/$version_id/$architecture/rpms/" + curl -s "$url_base$minor_link$path" > $dbm_tmp_file + package_links=$($grep -oP 'href="\K[^"]+' $dbm_tmp_file | grep "$path" | grep "columnstore-engine" | grep -v debug | tail -1 ) + if [ ! -z "$package_links" ]; then + #echo "----------" + #echo "$package_links" + at_least_one=true + mariadb_version="${package_links#*mariadb-enterprise-server/}" + columnstore_version="${mariadb_version#*columnstore-engine-}" + mariadb_version="$( echo $mariadb_version | awk -F/ '{print $1}' )" + + # unqiue to enterprise + standard_mariadb_version="${mariadb_version//-/_}" + columnstore_version="$( echo $columnstore_version | awk -F"${standard_mariadb_version}_" '{print $2}' | awk -F".el" '{print $1}' )" + printf "%-8s %-12s %-12s %-12s\n" "MariaDB:" "$mariadb_version" "Columnstore:" "$columnstore_version"; + fi; + ;; + ubuntu | debian ) + + path="deb/pool/main/m/" + curl -s "$url_base$minor_link$path" > $dbm_tmp_file + + # unqiue - this link/path can change + mariadb_version_links=$($grep -oP 'href="\K[^"]+' $dbm_tmp_file | grep -v $ignore | grep -v cmapi | grep ^mariadb ) + #echo "$url_base$minor_link$path" + for mariadb_link in ${mariadb_version_links[@]} + do + #echo $mariadb_link + path="deb/pool/main/m/$mariadb_link" + curl -s 
"$url_base$minor_link$path" > $dbm_tmp_file + package_links=$($grep -oP 'href="\K[^"]+' $dbm_tmp_file | grep "$path" | grep "columnstore_" | grep -v debug | grep $distro_short | tail -1 ) + if [ ! -z "$package_links" ]; then + # echo "$package_links" + # echo "----------" + at_least_one=true + mariadb_version="${package_links#*mariadb-enterprise-server/}" + columnstore_version="${mariadb_version#*columnstore-engine-}" + mariadb_version="$( echo $mariadb_version | awk -F/ '{print $1}' )" + columnstore_version="$( echo $columnstore_version | awk -F"columnstore_" '{print $2}' | awk -F"-" '{print $2}' | awk -F"+" '{print $1}' )" + printf "%-8s %-12s %-12s %-12s\n" "MariaDB:" "$mariadb_version" "Columnstore:" "$columnstore_version"; + fi; + done + + ;; + *) # unknown option + printf "\ndo_check: Not implemented for: $distro_info\n\n" + exit 2; + esac + fi; + done + done + + if ! $at_least_one; then + printf "\n[!] No columnstore packages found for: $distro_short $arch \n\n" + fi + ;; + community ) + + # pull from public community repo + url_base="https://dlm.mariadb.com" + url_page="/browse/mariadb_server/" + ignore="/login" + at_least_one=false + curl -s "$url_base$url_page" > $dbm_tmp_file + major_version_links=$($grep -oP 'href="\K[^"]+' $dbm_tmp_file | grep $url_page | grep -v $ignore ) + + for major_link in ${major_version_links[@]} + do + #echo "Major: $major_link" + curl -s "$url_base$major_link" > $dbm_tmp_file + minor_version_links=$($grep -oP 'href="\K[^"]+' $dbm_tmp_file | grep $url_page | grep -v $ignore ) + for minor_link in ${minor_version_links[@]} + do + if [ "$minor_link" != "$url_page" ]; then + #echo " Minor: $minor_link" + case $distro_info in + centos | rhel | rocky ) + path="yum/centos/$version_id/$architecture/rpms/" + curl -s "$url_base$minor_link$path" > $dbm_tmp_file + package_links=$($grep -oP 'href="\K[^"]+' $dbm_tmp_file | grep "$path" | grep "columnstore-engine" | grep -v debug | tail -1 ) + if [ ! 
-z "$package_links" ]; then + # echo "$package_links" + # echo "----------" + at_least_one=true + mariadb_version="${package_links#*mariadb-}" + columnstore_version="${mariadb_version#*columnstore-engine-}" + mariadb_version="$( echo $mariadb_version | awk -F/ '{print $1}' )" + columnstore_version="$( echo $columnstore_version | awk -F_ '{print $2}' | awk -F".el" '{print $1}' )" + # echo "MariaDB: $mariadb_version Columnstore: $columnstore_version" + printf "%-8s %-12s %-12s %-12s\n" "MariaDB:" "$mariadb_version" "Columnstore:" "$columnstore_version"; + fi; + ;; + ubuntu | debian ) + path="repo/$distro_info/pool/main/m/mariadb/" + curl -s "$url_base$minor_link$path" > $dbm_tmp_file + package_links=$($grep -oP 'href="\K[^"]+' $dbm_tmp_file | grep "$path" | grep "columnstore_" | grep -v debug | grep $distro_short | tail -1 ) + if [ ! -z "$package_links" ]; then + # echo "$package_links" + # echo "----------" + at_least_one=true + mariadb_version="${package_links#*mariadb-}" + columnstore_version="${mariadb_version#*columnstore-engine-}" + mariadb_version="$( echo $mariadb_version | awk -F/ '{print $1}' )" + columnstore_version="$( echo $columnstore_version | awk -F"columnstore_" '{print $2}' | awk -F"-" '{print $2}' | awk -F'\\+maria' '{print $1}' 2>/dev/null) " + # echo "MariaDB: $mariadb_version Columnstore: $columnstore_version" + printf "%-8s %-12s %-12s %-12s\n" "MariaDB:" "$mariadb_version" "Columnstore:" "$columnstore_version"; + fi; + ;; + *) # unknown option + printf "Not implemented for: $distro_info\n" + exit 2; + esac + fi + done + done + + if ! $at_least_one; then + printf "\n[!] No columnstore packages found for: $distro_short $arch \n\n" + fi + ;; + dev ) + printf "Not implemented for: $repo\n" + exit 1; + ;; + *) # unknown option + printf "Unknown repo: $repo\n" + exit 2; + esac +} + +global_dependencies() { + if ! command -v curl &> /dev/null; then + printf "\n[!] curl not found. 
Please install curl\n\n" + exit 1; + fi +} + +check_set_es_token() { + while [[ $# -gt 0 ]]; do + parameter="$1" + + case $parameter in + --token) + enterprise_token="$2" + shift # past argument + shift # past value + ;; + *) # unknown option + shift # past argument + ;; + esac + done + + if [ -z $enterprise_token ]; then + printf "\n[!] Enterprise token empty: $enterprise_token\n" + printf "1) edit $0 enterprise_token='xxxxxx' \n" + printf "2) add flag --token xxxxxxxxx \n" + printf "Find your token @ https://customers.mariadb.com/downloads/token/ \n\n" + + exit 1; + fi; +} + +print_cs_pkg_mgr_version_info() { + echo "MariaDB Columnstore Package Manager" + echo "Version: $cs_pkg_manager_version" +} + +global_dependencies + +case $action in + remove ) + do_remove "$@" ; + ;; + install ) + do_install "$@"; + ;; + upgrade ) + do_upgrade "$@" ; + ;; + check ) + do_check "$@" + ;; + help | -h | --help | -help) + print_help_text; + exit 1; + ;; + add ) + add_node_cmapi_via_curl "127.0.0.1" + ;; + -v | version ) + print_cs_pkg_mgr_version_info + ;; + source ) + return 0; + ;; + *) # unknown option + printf "Unknown Action: $1\n" + print_help_text + exit 2; +esac diff --git a/extra/mcs_backup_manager.sh b/cmapi/scripts/mcs_backup_manager.sh similarity index 71% rename from extra/mcs_backup_manager.sh rename to cmapi/scripts/mcs_backup_manager.sh index 1790bf0d7..d140c1c94 100644 --- a/extra/mcs_backup_manager.sh +++ b/cmapi/scripts/mcs_backup_manager.sh @@ -13,8 +13,8 @@ # ######################################################################## # Documentation: bash mcs_backup_manager.sh help -# Version: 3.4 -# +# Version: 3.8 +# # Backup Example # LocalStorage: sudo ./mcs_backup_manager.sh backup # S3: sudo ./mcs_backup_manager.sh backup -bb s3://my-cs-backups @@ -23,22 +23,23 @@ # # Restore Example # LocalStorage: sudo ./mcs_backup_manager.sh restore -l -# S3: sudo ./mcs_backup_manager.sh restore -bb s3://my-cs-backups -l -# +# S3: sudo ./mcs_backup_manager.sh restore 
-bb s3://my-cs-backups -l +# ######################################################################## - +mcs_bk_manager_version="3.8" start=$(date +%s) action=$1 print_action_help_text() { echo " MariaDB Columnstore Backup Manager - + Actions: backup Full & Incremental columnstore backup with additional flags to augment the backup taken restore Restore a backup taken with this script dbrm_backup Quick hot backup of internal columnstore metadata only - only use under support recommendation + dbrm_restore Restore internal columnstore metadata from dbrm_backup - only use under support recommendation Documentation: bash $0 help @@ -54,31 +55,22 @@ check_operating_system() { # Supported OS case $OPERATING_SYSTEM in - centos ) + centos | rhel | rocky ) return 1; ;; - rhel ) - return 1 - ;; - debian ) - return 1; - ;; - rocky ) - return 1; - ;; - ubuntu ) + ubuntu | debian ) return 1; ;; *) # unknown option printf "\ncheck_operating_system: unknown os & version: $OPERATING_SYSTEM\n" exit 2; - esac + esac } load_default_backup_variables() { check_operating_system - + # What directory to store the backups on this machine or the target machine. # Consider write permissions of the scp user and the user running this script. 
# Mariadb-backup will use this location as a tmp dir for S3 and remote backups temporarily @@ -109,7 +101,7 @@ load_default_backup_variables() ;; *) # unknown option handle_failed_dependencies "\nload_default_backup_variables: unknown os & version: $OPERATING_SYSTEM\n"; - esac + esac # Fixed Paths CS_CONFIGS_PATH="/etc/columnstore" @@ -123,7 +115,7 @@ load_default_backup_variables() cs_cache=$(grep -A25 "\[Cache\]" $STORAGEMANGER_CNF | grep ^path | cut -d "=" -f 2 | tr -d " ") # What storage topogoly is being used by Columnstore - found in /etc/columnstore/storagemanager.cnf - # Options: "LocalStorage" or "S3" + # Options: "LocalStorage" or "S3" storage=$(grep -m 1 "^service = " $STORAGEMANGER_CNF | awk '{print $3}') # Name of the existing bucket used in the cluster - found in /etc/columnstore/storagemanager.cnf @@ -133,7 +125,7 @@ load_default_backup_variables() # modes ['direct','indirect'] - direct backups run on the columnstore nodes themselves. indirect run on another machine that has read-only mounts associated with columnstore/mariadb mode="direct" - # Name of the Configuration file to load variables from + # Name of the Configuration file to load variables from config_file=".cs-backup-config" # Track your write speed with "dstat --top-cpu --top-io" @@ -160,7 +152,7 @@ load_default_backup_variables() if [ ! 
-f /var/lib/columnstore/local/module ]; then pm="pm1"; else pm=$(cat /var/lib/columnstore/local/module); fi; PM_NUMBER=$(echo "$pm" | tr -dc '0-9') if [[ -z $PM_NUMBER ]]; then PM_NUMBER=1; fi; - + #source_ips=$(grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}" /etc/columnstore/Columnstore.xml) #source_host_names=$(grep "" /etc/columnstore/Columnstore.xml) cmapi_key="$(grep "x-api-key" $CS_CONFIGS_PATH/cmapi_server.conf | awk '{print $3}' | tr -d "'" )"; @@ -191,13 +183,16 @@ load_default_backup_variables() s3_url="" no_verify_ssl=false + # Deletes backups older than this variable retention_days + retention_days=0 + # Tracks if flush read lock has been run read_lock=false incremental=false columnstore_online=false confirm_xmllint_installed - + # Number of DBroots # Integer usually 1 or 3 DBROOT_COUNT=$(xmllint --xpath "string(//DBRootCount)" $CS_CONFIGS_PATH/Columnstore.xml) @@ -205,7 +200,7 @@ load_default_backup_variables() } parse_backup_variables() -{ +{ # Dynamic Arguments while [[ $# -gt 0 ]]; do key="$1" @@ -304,7 +299,7 @@ parse_backup_variables() shift # past argument ;; -nv-ssl| --no-verify-ssl) - no_verify_ssl=true + no_verify_ssl=true shift # past argument ;; -pi| --poll-interval) @@ -317,6 +312,11 @@ parse_backup_variables() shift # past argument shift # past value ;; + -r|--retention-days) + retention_days="$2" + shift # past argument + shift # past value + ;; -h|--help|-help|help) print_backup_help_text; exit 1; @@ -337,7 +337,7 @@ parse_backup_variables() print_backup_help_text() { - echo " + echo " Columnstore Backup -bl | --backup-location Directory where the backup will be saved @@ -347,7 +347,7 @@ print_backup_help_text() -url | --endpoint-url Onprem url to s3 storage api example: http://127.0.0.1:8000 -nv-ssl| --no-verify-ssl Skips verifying ssl certs, useful for onpremise s3 storage -s | --storage The storage used by columnstore data 'LocalStorage' or 'S3' - -i | --incremental Adds columnstore deltas to an existing full backup + -i | --incremental 
Adds columnstore deltas to an existing full backup [ , auto_most_recent ] -P | --parallel Number of parallel rsync/compression threads to run -f | --config-file Path to backup configuration file to load variables from -sbrm | --skip-save-brm Skip saving brm prior to running a backup - ideal for dirty backups @@ -360,22 +360,23 @@ print_backup_help_text() -q | --quiet Silence verbose copy command outputs -c | --compress Compress backup in X format - Options: [ pigz ] -nb | --name-backup Define the name of the backup - default: date +%m-%d-%Y + -r | --retention-days Retain backups created within the last X days, the rest are deleted, default 0 = keep all backups -ha | --highavilability Hint wether shared storage is attached @ below on all nodes to see all data HA LocalStorage ( /var/lib/columnstore/dataX/ ) - HA S3 ( /var/lib/columnstore/storagemanager/ ) + HA S3 ( /var/lib/columnstore/storagemanager/ ) Local Storage Examples: ./$0 backup -bl /tmp/backups/ -bd Local -s LocalStorage ./$0 backup -bl /tmp/backups/ -bd Local -s LocalStorage -P 8 - ./$0 backup -bl /tmp/backups/ -bd Local -s LocalStorage --incremental 02-18-2022 + ./$0 backup -bl /tmp/backups/ -bd Local -s LocalStorage --incremental auto_most_recent ./$0 backup -bl /tmp/backups/ -bd Remote -scp root@172.31.6.163 -s LocalStorage - S3 Examples: + S3 Examples: ./$0 backup -bb s3://my-cs-backups -s S3 ./$0 backup -bb s3://my-cs-backups -c pigz --quiet -sb - ./$0 backup -bb gs://my-cs-backups -s S3 --incremental 02-18-2022 + ./$0 backup -bb gs://my-cs-backups -s S3 --incremental 12-18-2023 ./$0 backup -bb s3://my-onpremise-bucket -s S3 -url http://127.0.0.1:8000 - + Cron Example: */60 */24 * * * root bash /root/$0 -bb s3://my-cs-backups -s S3 >> /root/csBackup.log 2>&1 "; @@ -405,23 +406,24 @@ print_backup_variables() printf "%-${s1}s %-${s2}s\n" "Highly Available:" "$HA"; printf "%-${s1}s %-${s2}s\n" "Incremental:" "$incremental"; printf "%-${s1}s %-${s2}s\n" "Timestamp:" "$(date +%m-%d-%Y-%H%M%S)"; + printf 
"%-${s1}s %-${s2}s\n" "Retention:" "$retention_days"; if [[ -n "$compress_format" ]]; then printf "%-${s1}s %-${s2}s\n" "Compression:" "true"; printf "%-${s1}s %-${s2}s\n" "Compression Format:" "$compress_format"; printf "%-${s1}s %-${s2}s\n" "Compression Threads:" "$PARALLEL_THREADS"; - else + else printf "%-${s1}s %-${s2}s\n" "Parallel Enabled:" "$parrallel_rsync"; if $parrallel_rsync ; then printf "%-${s1}s %-${s2}s\n" "Parallel Threads:" "$PARALLEL_THREADS"; fi; fi if [ $storage == "LocalStorage" ]; then printf "%-${s1}s %-${s2}s\n" "Backup Destination:" "$backup_destination"; - if [ $backup_destination == "Remote" ]; then printf "%-${s1}s %-${s2}s\n" "scp:" "$scp"; fi; + if [ $backup_destination == "Remote" ]; then printf "%-${s1}s %-${s2}s\n" "scp:" "$scp"; fi; printf "%-${s1}s %-${s2}s\n" "Backup Location:" "$backup_location"; fi - if [ $storage == "S3" ]; then + if [ $storage == "S3" ]; then printf "%-${s1}s %-${s2}s\n" "Active Bucket:" "$bucket"; printf "%-${s1}s %-${s2}s\n" "Backup Bucket:" "$backup_bucket"; fi @@ -429,7 +431,7 @@ print_backup_variables() } check_package_managers() { - + package_manager=''; if command -v apt &> /dev/null ; then if ! command -v dpkg-query &> /dev/null ; then @@ -441,21 +443,21 @@ check_package_managers() { if command -v yum &> /dev/null ; then package_manager="yum"; - fi + fi - if [ $package_manager == '' ]; then + if [ $package_manager == '' ]; then handle_failed_dependencies "[!!] No package manager found: yum or apt must be installed" exit 1; fi; } confirm_xmllint_installed() { - + if ! command -v xmllint > /dev/null; then printf "[!] xmllint not installed ... attempting auto install\n\n" check_package_managers case $package_manager in - yum ) + yum ) install_command="yum install libxml2 -y"; ;; apt ) @@ -479,7 +481,7 @@ confirm_rsync_installed() { printf "[!] rsync not installed ... 
attempting auto install\n\n" check_package_managers case $package_manager in - yum ) + yum ) install_command="yum install rsync -y"; ;; apt ) @@ -505,7 +507,7 @@ confirm_mariadb_backup_installed() { printf "[!] mariadb-backup not installed ... attempting auto install\n\n" check_package_managers case $package_manager in - yum ) + yum ) install_command="yum install MariaDB-backup -y"; ;; apt ) @@ -529,7 +531,7 @@ confirm_pigz_installed() { printf "[!] pigz not installed ... attempting auto install\n\n" check_package_managers case $package_manager in - yum ) + yum ) install_command="yum install pigz -y"; ;; apt ) @@ -547,13 +549,13 @@ confirm_pigz_installed() { fi } -check_for_dependancies() +check_for_dependancies() { # Check pidof works if [ $mode != "indirect" ] && ! command -v pidof > /dev/null; then handle_failed_dependencies "\n\n[!] Please make sure pidof is installed and executable\n\n" fi - + # used for save_brm and defining columnstore_user if ! command -v stat > /dev/null; then handle_failed_dependencies "\n\n[!] Please make sure stat is installed and executable\n\n" @@ -562,23 +564,23 @@ check_for_dependancies() confirm_rsync_installed confirm_mariadb_backup_installed - if [ $1 == "backup" ] && [ $mode != "indirect" ] && ! command -v dbrmctl > /dev/null; then + if [ $1 == "backup" ] && [ $mode != "indirect" ] && ! command -v dbrmctl > /dev/null; then handle_failed_dependencies "\n\n[!] dbrmctl unreachable to issue lock \n\n" fi if [ $storage == "S3" ]; then - + # Default cloud cloud="aws" - + # Critical argument for S3 - determine which cloud - if [ -z "$backup_bucket" ]; then handle_failed_dependencies "\n undefined --backup_bucket: $backup_bucket \nfor examples see: ./$0 backup --help\n"; fi + if [ -z "$backup_bucket" ]; then handle_failed_dependencies "\n\n[!] 
Undefined --backup-bucket: $backup_bucket \nfor examples see: ./$0 backup --help\n\n"; fi if [[ $backup_bucket == gs://* ]]; then cloud="gcp"; protocol="gs"; elif [[ $backup_bucket == s3://* ]]; then cloud="aws"; protocol="s3"; - else - handle_failed_dependencies "\n Invalid --backup_bucket - doesnt lead with gs:// or s3:// - $backup_bucket\n"; + else + handle_failed_dependencies "\n\n[!] Invalid --backup-bucket - doesnt lead with gs:// or s3:// - $backup_bucket\n\n"; fi if [ $cloud == "gcp" ]; then @@ -589,7 +591,7 @@ check_for_dependancies() gsutil=$(which mcs_gsutil 2>/dev/null) elif ! command -v gsutil > /dev/null; then which gsutil - echo "Hints: + echo "Hints: curl -O https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-cli-443.0.0-linux-x86_64.tar.gz tar -xvf google-cloud-cli-443.0.0-linux-x86_64.tar.gz ./google-cloud-sdk/install.sh -q @@ -600,14 +602,14 @@ check_for_dependancies() or B) gcloud auth activate-service-account --key-file=user-file.json " - handle_failed_dependencies "\n\nPlease make sure gsutil cli is installed configured and executable\n" - else + handle_failed_dependencies "\n\n[!] Please make sure gsutil cli is installed configured and executable\n\n" + else gsutil=$(which gsutil 2>/dev/null) fi - + # gsutil sytax for silent if $quiet; then xtra_s3_args+="-q"; fi - else + else # on prem S3 will use aws cli # If AWS - Check aws-cli installed @@ -617,26 +619,26 @@ check_for_dependancies() awscli=$(which mcs_aws 2>/dev/null) elif ! command -v aws > /dev/null; then which aws - echo "Hints: + echo "Hints: curl \"https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip\" -o \"awscliv2.zip\" unzip awscliv2.zip sudo ./aws/install aws configure" - handle_failed_dependencies "\n\n Please make sure aws cli is installed configured and executable\nSee existing .cnf aws credentials with: grep aws_ $STORAGEMANGER_CNF \n\n" + handle_failed_dependencies "\n\n[!] 
Please make sure aws cli is installed configured and executable\nSee existing .cnf aws credentials with: grep aws_ $STORAGEMANGER_CNF \n\n" else awscli=$(which aws 2>/dev/null) fi - + # aws sytax for silent if $quiet; then xtra_s3_args+="--quiet"; fi fi; fi } -validation_prechecks_for_backup() +validation_prechecks_for_backup() { - echo "Prechecks ..." + echo "Prechecks" # Adjust rsync for incremental copy additional_rysnc_flags="" @@ -653,19 +655,19 @@ validation_prechecks_for_backup() # Detect if columnstore online if [ $mode == "direct" ]; then - if [ -z $(pidof PrimProc) ] || [ -z $(pidof WriteEngineServer) ]; then - printf " - Columnstore is OFFLINE \n"; - export columnstore_online=false; - else - printf " - Columnstore is ONLINE - safer if offline \n"; - export columnstore_online=true; + if [ -z $(pidof PrimProc) ] || [ -z $(pidof WriteEngineServer) ]; then + printf " - Columnstore is OFFLINE \n"; + export columnstore_online=false; + else + printf " - Columnstore is ONLINE - safer if offline \n"; + export columnstore_online=true; fi fi; # Validate compression option if [[ -n "$compress_format" ]]; then case $compress_format in - pigz) + pigz) confirm_pigz_installed ;; *) # unknown option @@ -683,78 +685,215 @@ validation_prechecks_for_backup() fi # Storage Based Checks - if [ $storage == "LocalStorage" ]; then + if [ $storage == "LocalStorage" ]; then # Incremental Job checks - if $incremental; then + if $incremental; then + + # $backup_location must exist to find an existing full back to add to + if [ ! 
-d $backup_location ]; then + handle_early_exit_on_backup "[X] Backup directory ($backup_location) DOES NOT exist ( -bl ) \n\n" true; + fi + + if [ "$today" == "auto_most_recent" ]; then + auto_select_most_recent_backup_for_incremental + fi + + # Validate $today is a non-empty value + if [ -z "$today" ]; then + handle_early_exit_on_backup "\nUndefined folder to increment on ($backup_location$today)\nTry --incremental or --incremental auto_most_recent \n" true + fi + # Cant continue if this folder (which represents a full backup) doesnt exists - if [ $backup_destination == "Local" ]; then - if [ -d $backup_location$today ]; then - printf "[+] Full backup directory exists\n"; - else - handle_early_exit_on_backup "[X] Full backup directory ($backup_location$today) DOES NOT exist \n\n" true; + if [ $backup_destination == "Local" ]; then + if [ -d $backup_location$today ]; then + printf " - Full backup directory exists\n"; + else + handle_early_exit_on_backup "[X] Full backup directory ($backup_location$today) DOES NOT exist \n\n" true; fi; elif [ $backup_destination == "Remote" ]; then - if [[ $(ssh $scp test -d $backup_location$today && echo exists) ]]; then - printf "[+] Full backup directory exists\n"; - else - handle_early_exit_on_backup "[X] Full backup directory ($backup_location$today) DOES NOT exist on remote $scp \n\n" true; + if [[ $(ssh $scp test -d $backup_location$today && echo exists) ]]; then + printf " - Full backup directory exists\n"; + else + handle_early_exit_on_backup "[X] Full backup directory ($backup_location$today) DOES NOT exist on remote $scp \n\n" true; fi fi fi elif [ $storage == "S3" ]; then - + # Adjust s3api flags for onpremise/custom endpoints add_s3_api_flags="" if [ -n "$s3_url" ]; then add_s3_api_flags+=" --endpoint-url $s3_url"; fi; if $no_verify_ssl; then add_s3_api_flags+=" --no-verify-ssl"; fi; - + # Validate addtional relevant arguments for S3 if [ -z "$backup_bucket" ]; then echo "Invalid --backup_bucket: $backup_bucket - is 
empty"; exit 1; fi # Check cli access to bucket if [ $cloud == "gcp" ]; then - + if $gsutil ls $backup_bucket > /dev/null ; then printf " - Success listing backup bucket\n" - else + else printf "\n[X] Failed to list bucket contents... \nCheck $gsutil credentials: $gsutil config -a" handle_early_exit_on_backup "\n$gsutil ls $backup_bucket \n\n" true fi - else + else # Check aws cli access to bucket if $( $awscli $add_s3_api_flags s3 ls $backup_bucket > /dev/null ) ; then printf " - Success listing backup bucket\n" - else + else printf "\n[X] Failed to list bucket contents... \nCheck aws cli credentials: aws configure" handle_early_exit_on_backup "\naws $add_s3_api_flags s3 ls $backup_bucket \n" true fi fi; - # Incremental Job checks + # Incremental Job checks if $incremental; then + if [ "$today" == "auto_most_recent" ]; then + auto_select_most_recent_backup_for_incremental + fi + + # Validate $today is a non-empty value + if [ -z "$today" ]; then + handle_early_exit_on_backup "\nUndefined folder to increment on ($backup_bucket/$today)\nTry --incremental or --incremental auto_most_recent \n" true + fi + # Cant continue if this folder (which represents a full backup) doesnt exists if [ $cloud == "gcp" ]; then - if [[ $( $gsutil ls $backup_bucket/$today | head ) ]]; then - printf "[+] Full backup directory exists\n"; - else - handle_early_exit_on_backup "[X] Full backup directory ($backup_bucket/$today) DOES NOT exist in GCS \nCheck - $gsutil ls $backup_bucket/$today | head \n\n" true; - fi + if [[ $( $gsutil ls $backup_bucket/$today | head ) ]]; then + printf " - Full backup directory exists\n"; + else + handle_early_exit_on_backup "[X] Full backup directory ($backup_bucket/$today) DOES NOT exist in GCS \nCheck - $gsutil ls $backup_bucket/$today | head \n\n" true; + fi else - if [[ $( $awscli $add_s3_api_flags s3 ls $backup_bucket/$today | head ) ]]; then - printf "[+] Full backup directory exists\n"; - else - handle_early_exit_on_backup "[X] Full backup directory 
($backup_bucket/$today) DOES NOT exist in S3 \nCheck - aws $add_s3_api_flags s3 ls $backup_bucket/$today | head \n\n" true; - fi + if [[ $( $awscli $add_s3_api_flags s3 ls $backup_bucket/$today/ | head ) ]]; then + printf " - Full backup directory exists\n"; + else + handle_early_exit_on_backup "[X] Full backup directory ($backup_bucket/$today) DOES NOT exist in S3 \nCheck - aws $add_s3_api_flags s3 ls $backup_bucket/$today | head \n\n" true; + fi fi; fi - else + else handle_early_exit_on_backup "Invalid Variable storage: $storage" true fi } +# Used when "--incremental auto_most_recent" passed in during incremental backups +# This function identifies which backup directory is the most recent and sets today=x so that the incremental backup applies to said last full backup +# For LocalStorage: based on ls -td | head -n 1 +# For S3: using the awscli/gsutil, compare the dates of the backup folders restoreS3.job file to find the most recent S3 backup to increment ont top off +auto_select_most_recent_backup_for_incremental() { + + printf " - Searching for most recent backup ...." + if [ $storage == "LocalStorage" ]; then + most_recent_backup=$(ls -td "${backup_location}"* 2>/dev/null | head -n 1) + if [[ -z "$most_recent_backup" ]]; then + handle_early_exit_on_backup "\n[!!!] 
No backup found to increment in '$backup_location', please run a full backup or define a folder that exists --incremental \n" true + else + today=$(basename $most_recent_backup 2>/dev/null) + fi + + elif [ $storage == "S3" ]; then + current_date=$(date +%s) + backups=$(s3ls $backup_bucket) + most_recent_backup="" + most_recent_backup_time_diff=$((2**63 - 1)); + + while IFS= read -r line; do + + folder=$(echo "$line" | awk '{print substr($2, 1, length($2)-1)}') + date_time=$(s3ls "${backup_bucket}/${folder}/restore --recursive" | awk '{print $1,$2}') + + if [[ -n "$date_time" ]]; then + + # Parse the date + backup_date=$(date -d "$date_time" +%s) + + # Calculate the difference in days + time_diff=$(( (current_date - backup_date) )) + # echo "date_time: $date_time" + # echo "backup_date: $backup_date" + # echo "time_diff: $time_diff" + # echo "days_diff: $((time_diff / (60*60*24) ))" + + if [ $time_diff -lt $most_recent_backup_time_diff ]; then + most_recent_backup=$folder + most_recent_backup_time_diff=$time_diff + fi + fi + printf "." + done <<< "$backups" + + # printf "\n\nMost Recent: $most_recent_backup \n" + # printf "Time Diff: $most_recent_backup_time_diff \n" + + if [[ -z "$most_recent_backup" ]]; then + handle_early_exit_on_backup "\n[!!!] No backup found to increment, please run a full backup or define a folder that exists --incremental \n" true + exit 1; + else + today=$most_recent_backup + fi + fi + printf " selected: $today \n" +} + +apply_backup_retention_policy() { + + if [ $retention_days -eq 0 ]; then + printf " - Skipping Backup Rentention Policy\n" + return 0; + fi + + printf " - Applying Backup Rentention Policy...." 
+ if [ $storage == "LocalStorage" ]; then + # example: find /tmp/backups/ -mindepth 1 -maxdepth 1 -type d -name "*" -amin +0 + find "$backup_location" -mindepth 1 -maxdepth 1 -type d -name "*" -mtime +$retention_days -exec rm -r {} \; + + elif [ $storage == "S3" ]; then + + current_date=$(date +%s) + backups=$(s3ls $backup_bucket) + + while IFS= read -r line; do + + delete_backup=false + folder=$(echo "$line" | awk '{print substr($2, 1, length($2)-1)}') + date_time=$(s3ls "${backup_bucket}/${folder}/restore --recursive" | awk '{print $1,$2}') + + if [[ -n "$date_time" ]]; then + + # Parse the date + backup_date=$(date -d "$date_time" +%s) + + # Calculate the difference in days + days_diff=$(( (current_date - backup_date) / (60*60*24) )) + # echo "line: $line" + # echo "date_time: $date_time" + # echo "backup_date: $backup_date" + # echo "days_diff: $days_diff" + + if [ $days_diff -gt "$retention_days" ]; then + delete_backup=true + fi + else + delete_backup=true + fi + + if $delete_backup; then + s3rm "${backup_bucket}/${folder}" + #echo "Deleting ${backup_bucket}/${folder}" + fi + printf "." + + done <<< "$backups" + fi + printf " Done\n" + +} + cs_read_only_wait_loop() { retry_limit=1800 retry_counter=0 @@ -766,7 +905,7 @@ cs_read_only_wait_loop() { printf "." current_status=$(dbrmctl status); if [ $? -ne 0 ]; then - handle_early_exit_on_backup "\n[!] Failed to get dbrmctl status\n\n" + handle_early_exit_on_backup "\n[!] Failed to get dbrmctl status\n\n" fi if [ $retry_counter -ge $retry_limit ]; then handle_early_exit_on_backup "\n[!] 
Set columnstore readonly wait retry limit exceeded: $retry_counter \n\n" @@ -777,18 +916,18 @@ cs_read_only_wait_loop() { printf "Done\n" } -# Having columnstore offline is best for non-volatile backups +# Having columnstore offline is best for non-volatile backups # If online - issue a flush tables with read lock and set DBRM to readonly -issue_write_locks() +issue_write_locks() { if [ $mode == "indirect" ] || $skip_locks; then printf "\n"; return; fi; - if ! $skip_mdb && ! pidof mariadbd > /dev/null; then - handle_early_exit_on_backup "\n[X] MariaDB is offline ... Needs to be online to issue read only lock and to run mariadb-backup \n\n" true; + if ! $skip_mdb && ! pidof mariadbd > /dev/null; then + handle_early_exit_on_backup "\n[X] MariaDB is offline ... Needs to be online to issue read only lock and to run mariadb-backup \n\n" true; fi; - printf "\nLocks \n"; + printf "\nLocks \n"; # Pre 23.10.2 CS startreadonly doesnt exist - so poll cpimports added to protect them - if ! $skip_polls; then + if ! $skip_polls; then poll_check_no_active_sql_writes poll_check_no_active_cpimports fi; @@ -800,43 +939,41 @@ issue_write_locks() if mariadb -e "FLUSH TABLES WITH READ LOCK;"; then mariadb -e "set global read_only=ON;" read_lock=true - printf " Done\n"; - else - handle_early_exit_on_backup "\n[X] Failed issuing read-only lock\n\n" - fi + printf " Done\n"; + else + handle_early_exit_on_backup "\n[X] Failed issuing read-only lock\n" + fi fi - - if [ $pm == "pm1" ]; then - + + if [ $pm == "pm1" ]; then + # Set Columnstore ReadOnly Mode startreadonly_exists=$(dbrmctl -h 2>&1 | grep "startreadonly") printf " - Issuing read-only lock to Columnstore Engine ... "; - if ! $columnstore_online; then - printf "Skip since offline\n\n"; + if ! 
$columnstore_online; then + printf "Skip since offline\n"; elif [ $DBROOT_COUNT == "1" ] && [[ -n "$startreadonly_exists" ]]; then if dbrmctl startreadonly ; then cs_read_only_wait_loop printf " \n"; else - handle_early_exit_on_backup "\n[X] Failed issuing columnstore BRM lock via dbrmctl startreadonly\n" + handle_early_exit_on_backup "\n[X] Failed issuing columnstore BRM lock via dbrmctl startreadonly\n" fi; - elif [ $DBROOT_COUNT == "1" ]; then - if dbrmctl readonly ; then - printf " \n"; - else - handle_early_exit_on_backup "\n[X] Failed issuing columnstore BRM lock via dbrmctl readonly \n" + elif [ $DBROOT_COUNT == "1" ]; then + if ! dbrmctl readonly ; then + handle_early_exit_on_backup "\n[X] Failed issuing columnstore BRM lock via dbrmctl readonly \n" fi - else + else if command -v mcs &> /dev/null && command -v jq &> /dev/null ; then if ! mcs cluster set mode --mode readonly | jq -r tostring ; then - handle_early_exit_on_backup "\n[X] Failed issuing columnstore BRM lock via cmapi\n" + handle_early_exit_on_backup "\n[X] Failed issuing columnstore BRM lock via cmapi\n" fi else # Older CS versions dont have mcs cli cmapiResponse=$(curl -s -X PUT https://127.0.0.1:8640/cmapi/0.4.0/cluster/mode-set --header 'Content-Type:application/json' --header "x-api-key:$cmapi_key" --data '{"timeout":20, "mode": "readonly"}' -k); - if [[ $cmapiResponse == '{"error":'* ]] ; then - handle_early_exit_on_backup "\n[X] Failed issuing columnstore BRM lock via cmapi\n" + if [[ $cmapiResponse == '{"error":'* ]] ; then + handle_early_exit_on_backup "\n[X] Failed issuing columnstore BRM lock via cmapi\n" else printf "$cmapiResponse \n"; fi @@ -886,7 +1023,7 @@ poll_check_no_active_cpimports() { printf "Done\n" no_cpimports=true break - else + else printf "." if ! $quiet; then printf "\n$active_cpimports"; fi; sleep "$poll_interval" @@ -899,53 +1036,56 @@ poll_check_no_active_cpimports() { fi; } -run_save_brm() +run_save_brm() { if $skip_save_brm || [ $pm != "pm1" ] || ! 
$columnstore_online || [ $mode == "indirect" ]; then return; fi; printf "\nBlock Resolution Manager\n" - local tmpDir="/tmp/DBRMbackup-$today" - if [ ! -d "$tmpDir" ]; then mkdir -p $tmpDir; fi; + local tmp_dir="/tmp/DBRMbackup-$today" + if [ ! -d "$tmp_dir" ]; then mkdir -p $tmp_dir; fi; # Copy extent map locally just in case save_brm fails - if [ $storage == "LocalStorage" ]; then - cp -R $DBRM_PATH/* $tmpDir - elif [ $storage == "S3" ]; then - cp -R $STORAGEMANAGER_PATH/* $tmpDir + if [ $storage == "LocalStorage" ]; then + printf " - Backing up DBRMs @ $tmp_dir ... " + cp -R $DBRM_PATH/* $tmp_dir + printf " Done \n" + elif [ $storage == "S3" ]; then + printf " - Backing up minimal DBRMs @ $tmp_dir ... " # Base Set - eval "smcat /data1/systemFiles/dbrm/BRM_saves_current > $tmpDir/BRM_saves_current $xtra_cmd_args" - eval "smcat /data1/systemFiles/dbrm/BRM_saves_journal > $tmpDir/BRM_saves_journal $xtra_cmd_args" - eval "smcat /data1/systemFiles/dbrm/BRM_saves_em > $tmpDir/BRM_saves_em $xtra_cmd_args" - eval "smcat /data1/systemFiles/dbrm/BRM_saves_vbbm > $tmpDir/BRM_saves_vbbm $xtra_cmd_args" - eval "smcat /data1/systemFiles/dbrm/BRM_saves_vss > $tmpDir/BRM_saves_vss $xtra_cmd_args" + eval "smcat /data1/systemFiles/dbrm/BRM_saves_current 2>/dev/null > $tmp_dir/BRM_saves_current $xtra_cmd_args" + eval "smcat /data1/systemFiles/dbrm/BRM_saves_journal 2>/dev/null > $tmp_dir/BRM_saves_journal $xtra_cmd_args" + eval "smcat /data1/systemFiles/dbrm/BRM_saves_em 2>/dev/null > $tmp_dir/BRM_saves_em $xtra_cmd_args" + eval "smcat /data1/systemFiles/dbrm/BRM_saves_vbbm 2>/dev/null > $tmp_dir/BRM_saves_vbbm $xtra_cmd_args" + eval "smcat /data1/systemFiles/dbrm/BRM_saves_vss 2>/dev/null > $tmp_dir/BRM_saves_vss $xtra_cmd_args" # A Set - eval "smcat /data1/systemFiles/dbrm/BRM_savesA_em > $tmpDir/BRM_savesA_em $xtra_cmd_args" - eval "smcat /data1/systemFiles/dbrm/BRM_savesA_vbbm > $tmpDir/BRM_savesA_vbbm $xtra_cmd_args" - eval "smcat /data1/systemFiles/dbrm/BRM_savesA_vss > 
$tmpDir/BRM_savesA_vss $xtra_cmd_args" - + eval "smcat /data1/systemFiles/dbrm/BRM_savesA_em 2>/dev/null > $tmp_dir/BRM_savesA_em $xtra_cmd_args" + eval "smcat /data1/systemFiles/dbrm/BRM_savesA_vbbm 2>/dev/null > $tmp_dir/BRM_savesA_vbbm $xtra_cmd_args" + eval "smcat /data1/systemFiles/dbrm/BRM_savesA_vss 2>/dev/null > $tmp_dir/BRM_savesA_vss $xtra_cmd_args" + # B Set - eval "smcat /data1/systemFiles/dbrm/BRM_savesB_em > $tmpDir/BRM_savesB_em $xtra_cmd_args" - eval "smcat /data1/systemFiles/dbrm/BRM_savesB_vbbm > $tmpDir/BRM_savesB_vbbm $xtra_cmd_args" - eval "smcat /data1/systemFiles/dbrm/BRM_savesB_vss > $tmpDir/BRM_savesB_vss $xtra_cmd_args" + eval "smcat /data1/systemFiles/dbrm/BRM_savesB_em 2>/dev/null > $tmp_dir/BRM_savesB_em $xtra_cmd_args" + eval "smcat /data1/systemFiles/dbrm/BRM_savesB_vbbm 2>/dev/null > $tmp_dir/BRM_savesB_vbbm $xtra_cmd_args" + eval "smcat /data1/systemFiles/dbrm/BRM_savesB_vss 2>/dev/null > $tmp_dir/BRM_savesB_vss $xtra_cmd_args" + printf " Done \n" fi printf " - Saving BRMs... 
\n" - brmOwner=$(stat -c "%U" $DBRM_PATH/) - if sudo -u $brmOwner /usr/bin/save_brm ; then - rm -rf $tmpDir - else - printf "\n Failed: save_brm error - see /var/log/messages - backup @ $tmpDir \n\n"; - handle_early_exit_on_backup + brm_owner=$(stat -c "%U" $DBRM_PATH/) + if sudo -u $brm_owner /usr/bin/save_brm ; then + rm -rf $tmp_dir + else + printf "\n Failed: save_brm error - see /var/log/messages - backup @ $tmpDir \n\n"; + handle_early_exit_on_backup fi - + } # Example: s3sync # Example: s3sync s3://$bucket $backup_bucket/$today/columnstoreData "Done - Columnstore data sync complete" -s3sync() +s3sync() { local from=$1 @@ -954,21 +1094,25 @@ s3sync() local failed_message=$4 local retries=${5:-0} local cmd="" - + if [ $cloud == "gcp" ]; then cmd="$gsutil $xtra_s3_args -m rsync -r -d $from $to" # gsutil throws WARNINGS if not directed to /dev/null if $quiet; then cmd+=" 2>/dev/null"; fi - else + eval "$cmd" + else # Default AWS cmd="$awscli $xtra_s3_args $add_s3_api_flags s3 sync $from $to" + $cmd fi - if eval $cmd; then + local exit_code=$? 
+ + if [ $exit_code -eq 0 ]; then if [ -n "$success_message" ]; then printf "$success_message"; fi; else - if [ $retries -lt $RETRY_LIMIT ]; then + if [ $retries -lt $RETRY_LIMIT ]; then echo "$cmd" echo "Retrying: $retries" sleep 1; @@ -988,10 +1132,10 @@ s3cp() local from=$1 local to=$2 local cmd="" - + if [ $cloud == "gcp" ]; then cmd="$gsutil $xtra_s3_args cp $from $to" - else + else # Default AWS cmd="$awscli $xtra_s3_args $add_s3_api_flags s3 cp $from $to" fi; @@ -1000,15 +1144,15 @@ s3cp() } -# Example: s3rm +# Example: s3rm s3rm() { local path=$1 local cmd="" - + if [ $cloud == "gcp" ]; then cmd="$gsutil $xtra_s3_args -m rm -r $path" - else + else # Default AWS cmd="$awscli $xtra_s3_args $add_s3_api_flags s3 rm $path --recursive" fi; @@ -1016,15 +1160,15 @@ s3rm() $cmd } -# Example: s3ls +# Example: s3ls s3ls() { local path=$1 local cmd="" - + if [ $cloud == "gcp" ]; then cmd="$gsutil ls $path" - else + else # Default AWS cmd="$awscli $add_s3_api_flags s3 ls $path" fi; @@ -1032,19 +1176,19 @@ s3ls() $cmd } -clear_read_lock() +clear_read_lock() { if [ $mode == "indirect" ] || $skip_locks; then return; fi; - printf "\nClearing Locks\n"; - # Clear MDB Lock + printf "\nClearing Locks\n"; + # Clear MDB Lock if pidof mariadbd > /dev/null && [ $read_lock ]; then printf " - Clearing read-only lock on MariaDB Server ... "; if mariadb -e "UNLOCK TABLES;" && mariadb -qsNe "set global read_only=$ORIGINAL_READONLY_STATUS;"; then read_lock=false; printf " Done\n" - else + else handle_early_exit_on_backup "\n[X] Failed clearing readLock\n" true fi fi @@ -1052,27 +1196,27 @@ clear_read_lock() if [ $pm == "pm1" ]; then # Clear CS Lock - printf " - Clearing read-only lock on Columnstore Engine ... "; - if ! $columnstore_online; then + printf " - Clearing read-only lock on Columnstore Engine ... "; + if ! 
$columnstore_online; then printf "Skip since offline\n" - elif [ $DBROOT_COUNT == "1" ]; then + elif [ $DBROOT_COUNT == "1" ]; then if dbrmctl readwrite ; then printf " "; - else + else handle_early_exit_on_backup "\n[X] Failed clearing columnstore BRM lock via dbrmctl\n" true; fi elif command -v mcs &> /dev/null && command -v jq &> /dev/null ; then if ! mcs cluster set mode --mode readwrite | jq -r tostring ;then - handle_early_exit_on_backup "\n[X] Failed issuing columnstore BRM lock via cmapi\n"; + handle_early_exit_on_backup "\n[X] Failed issuing columnstore BRM lock via cmapi\n"; fi - elif curl -s -X PUT https://127.0.0.1:8640/cmapi/0.4.0/cluster/mode-set --header 'Content-Type:application/json' --header "x-api-key:$cmapi_key" --data '{"timeout":20, "mode": "readwrite"}' -k ; then + elif curl -s -X PUT https://127.0.0.1:8640/cmapi/0.4.0/cluster/mode-set --header 'Content-Type:application/json' --header "x-api-key:$cmapi_key" --data '{"timeout":20, "mode": "readwrite"}' -k ; then printf " \n" else handle_early_exit_on_backup "\n[X] Failed clearing columnstore BRM lock\n" true; fi fi - -} + +} # handle_ called when certain checks/functionality fails handle_failed_dependencies() @@ -1085,7 +1229,7 @@ handle_failed_dependencies() # first argument is the error message # 2nd argument true = skip clear_read_lock, false= dont skip handle_early_exit_on_backup() -{ +{ skip_clear_locks=${2:-false} if ! $skip_clear_locks; then clear_read_lock; fi; printf "\nBackup Failed: $1\n" @@ -1123,11 +1267,11 @@ wait_on_rsync() local total=0 local w=0; - while [ $rsyncInProgress -gt "$concurrentThreshHold" ]; do - if ! $quiet && $visualize && [ $(($w % 10)) -eq 0 ]; then - if [[ $total == 0 ]]; then - total=$(du -sh /var/lib/columnstore/data$dbrootToSync/) - else + while [ $rsyncInProgress -gt "$concurrentThreshHold" ]; do + if ! 
$quiet && $visualize && [ $(($w % 10)) -eq 0 ]; then + if [[ $total == 0 ]]; then + total=$(du -sh /var/lib/columnstore/data$dbrootToSync/) + else echo -E "$rsyncInProgress rsync processes running ... seconds: $w" echo "Status: $(du -sh $backup_location$today/data$dbrootToSync/)" echo "Goal: $total" @@ -1141,12 +1285,12 @@ wait_on_rsync() done } -initiate_rsyncs() +initiate_rsyncs() { local dbrootToSync=$1 parallel_rysnc_flags=" -a " if $incremental ; then parallel_rysnc_flags+=" --inplace --no-whole-file --delete"; fi; - + deepParallelRsync /var/lib/columnstore/data$dbrootToSync 1 $DEPTH data$dbrootToSync & sleep 2; #jobs @@ -1154,19 +1298,19 @@ initiate_rsyncs() wait } -# A recursive function that increments depthCurrent+1 each directory it goes deeper and issuing rsync on each directory remaing at the target depth -# Example values: +# A recursive function that increments depthCurrent+1 each directory it goes deeper and issuing rsync on each directory remaing at the target depth +# Example values: # path: /var/lib/columnstore/data1 # depthCurrent: 1 # depthTarget: 3 # depthCurrent: data1 -deepParallelRsync() +deepParallelRsync() { path=$1 depthCurrent=$2 depthTarget=$3 relativePath=$4 - # echo "DEBUG: + # echo "DEBUG: # path=$1 # depthCurrent=$2 # depthTarget=$3 @@ -1186,15 +1330,15 @@ deepParallelRsync() #echo "DEBUG - copy to relative: $backup_location$today/$relativePath/" if ls $fullFilePath | xargs -P $PARALLEL_THREADS -I {} rsync $parallel_rysnc_flags $fullFilePath/{} $backup_location$today/$relativePath/$fileName ; then echo " + Completed: $backup_location$today/$relativePath/$fileName" - else + else echo "Failed: $backup_location$today/$relativePath/$fileName" exit 1; fi - + else - # echo "DEBUG - Fork Deeper - $fullFilePath " + # echo "DEBUG - Fork Deeper - $fullFilePath " wait_on_rsync false "0.5" $PARALLEL_FOLDERS - # Since target depth not reached, recursively call for each directory + # Since target depth not reached, recursively call for each 
directory deepParallelRsync $fullFilePath "$((depthCurrent+1))" $depthTarget "$relativePath/$fileName" & fi @@ -1202,8 +1346,8 @@ deepParallelRsync() elif [ -f $fullFilePath ]; then rsync $additional_rysnc_flags $fullFilePath $backup_location$today/$relativePath/ - # If filename is * then the directory is empty - elif [ "$fileName" == "*" ]; then + # If filename is * then the directory is empty + elif [ "$fileName" == "*" ]; then # echo "DEBUG - Skipping $relativePath - empty"; continue else @@ -1214,8 +1358,8 @@ deepParallelRsync() } run_backup() -{ - if [ $storage == "LocalStorage" ]; then +{ + if [ $storage == "LocalStorage" ]; then if [ $backup_destination == "Local" ]; then printf "\nLocal Storage Backup\n" @@ -1226,34 +1370,34 @@ run_backup() mkdir -p $backup_location$today else mkdir -p $backup_location$today/mysql - mkdir -p $backup_location$today/configs + mkdir -p $backup_location$today/configs mkdir -p $backup_location$today/configs/mysql - + # Check/Create CS Data Directories i=1 while [ $i -le $DBROOT_COUNT ]; do if [[ $ASSIGNED_DBROOT == "$i" || $HA == true ]]; then mkdir -p $backup_location$today/data$i ; fi ((i++)) - done + done fi - printf " Done\n" - + printf " Done\n" + # Backup Columnstore data i=1 while [ $i -le $DBROOT_COUNT ]; do - if [[ $ASSIGNED_DBROOT == "$i" || $HA == true ]]; then + if [[ $ASSIGNED_DBROOT == "$i" || $HA == true ]]; then if [[ -n "$compress_format" ]]; then # For compression keep track of dirs & files to include and compress/stream in the end - $compress_paths - compress_paths="/var/lib/columnstore/data$i/* " + compress_paths="/var/lib/columnstore/data$i/* " - elif $parrallel_rsync ; then + elif $parrallel_rsync ; then printf " - Parallel Rsync CS Data$i... \n" initiate_rsyncs $i - printf " Done\n" + printf " Done\n" else printf " - Syncing Columnstore Data$i... 
" eval "rsync $additional_rysnc_flags /var/lib/columnstore/data$i/* $backup_location$today/data$i/ $xtra_cmd_args"; - printf " Done\n" + printf " Done\n" fi; fi ((i++)) @@ -1261,7 +1405,7 @@ run_backup() # Backup MariaDB data if [ $ASSIGNED_DBROOT == "1" ]; then - + # logic to increment mysql and keep count if we want to backup incremental mysql data # i=1 # latestMysqlIncrement=0 @@ -1270,7 +1414,7 @@ run_backup() # done # MariaDB backup wont rerun if folder exists - so clear it before running mariadb-backup - if [ -d $backup_location$today/mysql ]; then + if [ -d $backup_location$today/mysql ]; then rm -rf $backup_location$today/mysql fi @@ -1281,20 +1425,20 @@ run_backup() mbd_prefix="$backup_location$today/$split_file_mdb_prefix.$compress_format" case $compress_format in pigz) - # Handle Cloud + # Handle Cloud if ! mariabackup --user=root --backup --stream xbstream --parallel $PARALLEL_THREADS --ftwrl-wait-timeout=$timeout --ftwrl-wait-threshold=999999 --extra-lsndir=/tmp/checkpoint_out 2>>$logfile | pigz -p $PARALLEL_THREADS -c > $mbd_prefix 2>> $logfile; then handle_early_exit_on_backup "\nFailed mariabackup --user=root --backup --stream xbstream --parallel $PARALLEL_THREADS --ftwrl-wait-timeout=$timeout --ftwrl-wait-threshold=999999 --extra-lsndir=/tmp/checkpoint_out 2>>$logfile | pigz -p $PARALLEL_THREADS -c > $mbd_prefix 2>> $logfile \n" fi printf " Done @ $mbd_prefix\n" ;; *) # unknown option - handle_early_exit_on_backup "\nUnknown compression flag: $compress_format\n" + handle_early_exit_on_backup "\nUnknown compression flag: $compress_format\n" esac else printf " - Copying MariaDB Data... " if eval "mariadb-backup --backup --target-dir=$backup_location$today/mysql --user=root $xtra_cmd_args" ; then printf " Done \n"; else printf "\n Failed: mariadb-backup --backup --target-dir=$backup_location$today/mysql --user=root\n"; handle_early_exit_on_backup; fi fi - else + else echo "[!] 
Skipping mariadb-backup" fi @@ -1321,18 +1465,18 @@ run_backup() printf " - Copying MariaDB Configs... " mkdir -p $backup_location$today/configs/mysql/$pm/ eval "rsync $additional_rysnc_flags $MARIADB_SERVER_CONFIGS_PATH/* $backup_location$today/configs/mysql/$pm/ $xtra_cmd_args" - printf " Done\n\n" + printf " Done\n" else compress_paths+=" $MARIADB_SERVER_CONFIGS_PATH/*" fi fi - + # Handle compression for Columnstore Data & Configs if [[ -n "$compress_format" ]]; then cs_prefix="$backup_location$today/$split_file_cs_prefix.$compress_format" compressed_split_size="250M"; case $compress_format in - pigz) + pigz) printf " - Compressing CS Data & Configs... " if ! eval "tar cf - $compress_paths 2>>$logfile | pigz -p $PARALLEL_THREADS -c > $cs_prefix 2>> $logfile"; then handle_early_exit_on_backup "[!] - Compression Failed \ntar cf - $compress_paths 2>>$logfile | pigz -p $PARALLEL_THREADS -c > $cs_prefix 2>> $logfile \n" @@ -1340,26 +1484,26 @@ run_backup() printf " Done @ $cs_prefix\n" ;; *) # unknown option - handle_early_exit_on_backup "\nUnknown compression flag: $compress_format\n" + handle_early_exit_on_backup "\nUnknown compression flag: $compress_format\n" esac fi - if $incremental ; then + if $incremental ; then # Log each incremental run now=$(date "+%m-%d-%Y %H:%M:%S"); echo "$pm updated on $now" >> $backup_location$today/incrementallyUpdated.txt final_message="Incremental Backup Complete" - else + else # Create restore job file extra_flags="" - if [[ -n "$compress_format" ]]; then extra_flags+=" -c $compress_format"; fi; - if $skip_mdb; then extra_flags+=" --skip-mariadb-backup"; fi; + if [[ -n "$compress_format" ]]; then extra_flags+=" -c $compress_format"; fi; + if $skip_mdb; then extra_flags+=" --skip-mariadb-backup"; fi; echo "./$0 restore -l $today -bl $backup_location -bd $backup_destination -s $storage --dbroots $DBROOT_COUNT -m $mode $extra_flags --quiet" > $backup_location$today/restore.job fi final_message+=" @ $backup_location$today" - + elif [ 
$backup_destination == "Remote" ]; then - + # Check/Create Directories on remote server printf "[+] Checking Remote Directories... " i=1 @@ -1371,16 +1515,16 @@ run_backup() echo $makeDirectories ssh $scp " mkdir -p $backup_location$today/mysql ; $makeDataDirectories - mkdir -p $backup_location$today/configs ; - mkdir -p $backup_location$today/configs/mysql; + mkdir -p $backup_location$today/configs ; + mkdir -p $backup_location$today/configs/mysql; mkdir -p $backup_location$today/configs/mysql/$pm " printf " Done\n" - + printf "[~] Rsync Remote Columnstore Data... \n" i=1 while [ $i -le $DBROOT_COUNT ]; do #if [ $pm == "pm$i" ]; then rsync -a $additional_rysnc_flags /var/lib/columnstore/data$i/* $scp:$backup_location$today/data$i/ ; fi - if [ $pm == "pm$i" ]; then + if [ $pm == "pm$i" ]; then find /var/lib/columnstore/data$i/* -mindepth 1 -maxdepth 2 | xargs -P $PARALLEL_THREADS -I {} rsync $additional_rysnc_flags /var/lib/columnstore/data$i/* $scp:$backup_location$today/data$i/ fi ((i++)) @@ -1399,27 +1543,27 @@ run_backup() if ! $skip_mdb; then printf "[~] Backing up mysql... \n" mkdir -p $backup_location$today - if mariadb-backup --backup --target-dir=$backup_location$today --user=root ; then + if mariadb-backup --backup --target-dir=$backup_location$today --user=root ; then rsync -a $backup_location$today/* $scp:$backup_location$today/mysql rm -rf $backup_location$today - else - printf "\n Failed: mariadb-backup --backup --target-dir=$backup_location$today --user=root\n\n"; - handle_early_exit_on_backup + else + printf "\n Failed: mariadb-backup --backup --target-dir=$backup_location$today --user=root\n\n"; + handle_early_exit_on_backup fi - else + else echo "[!] 
Skipping mariadb-backup" fi; - + # Backup CS configurations - rsync $additional_rysnc_flags $CS_CONFIGS_PATH/Columnstore.xml $scp:$backup_location$today/configs/ + rsync $additional_rysnc_flags $CS_CONFIGS_PATH/Columnstore.xml $scp:$backup_location$today/configs/ rsync $additional_rysnc_flags $STORAGEMANGER_CNF $scp:$backup_location$today/configs/ - if [ -f $CS_CONFIGS_PATH/cmapi_server.conf ]; then - rsync $additional_rysnc_flags $CS_CONFIGS_PATH/cmapi_server.conf $scp:$backup_location$today/configs/; + if [ -f $CS_CONFIGS_PATH/cmapi_server.conf ]; then + rsync $additional_rysnc_flags $CS_CONFIGS_PATH/cmapi_server.conf $scp:$backup_location$today/configs/; fi fi - - - if $incremental ; then + + + if $incremental ; then now=$(date "+%m-%d-%Y +%H:%M:%S") ssh $scp "echo \"$pm updated on $now\" >> $backup_location$today/incrementallyUpdated.txt" final_message="Incremental Backup Complete" @@ -1432,7 +1576,7 @@ run_backup() elif [ $storage == "S3" ]; then - printf "S3 Backup\n" + printf "\nS3 Backup\n" # Conconsistency check - wait for assigned journal dir to be empty trap handle_ctrl_c_backup SIGINT i=1 @@ -1440,8 +1584,8 @@ run_backup() max_wait=180 printf " - Checking storagemanager/journal/data$ASSIGNED_DBROOT/* " while [[ $j_counts -gt 0 ]]; do - if [ $i -gt $max_wait ]; then handle_early_exit_on_backup "\n[!] max_wait exceeded for $cs_journal/data$ASSIGNED_DBROOT/* to sync with bucket "; fi; - if (( $i%10 == 0 )); then printf "\n[!] Not empty yet - found $j_counts files @ $cs_journal/data$ASSIGNED_DBROOT/*\n"; fi; + if [ $i -gt $max_wait ]; then printf "[!] Maybe you have orphaned journal files, active writes or an unreachable bucket \n"; handle_early_exit_on_backup "\n[!] max_wait exceeded for $cs_journal/data$ASSIGNED_DBROOT/* to sync with bucket "; ls -la $cs_journal/data$ASSIGNED_DBROOT/*; fi; + if (( $i%10 == 0 )); then printf "\n[!] 
Not empty yet - found $j_counts files @ $cs_journal/data$ASSIGNED_DBROOT/*\n"; printf " - Checking storagemanager/journal/data$ASSIGNED_DBROOT/* "; fi; sleep 1 i=$(($i+1)) j_counts=$(find $cs_journal/data$ASSIGNED_DBROOT/* -type f 2>/dev/null | wc -l) @@ -1453,14 +1597,14 @@ run_backup() if [[ -z "$compress_format" ]]; then printf " - Syncing Columnstore Metadata \n" - if $HA; then + if $HA; then s3sync $STORAGEMANAGER_PATH $backup_bucket/$today/storagemanager " - Done storagemanager/*\n" "\n\n[!!!] sync failed - storagemanager/*\n\n"; - else + else s3sync $cs_cache/data$ASSIGNED_DBROOT $backup_bucket/$today/storagemanager/cache/data$ASSIGNED_DBROOT " + cache/data$ASSIGNED_DBROOT\n" "\n\n[!!!] sync failed - cache/data$ASSIGNED_DBROOT\n\n"; s3sync $cs_metadata/data$ASSIGNED_DBROOT $backup_bucket/$today/storagemanager/metadata/data$ASSIGNED_DBROOT " + metadata/data$ASSIGNED_DBROOT\n" "\n\n[!!!] sync failed - metadata/data$ASSIGNED_DBROOT\n\n" s3sync $cs_journal/data$ASSIGNED_DBROOT $backup_bucket/$today/storagemanager/journal/data$ASSIGNED_DBROOT " + journal/data$ASSIGNED_DBROOT\n" "\n\n[!!!] sync failed - journal/data$ASSIGNED_DBROOT\n\n" fi; - + else # For compression keep track of dirs & files to include and compress/stream in the end - $compress_paths compress_paths="$cs_cache/data$ASSIGNED_DBROOT $cs_metadata/data$ASSIGNED_DBROOT $cs_journal/data$ASSIGNED_DBROOT " @@ -1468,7 +1612,7 @@ run_backup() # PM1 mostly backups everything else if [ $ASSIGNED_DBROOT == "1" ]; then - + # Backup CS configurations if [[ -z "$compress_format" ]]; then s3sync $CS_CONFIGS_PATH/ $backup_bucket/$today/configs/ " + $CS_CONFIGS_PATH/\n" "\n\n[!!!] sync failed - $CS_CONFIGS_PATH/\n\n"; @@ -1480,7 +1624,7 @@ run_backup() if ! $skip_bucket_data; then printf " - Saving Columnstore data ... " s3sync $protocol://$bucket $backup_bucket/$today/columnstoreData " Done \n" - else + else printf " - [!] 
Skipping columnstore bucket data \n" fi @@ -1492,12 +1636,12 @@ run_backup() mbd_prefix="$backup_bucket/$today/$split_file_mdb_prefix.$compress_format" case $compress_format in pigz) - # Handle Cloud + # Handle Cloud if [ $cloud == "gcp" ]; then if ! mariabackup --user=root --backup --stream xbstream --parallel $PARALLEL_THREADS --ftwrl-wait-timeout=$timeout --ftwrl-wait-threshold=999999 --extra-lsndir=/tmp/checkpoint_out 2>>$logfile | pigz -p $PARALLEL_THREADS 2>> $logfile | split -d -a 5 -b 250M --filter="gsutil cp - ${mbd_prefix}_\$FILE 2>$logfile" - chunk 2>$logfile; then handle_early_exit_on_backup "\nFailed mariadb-backup --backup --stream xbstream --parallel $PARALLEL_THREADS --ftwrl-wait-timeout=$timeout --ftwrl-wait-threshold=999999 --extra-lsndir=/tmp/checkpoint_out 2>>$logfile | pigz -p $PARALLEL_THREADS 2>> $logfile | split -d -a 5 -b 250M --filter=\"gsutil cp - ${mbd_prefix}_\$FILE 2>$logfile\" - chunk 2>$logfile \n" fi - + elif [ $cloud == "aws" ]; then if ! mariabackup --user=root --backup --stream xbstream --parallel $PARALLEL_THREADS --ftwrl-wait-timeout=$timeout --ftwrl-wait-threshold=999999 --extra-lsndir=/tmp/checkpoint_out 2>>$logfile | pigz -p $PARALLEL_THREADS 2>> $logfile | split -d -a 5 -b 250M --filter="aws s3 cp - ${mbd_prefix}_\$FILE 2>$logfile 1>&2" - chunk 2>$logfile; then handle_early_exit_on_backup "\nFailed mariadb-backup --backup --stream xbstream --parallel $PARALLEL_THREADS --ftwrl-wait-timeout=$timeout --ftwrl-wait-threshold=999999 --extra-lsndir=/tmp/checkpoint_out 2>>$logfile | pigz -p $PARALLEL_THREADS 2>> $logfile | split -d -a 5 -b 250M --filter=\"aws s3 cp - ${mbd_prefix}_\$FILE 2>$logfile 1>&2\" - chunk 2>$logfile\n" @@ -1506,7 +1650,7 @@ run_backup() printf " Done @ $mbd_prefix\n" ;; *) # unknown option - handle_early_exit_on_backup "\nunknown compression flag: $compress_format\n" + handle_early_exit_on_backup "\nunknown compression flag: $compress_format\n" esac else @@ -1523,11 +1667,11 @@ run_backup() s3rm 
$backup_bucket/$today/mysql/ s3sync $backup_location$today/mysql $backup_bucket/$today/mysql/ " + mariadb-backup to bucket @ $backup_bucket/$today/mysql/ \n" rm -rf $backup_location$today/mysql - else + else handle_early_exit_on_backup "\nFailed making directory for MariaDB backup\ncommand: mkdir -p $backup_location$today/mysql\n" fi fi - else + else echo "[!] Skipping mariadb-backup" fi fi @@ -1547,13 +1691,13 @@ run_backup() cs_prefix="$backup_bucket/$today/$split_file_cs_prefix.$compress_format" compressed_split_size="250M"; case $compress_format in - pigz) + pigz) printf " - Compressing CS local files -> bucket ... " if [ $cloud == "gcp" ]; then if ! eval "tar cf - $compress_paths 2>>$logfile | pigz -p $PARALLEL_THREADS 2>> $logfile | split -d -a 5 -b $compressed_split_size --filter=\"gsutil cp - ${cs_prefix}_\\\$FILE 2>$logfile 1>&2\" - chunk 2>$logfile"; then handle_early_exit_on_backup "[!] - Compression/Split/Upload Failed \n" fi - + elif [ $cloud == "aws" ]; then if ! eval "tar cf - $compress_paths 2>>$logfile | pigz -p $PARALLEL_THREADS 2>> $logfile | split -d -a 5 -b $compressed_split_size --filter=\"aws s3 cp - ${cs_prefix}_\\\$FILE 2>$logfile 1>&2\" - chunk 2>$logfile"; then handle_early_exit_on_backup "[!] 
- Compression/Split/Upload Failed \n" @@ -1562,15 +1706,15 @@ run_backup() printf " Done @ $cs_prefix\n" ;; *) # unknown option - handle_early_exit_on_backup "\nunknown compression flag: $compress_format\n" + handle_early_exit_on_backup "\nunknown compression flag: $compress_format\n" esac fi # Create restore job file & update incremental log - if $incremental ; then + if $incremental ; then now=$(date "+%m-%d-%Y +%H:%M:%S") increment_file="incremental_history.txt" - + # download S3 file, append to it and reupload if [[ $(s3ls "$backup_bucket/$today/$increment_file") ]]; then s3cp $backup_bucket/$today/$increment_file $increment_file @@ -1583,23 +1727,23 @@ run_backup() else # Create restore job file extra_flags="" - if [[ -n "$compress_format" ]]; then extra_flags+=" -c $compress_format"; fi; - if $skip_mdb; then extra_flags+=" --skip-mariadb-backup"; fi; - if $skip_bucket_data; then extra_flags+=" --skip-bucket-data"; fi; + if [[ -n "$compress_format" ]]; then extra_flags+=" -c $compress_format"; fi; + if $skip_mdb; then extra_flags+=" --skip-mariadb-backup"; fi; + if $skip_bucket_data; then extra_flags+=" --skip-bucket-data"; fi; if [ -n "$s3_url" ]; then extra_flags+=" -url $s3_url"; fi; echo "./$0 restore -l $today -s $storage -bb $backup_bucket -dbs $DBROOT_COUNT -m $mode -nb $protocol://$bucket $extra_flags --quiet --continue" > restoreS3.job s3cp restoreS3.job $backup_bucket/$today/restoreS3.job rm -rf restoreS3.job fi final_message+=" @ $backup_bucket/$today" - + fi - + clear_read_lock end=$(date +%s) runtime=$((end-start)) - printf "\n[+] Runtime: $runtime\n" - printf "[+] $final_message\n\n" + printf "\nRuntime: $runtime\n" + printf "$final_message\n\n" } load_default_restore_variables() @@ -1621,19 +1765,19 @@ load_default_restore_variables() scp="" # What storage topogoly was used by Columnstore in the backup - found in storagemanager.cnf - # Options: "LocalStorage" or "S3" + # Options: "LocalStorage" or "S3" storage="LocalStorage" # Flag for high 
available systems (meaning shared storage exists supporting the topology so that each node sees all data) HA=false # When set to true skips the enforcement that new_bucket should be empty prior to starting a restore - continue=false + continue=false # modes ['direct','indirect'] - direct backups run on the columnstore nodes themselves. indirect run on another machine that has read-only mounts associated with columnstore/mariadb mode="direct" - # Name of the Configuration file to load variables from + # Name of the Configuration file to load variables from config_file=".cs-backup-config" # Only used if storage=S3 @@ -1815,7 +1959,7 @@ parse_restore_variables() shift # past argument ;; -nv-ssl| --no-verify-ssl) - no_verify_ssl=true + no_verify_ssl=true shift # past argument ;; -h|--help|-help|help) @@ -1834,7 +1978,7 @@ parse_restore_variables() } print_restore_help_text() -{ +{ echo " Columnstore Restore @@ -1848,25 +1992,26 @@ print_restore_help_text() -nv-ssl| --no-verify-ssl) Skips verifying ssl certs, useful for onpremise s3 storage -s | --storage The storage used by columnstore data 'LocalStorage' or 'S3' -pm | --nodeid Forces the handling of the restore as this node as opposed to whats detected on disk - -nb | --new-bucket Defines the new bucket to copy the s3 data to from the backup bucket. + -nb | --new-bucket Defines the new bucket to copy the s3 data to from the backup bucket. 
Use -nb if the new restored cluster should use a different bucket than the backup bucket itself - -nr | --new-region Defines the region of the new bucket to copy the s3 data to from the backup bucket - -nk | --new-key Defines the aws key to connect to the new_bucket - -ns | --new-secret Defines the aws secret of the aws key to connect to the new_bucket + -nr | --new-region Defines the region of the new bucket to copy the s3 data to from the backup bucket + -nk | --new-key Defines the aws key to connect to the new_bucket + -ns | --new-secret Defines the aws secret of the aws key to connect to the new_bucket -f | --config-file Path to backup configuration file to load variables from -cont| --continue This acknowledges data in your --new_bucket is ok to delete when restoring S3 -smdb| --skip-mariadb-backup Skip restoring mariadb server via mariadb-backup - ideal for only restoring columnstore -sb | --skip-bucket-data Skip restoring columnstore data in the bucket - ideal if looking to only restore mariadb server -q | --quiet Silence verbose copy command outputs -c | --compress Hint that the backup is compressed in X format - Options: [ pigz ] + -P | --parallel Number of parallel decompression and mbstream threads to run -ha | --highavilability Hint for if shared storage is attached @ below on all nodes to see all data HA LocalStorage ( /var/lib/columnstore/dataX/ ) - HA S3 ( /var/lib/columnstore/storagemanager/ ) + HA S3 ( /var/lib/columnstore/storagemanager/ ) Local Storage Examples: ./$0 restore -s LocalStorage -bl /tmp/backups/ -bd Local -l 12-29-2021 ./$0 restore -s LocalStorage -bl /tmp/backups/ -bd Remote -scp root@172.31.6.163 -l 12-29-2021 - + S3 Storage Examples: ./$0 restore -s S3 -bb s3://my-cs-backups -l 12-29-2021 ./$0 restore -s S3 -bb gs://on-premise-bucket -l 12-29-2021 -url http://127.0.0.1:8000 @@ -1886,7 +2031,7 @@ print_restore_variables() printf "%-${s1}s %-${s2}s\n" "Configuration File:" "$config_file"; source $config_file fi - if [ $mode == 
"indirect" ] || $skip_mdb || $skip_bucket_data; then + if [ $mode == "indirect" ] || $skip_mdb || $skip_bucket_data; then echo "------------------------------------------------" echo "Skips: MariaDB($skip_mdb) Bucket($skip_bucket_data)"; fi; @@ -1894,10 +2039,11 @@ print_restore_variables() if [[ -n "$compress_format" ]]; then echo "Compression: true" echo "Compression Format: $compress_format"; - else + echo "Decompression Threads:" "$PARALLEL_THREADS"; + else echo "Compression: false" fi - if [ $storage == "LocalStorage" ]; then + if [ $storage == "LocalStorage" ]; then echo "Backup Location: $backup_location" echo "Backup Destination: $backup_destination" echo "Scp: $scp" @@ -1909,7 +2055,7 @@ print_restore_variables() echo "PM Number: $pm_number" elif [ $storage == "S3" ]; then - echo "Backup Location: $backup_location" + echo "Backup Location: $backup_location" echo "Storage: $storage" echo "Load Date: $load_date" echo "timestamp: $(date +%m-%d-%Y-%H%M%S)" @@ -1932,7 +2078,7 @@ print_restore_variables() validation_prechecks_for_restore() { echo "Prechecks ..." - if [ $storage != "LocalStorage" ] && [ $storage != "S3" ]; then handle_early_exit_on_restore "Invalid script variable storage: $storage\n"; fi + if [ $storage != "LocalStorage" ] && [ $storage != "S3" ]; then handle_early_exit_on_restore "Invalid script variable storage: $storage\n"; fi if [ -z "$load_date" ]; then handle_early_exit_on_restore "\n[!!!] Required field --load: $load_date - is empty\n" ; fi if [ -z "$mode" ]; then handle_early_exit_on_restore "\n[!!!] Required field --mode: $mode - is empty\n" ; fi if [ "$mode" != "direct" ] && [ "$mode" != "indirect" ] ; then handle_early_exit_on_restore "\n[!!!] 
Invalid field --mode: $mode\n"; fi @@ -1944,7 +2090,7 @@ validation_prechecks_for_restore() { if [ -n "$s3_url" ]; then add_s3_api_flags+=" --endpoint-url $s3_url"; fi if $no_verify_ssl; then add_s3_api_flags+=" --no-verify-ssl"; fi; - # If remote backup - Validate that scp works + # If remote backup - Validate that scp works if [ $backup_destination == "Remote" ]; then if ssh $scp echo ok 2>&1 ;then printf 'SSH Works\n\n' @@ -1954,17 +2100,17 @@ validation_prechecks_for_restore() { fi # Make sure the database is offline - if [ "$mode" == "direct" ]; then - if [ -z $(pidof PrimProc) ]; then - printf " - Columnstore Status ... Offline\n"; - else - handle_early_exit_on_restore "\n[X] Columnstore is ONLINE - please turn off \n\n"; + if [ "$mode" == "direct" ]; then + if [ -z $(pidof PrimProc) ]; then + printf " - Columnstore Status ... Offline\n"; + else + handle_early_exit_on_restore "\n[X] Columnstore is ONLINE - please turn off \n\n"; fi - - if [ -z $(pidof mariadbd) ]; then - printf " - MariaDB Server Status ... Offline\n"; - else - handle_early_exit_on_restore "\n[X] MariaDB is ONLINE - please turn off \n\n"; + + if [ -z $(pidof mariadbd) ]; then + printf " - MariaDB Server Status ... Offline\n"; + else + handle_early_exit_on_restore "\n[X] MariaDB is ONLINE - please turn off \n\n"; fi fi @@ -1972,7 +2118,7 @@ validation_prechecks_for_restore() { check_package_managers cmapi_installed_command="" case $package_manager in - yum ) + yum ) cmapi_installed_command="yum list installed MariaDB-columnstore-cmapi &> /dev/null;"; ;; apt ) @@ -1984,33 +2130,33 @@ validation_prechecks_for_restore() { esac if eval $cmapi_installed_command ; then - if [ -z $(pidof /usr/share/columnstore/cmapi/python/bin/python3) ]; then - printf " - Columnstore Management API Status .. Offline\n"; - else - handle_early_exit_on_restore "\n[X] Cmapi is ONLINE - please turn off \n\n"; + if ! sudo mcs cmapi is-ready ; then + printf " - Columnstore Management API Status .. 
Offline\n"; + else + handle_early_exit_on_restore "\n[X] Cmapi is ONLINE - please turn off \n\n"; fi else - printf " - Columnstore Management API Status .. Not Installed - Skipping\n"; + printf " - Columnstore Management API Status .. Not Installed - Skipping\n"; fi # Validate addtional relevant arguments per storage option - if [ $storage == "LocalStorage" ]; then - + if [ $storage == "LocalStorage" ]; then + if [ -z "$backup_location" ]; then handle_early_exit_on_restore "Invalid --backup_location: $backup_location - is empty"; fi if [ -z "$backup_destination" ]; then handle_early_exit_on_restore "Invalid --backup_destination: $backup_destination - is empty"; fi if [ $backup_destination == "Remote" ] && [ -d $backup_location$load_date ]; then echo "Switching to '-bd Local'"; backup_destination="Local"; fi if [ $backup_destination == "Local" ]; then - + if [ ! -d $backup_location ]; then handle_early_exit_on_restore "Invalid directory --backup_location: $backup_location - doesnt exist"; fi if [ -z "$(ls -A "$backup_location")" ]; then echo "Invalid --backup_location: $backup_location - directory is empty."; fi; if [ ! 
-d $backup_location$load_date ]; then handle_early_exit_on_restore "Invalid directory --load: $backup_location$load_date - doesnt exist"; else printf " - Backup directory exists\n"; fi fi; local files=$(ls $backup_location$load_date) - if [[ -n "$compress_format" ]]; then + if [[ -n "$compress_format" ]]; then case "$compress_format" in - pigz) - flag_cs_local=false + pigz) + flag_cs_local=false flag_mysql=$skip_mdb while read -r line; do @@ -2031,17 +2177,17 @@ validation_prechecks_for_restore() { fi ;; *) # unknown option - handle_early_exit_on_backup "\nUnknown compression flag: $compress_format\n" + handle_early_exit_on_backup "\nUnknown compression flag: $compress_format\n" esac - else - + else + flag_configs=false flag_data=false flag_mysql=$skip_mdb files=$(ls $backup_location$load_date ) while read -r line; do folder_name=$(echo "$line" | awk '{ print $NF }') - + case "$folder_name" in *configs*) flag_configs=true ;; *data*) flag_data=true ;; @@ -2059,15 +2205,15 @@ validation_prechecks_for_restore() { handle_early_exit_on_restore " Backup is missing subdirectories... \nls $backup_location$load_date\n" fi fi - - elif [ $storage == "S3" ]; then - + + elif [ $storage == "S3" ]; then + # Check for concurrency settings - https://docs.aws.amazon.com/cli/latest/topic/s3-config.html if [ $cloud == "aws" ]; then cli_concurrency=$(sudo grep max_concurrent_requests ~/.aws/config 2>/dev/null ); if [ -z "$cli_concurrency" ]; then echo "[!!!] check: '~/.aws/config' - We recommend increasing s3 concurrency for better throughput to/from S3. 
This value should scale with avaialble CPU and networking capacity"; printf "example: aws configure set default.s3.max_concurrent_requests 200\n"; fi; fi; # Validate addtional relevant arguments if [ -z "$backup_bucket" ]; then handle_early_exit_on_restore "Invalid --backup_bucket: $backup_bucket - is empty"; fi - + # Prepare commands for each cloud if [ $cloud == "gcp" ]; then check1="$gsutil ls $backup_bucket"; @@ -2075,7 +2221,7 @@ validation_prechecks_for_restore() { check3="$gsutil ls $new_bucket"; check4="$gsutil ls $backup_bucket/$load_date"; check5="$gsutil ls $backup_bucket/$load_date/"; - else + else check1="$awscli $add_s3_api_flags s3 ls $backup_bucket" check2="$awscli $add_s3_api_flags s3 ls | grep $new_bucket" check3="$awscli $add_s3_api_flags s3 ls $new_bucket" @@ -2086,33 +2232,33 @@ validation_prechecks_for_restore() { # Check aws cli access to bucket if $check1 > /dev/null ; then echo -e " - Success listing backup bucket" - else + else handle_early_exit_on_restore "[X] Failed to list bucket contents...\n$check1\n" fi # New bucket exists and empty check - if [ "$new_bucket" ] && [ $pm == "pm1" ]; then + if [ "$new_bucket" ] && [ $pm == "pm1" ]; then # Removing as storage.buckets.get permission likely not needed - # new_bucket_exists=$($check2); if [ -z "$new_bucket_exists" ]; then handle_early_exit_on_restore "[!!!] Didnt find new bucket - Check: $check2\n"; fi; echo "[+] New Bucket exists"; + # new_bucket_exists=$($check2); if [ -z "$new_bucket_exists" ]; then handle_early_exit_on_restore "[!!!] Didnt find new bucket - Check: $check2\n"; fi; echo "[+] New Bucket exists"; # Throw warning if new bucket is NOT empty - if ! $continue; then nb_contents=$($check3 | wc -l); - if [ $nb_contents -lt 1 ]; then - echo " - New Bucket is empty"; - else - echo "[!!!] New bucket is NOT empty... $nb_contents files exist... 
exiting"; - echo "add "--continue" to skip this exit"; - echo -e "\nExample empty bucket command:\n aws s3 rm $new_bucket --recursive\n gsutil -m rm -r $new_bucket/* \n"; - handle_early_exit_on_restore "Please empty bucket or add --continue \n"; - fi; - else - echo " - [!] Skipping empty new_bucket check"; - fi; + if ! $continue; then nb_contents=$($check3 | wc -l); + if [ $nb_contents -lt 1 ]; then + echo " - New Bucket is empty"; + else + echo "[!!!] New bucket is NOT empty... $nb_contents files exist... exiting"; + echo "add "--continue" to skip this exit"; + echo -e "\nExample empty bucket command:\n aws s3 rm $new_bucket --recursive\n gsutil -m rm -r $new_bucket/* \n"; + handle_early_exit_on_restore "Please empty bucket or add --continue \n"; + fi; + else + echo " - [!] Skipping empty new_bucket check"; + fi; fi # Check if s3 bucket load date exists if $check4 > /dev/null ; then echo -e " - Backup directory exists" - else + else handle_early_exit_on_restore "\n[X] Backup directory load date ($backup_bucket/$load_date) DOES NOT exist in S3 \nCheck - $check4 \n\n"; fi @@ -2121,8 +2267,8 @@ validation_prechecks_for_restore() { if [[ -n "$compress_format" ]]; then # when compressed, storagemanager/configs wont exist but rather pigz_chunkXXX does. 
and mysql could be split up case "$compress_format" in - pigz) - flag_cs_local=false + pigz) + flag_cs_local=false flag_mysql=$skip_mdb flag_columnstoreData=$skip_bucket_data @@ -2146,7 +2292,7 @@ validation_prechecks_for_restore() { fi ;; *) # unknown option - handle_early_exit_on_backup "\nunknown compression flag: $compress_format\n" + handle_early_exit_on_backup "\nunknown compression flag: $compress_format\n" esac else flag_configs=false @@ -2164,7 +2310,7 @@ validation_prechecks_for_restore() { *configs*) flag_configs=true ;; #*) echo "unknown: $folder_name" ;; esac - done <<< "$folders" + done <<< "$folders" if $flag_storagemanager && $flag_mysql && $flag_columnstoreData && $flag_configs; then echo " - Backup subdirectories exist" @@ -2177,7 +2323,7 @@ validation_prechecks_for_restore() { handle_early_exit_on_restore " Backup is missing subdirectories... \n$check5\n" fi fi; - else + else handle_early_exit_on_restore " Failed to list backup contents...\n$check5\n" fi; fi; @@ -2185,10 +2331,10 @@ validation_prechecks_for_restore() { if $quiet; then xtra_cmd_args+=" 2> /dev/null"; rsync_flags=" -a"; fi } -# Restore Columnstore.xml by updating critical parameters +# Restore Columnstore.xml by updating critical parameters # Depends on $CS_CONFIGS_PATH/$col_xml_backup restore_columnstore_values() { - + printf "\nRestore Columnstore.xml Values" if ! $quiet; then printf "\n"; else printf "... 
"; fi; columnstore_config_pairs_to_transfer=( @@ -2209,20 +2355,20 @@ restore_columnstore_values() { for pair in "${columnstore_config_pairs_to_transfer[@]}"; do level_one=$(echo "$pair" | cut -d ' ' -f 1) level_two=$(echo "$pair" | cut -d ' ' -f 2) - + # Get the source value using mcsGetConfig -c source_value=$(mcsGetConfig -c "$CS_CONFIGS_PATH/$col_xml_backup" "$level_one" "$level_two") - + if [ -n "$source_value" ]; then # Set the value in the active Columnstore.xml using mcsSetConfig if mcsSetConfig "$level_one" "$level_two" "$source_value"; then # echo instead of printf to avoid escaping % from HashJoin TotalUmMemory - if ! $quiet; then echo " - Set $level_one $level_two $source_value"; fi; + if ! $quiet; then echo " - Set $level_one $level_two $source_value"; fi; else printf "\n[!] Failed to Set $level_one $level_two $source_value \n"; fi else - if ! $quiet; then printf " - N/A: $level_one $level_two \n"; fi; + if ! $quiet; then printf " - N/A: $level_one $level_two \n"; fi; fi done if $quiet; then printf " Done\n"; fi; @@ -2231,11 +2377,11 @@ restore_columnstore_values() { run_restore() { # Branch logic based on topology - if [ $storage == "LocalStorage" ]; then + if [ $storage == "LocalStorage" ]; then # Branch logic based on where the backup resides if [ $backup_destination == "Local" ]; then - + # MariaDB Columnstore Restore printf "\nRestore MariaDB Columnstore LocalStorage\n" @@ -2246,18 +2392,18 @@ run_restore() i=1; while [ $i -le $DBROOT_COUNT ]; do if [[ $pm == "pm$i" || $HA == true ]]; then - printf " - Deleting Columnstore Data$i ... "; - rm -rf /var/lib/columnstore/data$i/*; - printf " Done \n"; + printf " - Deleting Columnstore Data$i ... "; + rm -rf /var/lib/columnstore/data$i/*; + printf " Done \n"; fi ((i++)) - done + done tar_flags="xf" if ! $quiet; then tar_flags+="v"; fi; cs_prefix="$backup_location$load_date/$split_file_cs_prefix.$compress_format" case $compress_format in - pigz) + pigz) printf " - Decompressing CS Files -> local ... 
" cd / if ! eval "pigz -dc -p $PARALLEL_THREADS $cs_prefix | tar $tar_flags - "; then @@ -2266,16 +2412,16 @@ run_restore() printf " Done \n" ;; *) # unknown option - handle_early_exit_on_backup "\nunknown compression flag: $compress_format\n" + handle_early_exit_on_backup "\nunknown compression flag: $compress_format\n" esac # Set permissions after restoring files i=1; while [ $i -le $DBROOT_COUNT ]; do if [[ $pm == "pm$i" || $HA == true ]]; then - printf " - Chowning Columnstore Data$i ... "; - chown $columnstore_user:$columnstore_user -R /var/lib/columnstore/data$i/; - printf " Done \n"; + printf " - Chowning Columnstore Data$i ... "; + chown $columnstore_user:$columnstore_user -R /var/lib/columnstore/data$i/; + printf " Done \n"; fi ((i++)) done @@ -2287,25 +2433,25 @@ run_restore() while [ $i -le $DBROOT_COUNT ]; do if [[ $pm == "pm$i" || $HA == true ]]; then printf " - Restoring Columnstore Data$i ... "; - rm -rf /var/lib/columnstore/data$i/*; + rm -rf /var/lib/columnstore/data$i/*; rsync $rsync_flags $backup_location$load_date/data$i/ /var/lib/columnstore/data$i/; - chown $columnstore_user:$columnstore_user -R /var/lib/columnstore/data$i/; - printf " Done Data$i \n"; + chown $columnstore_user:$columnstore_user -R /var/lib/columnstore/data$i/; + printf " Done Data$i \n"; fi ((i++)) - done - - # Put configs in place + done + + # Put configs in place printf " - Columnstore Configs ... 
" rsync $rsync_flags $backup_location$load_date/configs/storagemanager.cnf $STORAGEMANGER_CNF rsync $rsync_flags $backup_location$load_date/configs/Columnstore.xml $CS_CONFIGS_PATH/$col_xml_backup rsync $rsync_flags $backup_location$load_date/configs/mysql/$pm/ $MARIADB_SERVER_CONFIGS_PATH/ - if [ -f "$backup_location$load_date/configs/cmapi_server.conf" ]; then - rsync $rsync_flags $backup_location$load_date/configs/cmapi_server.conf $CS_CONFIGS_PATH/$cmapi_backup ; + if [ -f "$backup_location$load_date/configs/cmapi_server.conf" ]; then + rsync $rsync_flags $backup_location$load_date/configs/cmapi_server.conf $CS_CONFIGS_PATH/$cmapi_backup ; fi; - printf " Done\n" + printf " Done\n" fi - + elif [ $backup_destination == "Remote" ]; then printf "[~] Copy MySQL Data..." tmp="localscpcopy-$load_date" @@ -2317,36 +2463,36 @@ run_restore() # Loop through per dbroot printf "[~] Columnstore Data..."; i=1; while [ $i -le $DBROOT_COUNT ]; do - if [[ $pm == "pm$i" || $HA == true ]]; then - rm -rf /var/lib/columnstore/data$i/; - rsync -av $scp:$backup_location$load_date/data$i/ /var/lib/columnstore/data$i/ ; + if [[ $pm == "pm$i" || $HA == true ]]; then + rm -rf /var/lib/columnstore/data$i/; + rsync -av $scp:$backup_location$load_date/data$i/ /var/lib/columnstore/data$i/ ; chown $columnstore_user:$columnstore_user -R /var/lib/columnstore/data$i/; fi ((i++)) done - printf "[+] Columnstore Data... Done\n" + printf "[+] Columnstore Data... Done\n" # Put configs in place printf " - Columnstore Configs ... 
" rsync $rsync_flags $backup_location$load_date/configs/storagemanager.cnf $STORAGEMANGER_CNF - rsync $rsync_flags $backup_location$load_date/configs/Columnstore.xml $CS_CONFIGS_PATH/$col_xml_backup - rsync $rsync_flags $backup_location$load_date/configs/mysql/$pm/ $MARIADB_SERVER_CONFIGS_PATH/ - if [ -f "$backup_location$load_date/configs/cmapi_server.conf" ]; then - rsync $rsync_flags $backup_location$load_date/configs/cmapi_server.conf $CS_CONFIGS_PATH/$cmapi_backup ; + rsync $rsync_flags $backup_location$load_date/configs/Columnstore.xml $CS_CONFIGS_PATH/$col_xml_backup + rsync $rsync_flags $backup_location$load_date/configs/mysql/$pm/ $MARIADB_SERVER_CONFIGS_PATH/ + if [ -f "$backup_location$load_date/configs/cmapi_server.conf" ]; then + rsync $rsync_flags $backup_location$load_date/configs/cmapi_server.conf $CS_CONFIGS_PATH/$cmapi_backup ; fi; printf " Done\n" load_date=$tmp - else + else handle_early_exit_on_restore "Invalid Script Variable --backup_destination: $backup_destination" fi - # MariaDB Server Restore + # MariaDB Server Restore printf "\nRestore MariaDB Server\n" if [[ -n "$compress_format" ]]; then # Handle compressed mariadb-backup restore mbd_prefix="$backup_location$load_date/$split_file_mdb_prefix.$compress_format" case $compress_format in - pigz) + pigz) printf " - Decompressing MariaDB Files -> $MARIADB_PATH ... " rm -rf $MARIADB_PATH/* cd / @@ -2355,14 +2501,14 @@ run_restore() fi; printf " Done \n" printf " - Running mariabackup --prepare ... 
" - if eval "mariabackup --prepare --target-dir=$MARIADB_PATH $xtra_cmd_args"; then - printf " Done\n"; - else - handle_early_exit_on_restore "Failed to --prepare\nmariabackup --prepare --target-dir=$MARIADB_PATH"; - fi; + if eval "mariabackup --prepare --target-dir=$MARIADB_PATH $xtra_cmd_args"; then + printf " Done\n"; + else + handle_early_exit_on_restore "Failed to --prepare\nmariabackup --prepare --target-dir=$MARIADB_PATH"; + fi; ;; *) # unknown option - handle_early_exit_on_backup "\nUnknown compression flag: $compress_format\n" + handle_early_exit_on_backup "\nUnknown compression flag: $compress_format\n" esac else eval "mariabackup --prepare --target-dir=$backup_location$load_date/mysql/ $xtra_cmd_args" @@ -2379,18 +2525,18 @@ run_restore() printf "\nRestore MariaDB Columnstore S3\n" # Sync the columnstore data from the backup bucket to the new bucket - if ! $skip_bucket_data && [ "$new_bucket" ] && [ $pm == "pm1" ]; then + if ! $skip_bucket_data && [ "$new_bucket" ] && [ $pm == "pm1" ]; then printf "[+] Starting Bucket Sync:\n - $backup_bucket/$load_date/columnstoreData to $new_bucket\n" s3sync $backup_bucket/$load_date/columnstoreData/ $new_bucket/ " - Done with S3 Bucket sync\n" - else + else printf "[!] Skipping Columnstore Bucket\n" fi; - + printf "[+] Starting Columnstore Configurations & Metadata: \n" if [[ -n "$compress_format" ]]; then cs_prefix="$backup_bucket/$load_date/$split_file_cs_prefix.$compress_format" case $compress_format in - pigz) + pigz) printf " - Decompressing CS Files bucket -> local ... " if [ $cloud == "gcp" ]; then # gsutil supports simple "cat prefix*" to standard out unlike awscli @@ -2399,11 +2545,11 @@ run_restore() handle_early_exit_on_restore "Failed to decompress and untar columnstore localfiles\ncommand:\n$gsutil cat $cs_prefix* | gzip -dc | tar xf - \n\n" fi elif [ $cloud == "aws" ]; then - + # List all the pigz compressed chunks in the S3 prefix - should I clear prior local files? 
chunk_list=$($awscli s3 ls "$cs_prefix" | awk '{print $NF}') cd / - + # Use process substitution to concatenate the compressed chunks and pipe to pigz and extract using tar if ! eval "pigz -dc -p $PARALLEL_THREADS <(for chunk in \$chunk_list; do $awscli s3 cp \"$backup_bucket/$load_date/\$chunk\" - ; done) | tar xf - "; then handle_early_exit_on_restore "Failed to decompress and untar columnstore localfiles\ncommand:\npigz -dc -p $PARALLEL_THREADS <(for chunk in \$chunk_list; do $awscli s3 cp \"$backup_bucket/$load_date/\$chunk\" - ; done) | tar xf - \n\n" @@ -2412,21 +2558,21 @@ run_restore() printf " Done \n" ;; *) # unknown option - handle_early_exit_on_backup "\nunknown compression flag: $compress_format\n" + handle_early_exit_on_backup "\nunknown compression flag: $compress_format\n" esac - + else - # Columnstore.xml and cmapi are renamed so that only specifc values are restored + # Columnstore.xml and cmapi are renamed so that only specifc values are restored s3cp $backup_bucket/$load_date/configs/Columnstore.xml $CS_CONFIGS_PATH/$col_xml_backup s3cp $backup_bucket/$load_date/configs/cmapi_server.conf $CS_CONFIGS_PATH/$cmapi_backup - s3cp $backup_bucket/$load_date/configs/storagemanager.cnf $STORAGEMANGER_CNF + s3cp $backup_bucket/$load_date/configs/storagemanager.cnf $STORAGEMANGER_CNF s3sync $backup_bucket/$load_date/storagemanager/cache/data$pm_number $cs_cache/data$pm_number/ " - Done cache/data$pm_number/\n" s3sync $backup_bucket/$load_date/storagemanager/metadata/data$pm_number $cs_metadata/data$pm_number/ " - Done metadata/data$pm_number/\n" if ! 
$skip_mdb; then s3sync $backup_bucket/$load_date/configs/mysql/$pm/ $MARIADB_SERVER_CONFIGS_PATH/ " - Done $MARIADB_SERVER_CONFIGS_PATH/\n"; fi if s3ls "$backup_bucket/$today/storagemanager/journal/data$ASSIGNED_DBROOT" > /dev/null 2>&1; then s3sync $backup_bucket/$load_date/storagemanager/journal/data$pm_number $cs_journal/data$pm_number/ " - Done journal/data$pm_number/\n" - else + else echo " - Done journal/data$pm_number was empty" fi fi; @@ -2435,44 +2581,44 @@ run_restore() printf "[+] Adjusting storagemanager.cnf ... \n" target_bucket=$backup_bucket target_prefix="prefix = $load_date\/columnstoreData\/" - if [ -n "$new_bucket" ]; then - target_bucket=$new_bucket; - target_prefix="# prefix \= cs\/"; + if [ -n "$new_bucket" ]; then + target_bucket=$new_bucket; + target_prefix="# prefix \= cs\/"; fi - if [ -n "$new_region" ]; then + if [ -n "$new_region" ]; then if [ $cloud == "gcp" ]; then endpoint="storage.googleapis.com"; else endpoint="s3.$new_region.amazonaws.com"; fi; - sed -i "s|^endpoint =.*|endpoint = ${endpoint}|g" $STORAGEMANGER_CNF; - sed -i "s|^region =.*|region = ${new_region}|g" $STORAGEMANGER_CNF; + sed -i "s|^endpoint =.*|endpoint = ${endpoint}|g" $STORAGEMANGER_CNF; + sed -i "s|^region =.*|region = ${new_region}|g" $STORAGEMANGER_CNF; echo " - Adjusted endpoint & region"; fi # Removes prefix of the protocol and escapes / for sed target_bucket_name=$(echo "${target_bucket#$protocol://}" | sed "s/\//\\\\\//g") - if [ -n "$new_key" ]; then - sed -i "s|aws_access_key_id =.*|aws_access_key_id = ${new_key}|g" $STORAGEMANGER_CNF; - echo " - Adjusted aws_access_key_id"; + if [ -n "$new_key" ]; then + sed -i "s|aws_access_key_id =.*|aws_access_key_id = ${new_key}|g" $STORAGEMANGER_CNF; + echo " - Adjusted aws_access_key_id"; fi - if [ -n "$new_secret" ]; then - sed -i "s|aws_secret_access_key =.*|aws_secret_access_key = ${new_secret}|g" $STORAGEMANGER_CNF; - echo " - Adjusted aws_secret_access_key"; + if [ -n "$new_secret" ]; then + sed -i 
"s|aws_secret_access_key =.*|aws_secret_access_key = ${new_secret}|g" $STORAGEMANGER_CNF; + echo " - Adjusted aws_secret_access_key"; fi - bucket=$( grep -m 1 "^bucket =" $STORAGEMANGER_CNF | sed "s/\//\\\\\//g"); + bucket=$( grep -m 1 "^bucket =" $STORAGEMANGER_CNF | sed "s/\//\\\\\//g"); sed -i "s/$bucket/bucket = $target_bucket_name/g" $STORAGEMANGER_CNF; echo " - Adjusted bucket"; - + prefix=$( grep -m 1 "^prefix =" $STORAGEMANGER_CNF | sed "s/\//\\\\\//g"); - if [ ! -z "$prefix" ]; then - sed -i "s/$prefix/$target_prefix/g" $STORAGEMANGER_CNF; - echo " - Adjusted prefix"; + if [ ! -z "$prefix" ]; then + sed -i "s/$prefix/$target_prefix/g" $STORAGEMANGER_CNF; + echo " - Adjusted prefix"; fi; # Check permissions chown -R $columnstore_user:$columnstore_user /var/lib/columnstore/ chown -R root:root $MARIADB_SERVER_CONFIGS_PATH/ - + # Confirm S3 connection works if [ $mode == "direct" ]; then echo "[+] Checking S3 Connection ..."; if testS3Connection $xtra_cmd_args; then echo " - S3 Connection passes" ; else handle_early_exit_on_restore "\n[X] S3 Connection issues - retest/configure\n"; fi; fi; printf "[+] MariaDB Columnstore Done\n\n" @@ -2480,10 +2626,10 @@ run_restore() if ! $skip_mdb; then printf "Restore MariaDB Server\n" if [[ -n "$compress_format" ]]; then - # Handle compressed mariadb-backup restore + # Handle compressed mariadb-backup restore mbd_prefix="$backup_bucket/$load_date/$split_file_mdb_prefix.$compress_format" case $compress_format in - pigz) + pigz) printf "[+] Decompressing mariadb-backup bucket -> local ... " rm -rf $MARIADB_PATH/* if [ $cloud == "gcp" ]; then @@ -2494,7 +2640,7 @@ run_restore() chunk_list=$($awscli s3 ls "$mbd_prefix" | awk '{print $NF}') cd / - + if ! 
eval "pigz -dc -p $PARALLEL_THREADS <(for chunk in \$chunk_list; do $awscli s3 cp "$backup_bucket/$load_date/\$chunk" -; done) | mbstream -p $PARALLEL_THREADS -x -C $MARIADB_PATH"; then handle_early_exit_on_restore "Failed to decompress mariadb backup\ncommand:\npigz -dc -p $PARALLEL_THREADS <(for chunk in \$chunk_list; do $awscli s3 cp "$backup_bucket/$load_date/\$chunk" -; done) | mbstream -p $PARALLEL_THREADS -x -C $MARIADB_PATH \n" fi; @@ -2504,29 +2650,29 @@ run_restore() if eval "mariabackup --prepare --target-dir=$MARIADB_PATH $xtra_cmd_args"; then printf " Done\n"; else handle_early_exit_on_restore "Failed to --prepare\nmariabackup --prepare --target-dir=$MARIADB_PATH"; fi; ;; *) # unknown option - handle_early_exit_on_backup "\nunknown compression flag: $compress_format\n" + handle_early_exit_on_backup "\nunknown compression flag: $compress_format\n" esac - + else # Copy the MariaDB data, prepare and put in place printf " - Copying down MariaDB data ... " - rm -rf $backup_location$load_date/mysql/ + rm -rf $backup_location$load_date/mysql/ mkdir -p $backup_location$load_date/mysql/ s3sync $backup_bucket/$load_date/mysql/ $backup_location$load_date/mysql " Done\n" # Run prepare and copy back printf " - Running mariabackup --prepare ... " - if eval "mariabackup --prepare --target-dir=$backup_location$load_date/mysql/ $xtra_cmd_args"; then - printf " Done\n"; - else - echo "failed"; + if eval "mariabackup --prepare --target-dir=$backup_location$load_date/mysql/ $xtra_cmd_args"; then + printf " Done\n"; + else + echo "failed"; fi; rm -rf /var/lib/mysql/* > /dev/null printf " - Running mariabackup --copy-back ... 
" - if eval "mariabackup --copy-back --target-dir=$backup_location$load_date/mysql/ $xtra_cmd_args"; then - printf " Done\n"; - else - echo "failed"; + if eval "mariabackup --copy-back --target-dir=$backup_location$load_date/mysql/ $xtra_cmd_args"; then + printf " Done\n"; + else + echo "failed"; fi; fi; @@ -2543,13 +2689,13 @@ run_restore() fi if [[ $pm == "pm1" ]]; then restore_columnstore_values; fi; - + end=$(date +%s) runtime=$((end-start)) - printf "\n[+] Runtime: $runtime\n" - printf "[+] $final_message\n\n" - if ! $quiet; then - if [ $pm == "pm1" ]; then + printf "\nRuntime: $runtime\n" + printf "$final_message\n\n" + if ! $quiet; then + if [ $pm == "pm1" ]; then echo -e " - Last you need to manually configure mariadb replication between nodes" echo -e " - systemctl start mariadb " echo -e " - systemctl start mariadb-columnstore-cmapi " @@ -2571,24 +2717,28 @@ load_default_dbrm_variables() { backup_location=/tmp/dbrm_backups STORAGEMANGER_CNF="/etc/columnstore/storagemanager.cnf" storage=$(grep -m 1 "^service = " $STORAGEMANGER_CNF | awk '{print $3}') + skip_storage_manager=false mode="once" quiet=false - + + list_dbrm_backups=false dbrm_dir="/var/lib/columnstore/data1/systemFiles/dbrm" - if [ "$storage" == "S3" ]; then + if [ "$storage" == "S3" ]; then dbrm_dir="/var/lib/columnstore/storagemanager" fi } -# for next release load_default_dbrm_restore_variables() { - backup_location=/tmp/dbrm_backups + auto_start=true + backup_location="/tmp/dbrm_backups" STORAGEMANGER_CNF="/etc/columnstore/storagemanager.cnf" storage=$(grep -m 1 "^service = " $STORAGEMANGER_CNF | awk '{print $3}') backup_folder_to_restore="" + skip_dbrm_backup=false + skip_storage_manager=false dbrm_dir="/var/lib/columnstore/data1/systemFiles/dbrm" - if [ "$storage" == "S3" ]; then + if [ "$storage" == "S3" ]; then dbrm_dir="/var/lib/columnstore/storagemanager" fi } @@ -2597,35 +2747,39 @@ print_dbrm_backup_help_text() { echo " Columnstore DBRM Backup - -m | --mode 'loop' or 'once' ; 
Determines if this script runs in a forever loop sleeping -i minutes or just once - -i | --interval Number of minutes to sleep when --mode loop - -r | --retention-days Number of days of dbrm backups to retain - script will delete based on last update file time - -p | --path path of where to save the dbrm backups on disk - -nb | --name-backup custom name to prefex dbrm backups with + -m | --mode ['loop','once']; Determines if this script runs in a forever loop sleeping -i minutes or just once + -i | --interval Number of minutes to sleep when --mode loop + -r | --retention-days Retain dbrm backups created within the last X days, the rest are deleted + -p | --path path of where to save the dbrm backups on disk + -nb | --name-backup custom name to prefex dbrm backups with + -ssm | --skip-storage-manager skip backing up storagemanager directory Default: ./$0 dbrm_backup -m once --retention-days 7 --path /tmp/dbrm_backups Examples: ./$0 dbrm_backup --mode loop --interval 90 --retention-days 7 --path /mnt/dbrm_backups ./$0 dbrm_backup --mode once --retention-days 7 --path /mnt/dbrm_backups -nb my-one-off-backup - + Cron Example: */60 */3 * * * root bash /root/$0 dbrm_backup -m once --retention-days 7 --path /tmp/dbrm_backups >> /tmp/dbrm_backups/cs_backup.log 2>&1 "; } -# for next release print_dbrm_restore_help_text() { echo " Columnstore DBRM Restore - -p | --path path of where to save the dbrm backups on disk - -d | --directory date or directory chose to restore from + -p | --path path of where to save the dbrm backups on disk + -d | --directory date or directory chose to restore from + -ns | --no-start do not attempt columnstore startup post dbrm_restore + -sdbk| --skip-dbrm-backup skip backing up dbrms brefore restoring + -ssm | --skip-storage-manager skip restoring storagemanager directory - Default: ./$0 dbrm_restore --path /tmp/dbrm_backups + Default: ./$0 dbrm_restore --path /tmp/dbrm_backups Examples: - ./$0 dbrm_restore --path /tmp/dbrm_backups --directory 
dbrm_backup12252023 + ./$0 dbrm_restore --path /tmp/dbrm_backups --directory dbrm_backup_20240318_172842 + ./$0 dbrm_restore --path /tmp/dbrm_backups --directory dbrm_backup_20240318_172842 --no-start "; } @@ -2663,6 +2817,10 @@ parse_dbrms_variables() { shift # past argument shift # past value ;; + -ssm|--skip-storage-manager) + skip_storage_manager=true + shift # past argument + ;; -q | --quiet) quiet=true shift # past argument @@ -2671,6 +2829,10 @@ parse_dbrms_variables() { print_dbrm_backup_help_text; exit 1; ;; + "list") + list_dbrm_backups=true + shift # past argument + ;; *) # unknown option printf "\nunknown flag: $1\n" print_dbrm_backup_help_text @@ -2679,7 +2841,6 @@ parse_dbrms_variables() { done } -# for next release parse_dbrm_restore_variables() { # Dynamic Arguments @@ -2699,6 +2860,18 @@ parse_dbrm_restore_variables() { shift # past argument shift # past value ;; + -ns | --no-start) + auto_start=false + shift # past argument + ;; + -sdbk| --skip-dbrm-backup) + skip_dbrm_backup=true + shift # past argument + ;; + -ssm|--skip-storage-manager) + skip_storage_manager=true + shift # past argument + ;; -h|--help|-help|help) print_dbrm_restore_help_text; exit 1; @@ -2709,6 +2882,11 @@ parse_dbrm_restore_variables() { exit 1; esac done + + if [[ "${backup_location: -1}" == "/" ]]; then + # Remove the final / + backup_location="${backup_location%/}" + fi } confirm_numerical_or_decimal_else_fail() { @@ -2749,9 +2927,9 @@ validation_prechecks_for_dbrm_backup() { confirm_numerical_or_decimal_else_fail "$retention_days" "Retention" # Check backup location exists - if [ ! -d $backup_location ]; then + if [ ! -d $backup_location ]; then echo "[+] Created: $backup_location" - mkdir "$backup_location"; + mkdir "$backup_location"; fi; # Confirm bucket connection @@ -2763,25 +2941,139 @@ validation_prechecks_for_dbrm_backup() { fi; } -# for next release +validation_prechecks_before_listing_restore_options() { + + # confirm backup directory exists + if [ ! 
-d $backup_location ]; then + printf "[!!] Backups Directory does NOT exist --path $backup_location \n" + printf "ls -la $backup_location\n\n" + exit 1; + fi + + # Check if backup directory is empty + if [ -z "$(find "$backup_location" -mindepth 1 | head )" ]; then + printf "[!!] Backups Directory is empty --path $backup_location \n" + printf "ls -la $backup_location\n\n" + exit 1 + fi +} + validation_prechecks_for_dbrm_restore() { + + printf "Prechecks\n" + echo "--------------------------------------------------------------------------" # Confirm storage not empty if [ -z "$storage" ]; then printf "[!] Empty storage: \ncheck: grep -m 1 \"^service = \" \$STORAGEMANGER_CNF | awk '{print \$3}' \n\n"; fi; - # Check backup location exists - if [ ! -d $backup_location ]; then - echo "[!] \$backup_location: Expected directory of dbrm_backups" + # Check backup directory exists + if [ ! -d $backup_location ]; then + printf "[!] \$backup_location: Path of backups does Not exist\n" + printf "Path: $backup_location\n\n" exit 2; fi + # Check specific backup exists + backup_folder_to_restore_dbrms=$backup_folder_to_restore + if [ $storage == "S3" ]; then + backup_folder_to_restore_dbrms="${backup_folder_to_restore}/dbrms" + fi + + if [ ! -d "${backup_location}/${backup_folder_to_restore_dbrms}" ]; then + printf "[!] \$backup_folder_to_restore: Path of backup to restore does Not exist\n" + printf "Path: ${backup_location}/${backup_folder_to_restore_dbrms}\n\n" + exit 2; + else + echo " - Backup directory exists" + if [ "$(ls -A ${backup_location}/${backup_folder_to_restore_dbrms})" ]; then + + expected_files=( + "BRM_saves_current" + "BRM_saves_em" + "BRM_saves_journal" + "BRM_saves_vbbm" + "BRM_saves_vss" + "BRM_savesA_em" + "BRM_savesA_vbbm" + "BRM_savesA_vss" + "BRM_savesB_em" + "BRM_savesB_vbbm" + "BRM_savesB_vss" + "oidbitmap" + "SMTxnID" + ) + + # Check if all expected files exist + for file in "${expected_files[@]}"; do + if [ ! 
-f "${backup_location}/${backup_folder_to_restore_dbrms}/${file}" ]; then + printf "[!] File not found: ${file} in the DBRM backup directory\n" + printf "Path: ${backup_location}/${backup_folder_to_restore_dbrms}/${file}\n\n" + exit 2; + fi + done + + # For S3 check storagemanager dir exists in backup unless skip storagemanager is passed + if [ "$storage" == "S3" ] && [ $skip_storage_manager == false ]; then + if [ ! -d "${backup_location}/${backup_folder_to_restore}/metadata" ]; then + printf "\n[!!] Path Not Found: ${backup_location}/${backup_folder_to_restore}/metadata \n" + printf "Retry with a different backup to restore or use flag --skip-storage-manager\n\n" + exit 2; + fi; + fi + + + printf " - Backup contains all files\n" + + else + printf "[!] No files found in the DBRM backup directory\n" + printf "Path: ${backup_location}/${backup_folder_to_restore_dbrms}\n\n" + exit 2; + fi + fi + # Confirm bucket connection if [ "$storage" == "S3" ]; then - if ! testS3Connection 1>/dev/null 2>/dev/null; then + if testS3Connection 1>/dev/null 2>/dev/null; then + echo " - S3 Connection works" + else printf "\n[!] Failed testS3Connection\n\n" exit 1; fi fi; + # Download cs_package_manager.sh if not exists + if [ ! -f "cs_package_manager.sh" ]; then + wget https://raw.githubusercontent.com/mariadb-corporation/mariadb-columnstore-engine/develop/cmapi/scripts/cs_package_manager.sh; chmod +x cs_package_manager.sh; + fi; + if source cs_package_manager.sh source ;then + echo " - Sourced cs_package_manager.sh" + else + printf "\n[!!] 
Failed to source cs_package_manager.sh\n\n" + exit 1; + + fi + + # Confirm the function exists and the source of cs_package_manager.sh worked + if command -v check_package_managers &> /dev/null; then + # The function exists, call it + check_package_managers + else + echo "Error: 'check_package_managers' function not found via cs_package_manager.sh"; + exit 1; + fi + cs_package_manager_functions=( + "start_cmapi" + "start_mariadb" + "init_cs_up" + ) + + for func in "${cs_package_manager_functions[@]}"; do + if command -v $func &> /dev/null; then + continue; + else + echo "Error: '$func' function not found via cs_package_manager.sh"; + exit 1; + fi + done } process_dbrm_backup() { @@ -2789,16 +3081,30 @@ process_dbrm_backup() { load_default_dbrm_variables parse_dbrms_variables "$@"; + if $list_dbrm_backups; then + validation_prechecks_before_listing_restore_options + printf "\nExisting DBRM Backups\n"; + list_restore_options_from_backups "$@" + echo "--------------------------------------------------------------------------" + printf "Restore with ./$0 dbrm_restore --path $backup_location --directory \n\n" + exit 0; + fi; + if ! 
$quiet ; then - printf "\n[+] Inputs\n"; - printf " CS Storage: $storage\n"; - printf " Source: $dbrm_dir\n"; - printf " Backups: $backup_location\n"; - if [ "$mode" == "loop" ]; then - printf " Interval: $backup_interval_minutes minutes\n"; + + printf "\nDBRM Backup\n"; + echo "--------------------------------------------------------------------------" + if [ "$storage" == "S3" ]; then echo "Skips: Storagemanager($skip_storage_manager)"; fi; + echo "--------------------------------------------------------------------------" + printf "CS Storage: $storage\n"; + printf "Source: $dbrm_dir\n"; + printf "Backups: $backup_location\n"; + if [ "$mode" == "loop" ]; then + printf "Interval: $backup_interval_minutes minutes\n"; fi; - printf " Retention: $retention_days day(s)\n" - printf " Mode: $mode\n\n" + printf "Retention: $retention_days day(s)\n" + printf "Mode: $mode\n" + echo "--------------------------------------------------------------------------" fi; validation_prechecks_for_dbrm_backup @@ -2809,12 +3115,17 @@ process_dbrm_backup() { timestamp=$(date +%Y%m%d_%H%M%S) backup_folder="$backup_location/${backup_base_name}_${timestamp}" mkdir -p "$backup_folder" - - # Copy files to the backup directory - cp -arp "$dbrm_dir"/* "$backup_folder" - if [ "$storage" == "S3" ]; then + # Copy files to the backup directory + if [[ $skip_storage_manager == false || $storage == "LocalStorage" ]]; then + if ! $quiet; then printf " - copying $dbrm_dir ..."; fi; + cp -arp "$dbrm_dir"/* "$backup_folder" + if ! $quiet; then printf " Done\n"; fi; + fi + + if [ "$storage" == "S3" ]; then # smcat em files to disk + if ! 
$quiet; then printf " - copying DBRMs from bucket ..."; fi; mkdir $backup_folder/dbrms/ smls /data1/systemFiles/dbrm 2>/dev/null > $backup_folder/dbrms/dbrms.txt smcat /data1/systemFiles/dbrm/BRM_saves_current 2>/dev/null > $backup_folder/dbrms/BRM_saves_current @@ -2828,190 +3139,524 @@ process_dbrm_backup() { smcat /data1/systemFiles/dbrm/BRM_savesB_em 2>/dev/null > $backup_folder/dbrms/BRM_savesB_em smcat /data1/systemFiles/dbrm/BRM_savesB_vbbm 2>/dev/null > $backup_folder/dbrms/BRM_savesB_vbbm smcat /data1/systemFiles/dbrm/BRM_savesB_vss 2>/dev/null > $backup_folder/dbrms/BRM_savesB_vss + smcat /data1/systemFiles/dbrm/oidbitmap 2>/dev/null > $backup_folder/dbrms/oidbitmap + smcat /data1/systemFiles/dbrm/SMTxnID 2>/dev/null > $backup_folder/dbrms/SMTxnID + smcat /data1/systemFiles/dbrm/tablelocks 2>/dev/null > $backup_folder/dbrms/tablelocks + if ! $quiet; then printf " Done\n"; fi; fi - - # Clean up old backups - # example: find /tmp/dbrmBackups/ -maxdepth 1 -type d -name "dbrm_backup_*" -mtime +1 -exec rm -r {} \; - find "$backup_location" -maxdepth 1 -type d -name "${backup_base_name}_*" -mtime +$retention_days -exec rm -r {} \; - printf "[+] Created: $backup_folder\n" - if [ "$mode" == "once" ]; then break; fi; + if [ $retention_days -gt 0 ] ; then + # Clean up old backups + # example: find /tmp/dbrm_backups -maxdepth 1 -type d -name "dbrm_backup_*" -mtime +1 -exec rm -r {} \; + if ! $quiet; then printf " - applying retention policy ..."; fi; + find "$backup_location" -maxdepth 1 -type d -name "${backup_base_name}_*" -mtime +$retention_days -exec rm -r {} \; + if ! $quiet; then printf " Done\n"; fi; + fi; - printf "[+] Sleeping ... $sleep_seconds seconds\n" + printf "Created: $backup_folder\n" + + + if [ "$mode" == "once" ]; then + end=$(date +%s) + runtime=$((end-start)) + if ! $quiet; then printf "Runtime: $runtime\n"; fi; + break; + fi; + + printf "Sleeping ... $sleep_seconds seconds\n" sleep "$sleep_seconds" done - if ! 
$quiet; then printf "[+] Complete\n\n"; fi; + if ! $quiet; then printf "Complete\n\n"; fi; +} + +is_cmapi_installed() { + + cmapi_installed_command="" + case $package_manager in + yum ) + cmapi_installed_command="yum list installed MariaDB-columnstore-cmapi &> /dev/null;"; + ;; + apt ) + cmapi_installed_command="dpkg-query -s mariadb-columnstore-cmapi &> /dev/null;"; + ;; + *) # unknown option + echo "\npackage manager not implemented: $package_manager\n" + exit 2; + esac + + if eval $cmapi_installed_command ; then + return 0; + else + return 1; + + fi; +} + +start_mariadb_cmapi_columnstore() { + + start_mariadb + start_cmapi + init_cs_up + + # For verbose debugging + #grep -i rollbackAll /var/log/mariadb/columnstore/debug.log | tail -n 3 | awk '{ print $1, $2, $3, $(NF-2), $(NF-1), $NF }' + +} + +# Currently assumes systemd installed +shutdown_columnstore_mariadb_cmapi() { + + pf=35 + init_cs_down + wait_cs_down 0 + + printf "%-${pf}s ... " " - Stopping MariaDB Server" + if ! systemctl stop mariadb; then + echo "[!!] Failed to stop mariadb" + exit 1; + else + printf "Done\n" + fi + + if is_cmapi_installed ; then + printf "%-${pf}s ... " " - Stopping CMAPI" + if ! systemctl stop mariadb-columnstore-cmapi; then + echo "[!!] Failed to stop CMAPI" + exit 1; + else + printf "Done\n" + fi + fi +} + +# Input +# $1 - directory to search +# Output +# subdir_dbrms +# latest_em_file +# em_file_size +# em_file_created +# em_file_full_path +# storagemanager_dir_exists +get_latest_em_from_directory() { + + subdir_dbrms="" + latest_em_file="" + em_file_size="" + em_file_created="" + em_file_full_path="" + storagemanager_dir_exists=true + + # Find the most recently modified file in the current subdirectory + if [ $storage == "S3" ]; then + subdir_dbrms="${1}/dbrms/" + subdir_metadata="${1}/metadata/data1/systemFiles/dbrm/" + + # Handle missing metadata directory + if [ ! 
-d $subdir_dbrms ]; then + printf "%-45s Missing dbrms sub directory\n" "$(basename $1)" + return 1; + fi + + + if [ -d "${1}/metadata" ]; then + latest_em_meta_file=$(find "${subdir_metadata}" -maxdepth 1 -type f -name "BRM_saves*_em.meta" -exec ls -lat {} + | awk 'NR==1 {printf "%-12s %-4s %-2s %-5s %s\n", $5, $6, $7, $8, $9}'| head -n 1 ) + else + # Handle missing metadata directory & guess the latest em file based on the largest size + + # Example: find /tmp/dbrm_backups/dbrm_backup_20240605_180906/dbrms -maxdepth 1 -type f -name "BRM_saves*_em" -exec ls -lhS {} + + latest_em_meta_file=$(find "${subdir_dbrms}" -maxdepth 1 -type f -name "BRM_saves*_em" -exec ls -lhS {} + | awk 'NR==1 {printf "%-12s %-4s %-2s %-5s %s\n", $5, $6, $7, $8, $9}'| head -n 1) + storagemanager_dir_exists=false + fi + + em_meta_file_name=$(basename "$latest_em_meta_file") + latest_em_file="$subdir_dbrms$(echo $em_meta_file_name | sed 's/\.meta$//' )" + em_file_size=$(ls -la "$latest_em_file" | awk '{print $5}' ) + em_file_created=$(echo "$latest_em_meta_file" | awk '{print $2,$3,$4}' ) + em_file_full_path=$latest_em_file + + if [ ! 
-f $latest_em_file ]; then + echo "S3 List Option: Failed to find $latest_em_file" + exit; + fi + else + subdir_dbrms="$1" + latest_em_file=$(find "${subdir_dbrms}" -maxdepth 1 -type f -name "BRM_saves*_em" -exec ls -lat {} + | awk 'NR==1 {printf "%-12s %-4s %-2s %-5s %s\n", $5, $6, $7, $8, $9}'| head -n 1) + em_file_size=$(echo "$latest_em_file" | awk '{print $1}' ) + em_file_created=$(echo "$latest_em_file" | awk '{print $2,$3,$4}' ) + em_file_full_path=$(echo $latest_em_file | awk '{print $NF}' ) + fi +} + +list_restore_options_from_backups() { + + echo "--------------------------------------------------------------------------" + printf "%-45s %-13s %-15s %-12s %-12s %-10s %-10s\n" "Options" "Last-Updated" "Extent Map" "EM-Size" "Journal-Size" "VBBM-Size" "VSS-Size" + + # Iterate over subdirectories + for subdir in "${backup_location}"/*; do + + get_latest_em_from_directory "$subdir" + + if [ -f "${subdir_dbrms}/BRM_saves_journal" ]; then + em_file_name=$(basename "$em_file_full_path") + version_prefix=${em_file_name::-3} + journal_file=$(ls -la "${subdir_dbrms}/BRM_saves_journal" 2>/dev/null | awk 'NR==1 {print $5}' ) + vbbm_file=$(ls -la "${subdir_dbrms}/${version_prefix}_vbbm" 2>/dev/null | awk 'NR==1 {print $5}' ) + vss_file=$(ls -la "${subdir_dbrms}/${version_prefix}_vss" 2>/dev/null | awk 'NR==1 {print $5}' ) + if [ $storagemanager_dir_exists == false ]; then + vss_file+=" (No Storagemanager Dir)" + fi; + printf "%-45s %-13s %-15s %-12s %-12s %-10s %-10s\n" "$(basename "$subdir")" "$em_file_created" "$em_file_name" "$em_file_size" "$journal_file" "$vbbm_file" "$vss_file" + fi + done + + } -# for next release process_dbrm_restore() { - + load_default_dbrm_restore_variables parse_dbrm_restore_variables "$@" # print current job variables - printf "\n[+] Inputs\n"; - printf " Backups Directory: $backup_location\n"; - printf " Backup to Restore: $backup_folder_to_restore\n"; - printf " CS Storage: $storage\n"; - printf " Restore Target: $dbrm_dir\n"; + printf 
"\nDBRM Restore Variables\n" + echo "--------------------------------------------------------------------------" + echo "Skips: DBRM Backup($skip_dbrm_backup) Storagemanager($skip_storage_manager)" + echo "--------------------------------------------------------------------------" + printf "CS Storage: $storage \n" + printf "Backups Directory: $backup_location \n" + printf "Backup to Restore: $backup_folder_to_restore \n\n" - # Display restore options - if [ -z "$backup_folder_to_restore" ]; then - printf "\n[!!] --directory is empty\n" - printf "See Options Below: \n" - printf "%-30s %-12s %-13s %-63s %-12s\n" "Option" "Size" "Last updated" "File Location" "Journal Size"; - - # Iterate over subdirectories - for subdir in "${backup_location}"/*; do - # Find the most recently modified file in the current subdirectory - latest_em_file=$(find "${subdir}" -maxdepth 1 -type f -name "BRM_saves*_em" -exec ls -lt --time=ctime {} + | awk 'NR==1 {printf "%-12s %-4s %-2s %-5s %s\n", $5, $6, $7, $8, $9}'| head -n 1) - journal_file=$(ls -la "${subdir}/BRM_saves_journal" | awk 'NR==1 {print $5}' ) - printf "%-30s %-90s %-12s\n" "$(basename "$subdir")" "$latest_em_file" "$journal_file"; - done + validation_prechecks_before_listing_restore_options - printf "\n[!!] define which backup to restore eg. --directory dbrm_backup_20240103_183536\n" - printf "exiting ...\n\n" + # Display restore options + if [ -z "$backup_folder_to_restore" ]; then + printf "[!] Pick Option\n" + list_restore_options_from_backups "$@" + printf "\nExample: \n" + printf " --directory dbrm_backup_20240103_183536 \n\n" + printf "Define which backup to restore via flag --directory \n" + echo "Rerun: $0 $@ --directory xxxxxxx" + echo "" exit 1; fi; - + validation_prechecks_for_dbrm_restore + shutdown_columnstore_mariadb_cmapi - if [ ! 
-f "cs_package_manager.sh" ]; then - wget https://raw.githubusercontent.com/mariadb-corporation/mariadb-columnstore-engine/develop/extra/cs_package_manager.sh; chmod +x cs_package_manager.sh; - fi; - source cs_package_manager.sh source + # Take an automated backup + if [[ $skip_dbrm_backup == false ]]; then + printf " - Saving a DBRM backup before restoring ... \n" + if ! process_dbrm_backup -p $backup_location -r 9999 -nb dbrms_before_restore_backup --quiet ; then + echo "[!!] Failed to take a DBRM backup before restoring" + echo "exiting ..." + exit 1; + fi; + fi; - # Confirm the function exists and the source of cs_package_manager.sh worked - if command -v check_package_managers &> /dev/null; then - # The function exists, call it - check_package_managers - else - echo "Error: 'check_package_managers' function not found via cs_package_manager.sh"; + # Detect newest date _em from the set, if smaller than the current one throw a warning + get_latest_em_from_directory "${backup_location}/${backup_folder_to_restore}" + if [ ! -f $em_file_full_path ]; then + echo "[!] Failed to parse _em file: $em_file_full_path doesnt exist" + exit 1; + fi; + echo "em_file_full_path: $em_file_full_path" + + echo "latest_em_file: $latest_em_file" + echo "em_file_size: $em_file_size" + echo "em_file_created: $em_file_created" + echo "storagemanager_dir_exists: $storagemanager_dir_exists" + echo "subdir_dbrms: $subdir_dbrms" + + em_file_name=$(basename $em_file_full_path) + prefix="${em_file_name%%_em}" + echo "em_file_name: $em_file_name" + echo "prefix: $prefix" + + if [ -z "$em_file_name" ]; then + printf "[!] Undefined EM file name\n" + printf "find "${backup_location}/${backup_folder_to_restore_dbrms}" -maxdepth 1 -type f -name "BRM_saves*_em" -exec ls -lat {} + \n\n" exit 1; fi - init_cs_down - wait_cs_down 0 - if ! process_dbrm_backup -nb dbrms_before_restore_backup --quiet ; then - echo "[!!] Failed to take a DBRM backup before restoring" - echo "exiting ..." 
- exit 1; - fi; - # split logic between S3 & LocalStorage - if [ $storage == "S3" ]; then + if [ $storage == "S3" ]; then process_s3_dbrm_restore else process_localstorage_dbrm_restore fi; } -# for next release -process_s3_dbrm_restore() { - - # shuffle DBRMs - detect newest date _em from the set, if smaller than the current one throw a warning - - # manually run load_brm - - # manually run save_brm - - # start columnstore - - # run a basic health check - return 1; +# $1 - File to cat/ upload into S3 +# $2 - Location in storagemanager to overwrite +# example: smput_or_error "${backup_location}/${backup_folder_to_restore_dbrms}/${prefix}_em" "/data1/systemFiles/dbrm/BRM_saves_em" +smput_or_error() { + if ! cat "$1" | smput "$2" 2>/dev/null; then + printf "[!] Failed to smput: $1\n" + else + printf "." + fi } -# for next release +# Depends on +# em_file - most recent EM +# em_file_full_path - full path to most recent EM +# em_file_name - Just the file name of the EM +# prefix - Prefix of the EM file +process_s3_dbrm_restore() { + + printf_offset=45 + printf "\nBefore DBRMs Restore\n" + echo "--------------------------------------------------------------------------" + if ! command -v smls > /dev/null; then + printf "[!] smls not installed ... Exiting\n\n" + exit 1 + else + current_status=$(smls /data1/systemFiles/dbrm/ 2>/dev/null); + if [ $? -ne 0 ]; then + printf "\n[!] Failed to get smls status\n\n" + exit 1 + fi + echo "$current_status" | grep -E "BRM_saves_em|BRM_saves_vbbm|BRM_saves_vss|BRM_saves_journal|BRM_saves_current|oidbitmap" + fi + + printf "\nRestoring DBRMs\n" + echo "--------------------------------------------------------------------------" + printf " - Desired EM: $em_file_full_path\n" + printf " - Copying DBRMs: ${backup_location}/${backup_folder_to_restore_dbrms} -> S3 Bucket \n" + + printf "\nPreparing\n" + printf "%-${printf_offset}s ..." " - Clearing storagemanager caches" + if [ ! 
-d "$dbrm_dir/cache" ]; then + echo "Directory $dbrm_dir/cache does not exist." + exit 1 + fi + for cache_dir in "${dbrm_dir}/cache"/*; do + if [ -d "${dbrm_dir}/cache/${cache_dir}" ]; then + echo " - Removing Cache: $cache_dir" + else + printf "." + fi + done + printf " Success\n" + + printf "%-${printf_offset}s ... " " - Starting mcs-storagemanager" + if ! systemctl start mcs-storagemanager ; then + echo "[!!] Failed to start mcs-storagemanager " + exit 1; + else + printf "Done\n" + fi + + printf "\nRestoring\n" + printf "%-${printf_offset}s " " - Restoring Prefix: $prefix " + smput_or_error "${backup_location}/${backup_folder_to_restore_dbrms}/${prefix}_em" "/data1/systemFiles/dbrm/BRM_saves_em" + smput_or_error "${backup_location}/${backup_folder_to_restore_dbrms}/${prefix}_vbbm" "/data1/systemFiles/dbrm/BRM_saves_vbbm" + smput_or_error "${backup_location}/${backup_folder_to_restore_dbrms}/${prefix}_vss" "/data1/systemFiles/dbrm/BRM_saves_vss" + if ! echo "BRM_saves" | smput /data1/systemFiles/dbrm/BRM_saves_current 2>/dev/null; then + printf "[!] Failed to smput: BRM_saves_current\n" + else + printf "." + fi + + em_files=( + "BRM_saves_journal" + "oidbitmap" + "SMTxnID" + "tablelocks" + ) + for file in "${em_files[@]}"; do + if [ ! -f "${backup_location}/${backup_folder_to_restore_dbrms}/${file}" ]; then + printf "[!] File not found: ${file} in the S3 DBRM backup directory\n" + printf "Path: ${backup_location}/${backup_folder_to_restore_dbrms}/${file}\n\n" + continue + fi + smput_or_error "${backup_location}/${backup_folder_to_restore_dbrms}/${file}" "/data1/systemFiles/dbrm/${file}" + done + printf " Success\n" + + printf "%-${printf_offset}s ... " " - Stopping mcs-storagemanager" + if ! systemctl stop mcs-storagemanager ; then + echo "[!!] Failed to stop mcs-storagemanager " + exit 1; + else + printf "Done\n" + fi + printf "%-${printf_offset}s ... " " - clearShm" + clearShm + printf "Done\n" + sleep 2 + + printf "%-${printf_offset}s ... 
" " - Starting mcs-storagemanager" + if ! systemctl start mcs-storagemanager ; then + echo "[!!] Failed to start mcs-storagemanager " + exit 1; + else + printf "Done\n" + fi + + manually_run_loadbrm_and_savebrm + + printf "\nAfter DBRM Restore\n" + echo "--------------------------------------------------------------------------" + current_status=$(smls /data1/systemFiles/dbrm/ 2>/dev/null); + if [ $? -ne 0 ]; then + printf "\n[!] Failed to get smls status\n\n" + exit 1 + fi + echo "$current_status" | grep -E "BRM_saves_em|BRM_saves_vbbm|BRM_saves_vss|BRM_saves_journal|BRM_saves_current|oidbitmap" + + if $auto_start; then + printf "\nStartup\n" + echo "--------------------------------------------------------------------------" + start_mariadb_cmapi_columnstore + fi + + # printf "\n[+] Health Check ...\n" + # sleep 2 + # # run a basic health check + # mariadb -e "create database if not exists $backup_folder_to_restore" + # mariadb $backup_folder_to_restore -e "drop table if exists t1" + # mariadb $backup_folder_to_restore -e "create table t1 (a int) engine=columnstore" + # mariadb $backup_folder_to_restore -e "insert into t1 values (1)" + # mariadb $backup_folder_to_restore -e "update t1 set a=1" + # mariadb $backup_folder_to_restore -e "delete from t1 where a=1" + # mariadb -e "drop database if exists $backup_folder_to_restore" + + printf "\nDBRM Restore Complete\n\n" +} + +# Depends on +# em_file - most recent EM +# em_file_full_path - full path to most recent EM +# em_file_name - Just the file name of the EM +# prefix - Prefix of the EM process_localstorage_dbrm_restore() { - # shuffle DBRMs - detect newest date _em from the set, if smaller than the current one throw a warning - em_file=$(find "${backup_location}/${backup_folder_to_restore}" -maxdepth 1 -type f -name "BRM_saves*_em" -exec ls -lt --time=ctime {} + | head -n 1 ) - #em_file=$(find /tmp/dbrm_backups/dbrm_backup_20240103_195653 -maxdepth 1 -type f -name "BRM_saves*_em" -exec ls -lt --time=ctime {} + | 
head -n 1) - em_file_full_path=$(echo $em_file | awk '{print $9}' ) - if [ ! -f $em_file_full_path ]; then - echo "[!] Failed to parse _em file: $em_file_full_path doesnt exist" + printf "\nBefore DBRMs Restore\n" + echo "--------------------------------------------------------------------------" + ls -la "${dbrm_dir}" | grep -E "BRM_saves_em|BRM_saves_vbbm|BRM_saves_vss|BRM_saves_journal|BRM_saves_current" + printf " - Clearing active DBRMs ... " + if rm -rf $dbrm_dir ; then + printf "Done\n" + else + echo "Failed to delete files in $dbrm_dir " exit 1; - fi; - em_file_name=$(basename $em_file_full_path) - prefix="${em_file_name%%_em}" + fi - echo "[+] Clearing Active DBRMs" - rm -rf $dbrm_dir + printf "\nRestoring DBRMs\n" + echo "--------------------------------------------------------------------------" + printf " - Desired EM: $em_file_full_path\n" + printf " - Copying DBRMs: \"${backup_location}/${backup_folder_to_restore_dbrms}\" -> \"$dbrm_dir\" \n" + cp -arp "${backup_location}/${backup_folder_to_restore_dbrms}" $dbrm_dir - echo "[+] Copying backup DBRMs to active DBRMs dir" - cp -arp "${backup_location}/${backup_folder_to_restore}" $dbrm_dir - - echo "[+] Restoring Prefix: $prefix" - vbbm_name="${prefix}_vbbm" - vss_name="${prefix}_vss" - cp -arp "${dbrm_dir}/$em_file_name" "${dbrm_dir}/BRM_saves_em" - cp -arp "${dbrm_dir}/$vbbm_name" "${dbrm_dir}/BRM_saves_vbbm" - cp -arp "${dbrm_dir}/$vss_name" "${dbrm_dir}/BRM_saves_vss" - echo "Primary Extent Map Files Now:" - ls -la "${dbrm_dir}/BRM_saves_em" - ls -la "${dbrm_dir}/BRM_saves_vbbm" - ls -la "${dbrm_dir}/BRM_saves_vss" - ls -la "${dbrm_dir}/BRM_saves_journal" - - sleep 2 - + if [ "$prefix" != "BRM_saves" ]; then + printf " - Restoring Prefix: $prefix \n" + vbbm_name="${prefix}_vbbm" + vss_name="${prefix}_vss" + cp -arpf "${dbrm_dir}/$em_file_name" "${dbrm_dir}/BRM_saves_em" + cp -arpf "${dbrm_dir}/$vbbm_name" "${dbrm_dir}/BRM_saves_vbbm" + cp -arpf "${dbrm_dir}/$vss_name" "${dbrm_dir}/BRM_saves_vss" + fi 
+ echo "BRM_saves" > "${dbrm_dir}/BRM_saves_current" + chown -R mysql:mysql "${dbrm_dir}" clearShm - # manually run load_brm - printf "\n\n[+] Running load_brm...\n" + sleep 2 + + manually_run_loadbrm_and_savebrm + + printf "\nAfter DBRM Restore\n" + echo "--------------------------------------------------------------------------" + ls -la "${dbrm_dir}" | grep -E "BRM_saves_em|BRM_saves_vbbm|BRM_saves_vss|BRM_saves_journal|BRM_saves_current" + + if $auto_start; then + printf "\nStartup\n" + echo "--------------------------------------------------------------------------" + start_mariadb_cmapi_columnstore + fi + + # printf "\n[+] Health Check ...\n" + # sleep 2 + # # run a basic health check + # mariadb -e "create database if not exists $backup_folder_to_restore" + # mariadb $backup_folder_to_restore -e "drop table if exists t1" + # mariadb $backup_folder_to_restore -e "create table t1 (a int) engine=columnstore" + # mariadb $backup_folder_to_restore -e "insert into t1 values (1)" + # mariadb $backup_folder_to_restore -e "update t1 set a=1" + # mariadb $backup_folder_to_restore -e "delete from t1 where a=1" + # mariadb -e "drop database if exists $backup_folder_to_restore" + + printf "\nDBRM Restore Complete\n\n" +} + +manually_run_loadbrm_and_savebrm() { + + pf_offset=45 + printf "%-${pf_offset}s ... " " - Running load_brm" if ! sudo -su mysql /usr/bin/load_brm /var/lib/columnstore/data1/systemFiles/dbrm/BRM_saves ; then - echo "[!!] Failed to complete load_brm successfully" + printf "\n[!!] Failed to complete load_brm successfully\n\n" exit 1; fi; + printf "%-${pf_offset}s ... " " - Starting mcs-controllernode" if ! systemctl start mcs-controllernode ; then echo "[!!] Failed to start mcs-controllernode " exit 1; + else + printf "Done\n" fi; - echo "[+] Confirming extent map readable..." + printf "%-${pf_offset}s ... " " - Confirming extent map readable" if ! editem -i >/dev/null ; then echo "[!!] 
Failed to run editem -i (read the EM)" exit 1; + else + printf "Done\n" fi; - echo "[+] Running save_brm..." + printf "%-${pf_offset}s ... \n" " - Running save_brm" if ! sudo -u mysql /usr/bin/save_brm ; then echo "[!!] Failed to run save_brm" exit 1; fi + printf "%-${pf_offset}s ... " " - Stopping mcs-controllernode" if ! systemctl stop mcs-controllernode; then echo "[!!] Failed to stop mcs-controllernode" exit 1; + else + printf "Done\n" fi - clearShm - - printf "\n[+] Turning on columnstore ...\n" - start_cs_cmapi_via_curl + if [ $storage == "S3" ]; then + printf "%-${pf_offset}s ... " " - Stopping mcs-storagemanager" + if ! systemctl stop mcs-storagemanager ; then + echo "[!!] Failed to stop mcs-storagemanager " + exit 1; + else + printf "Done\n" + fi + fi; + + printf "%-${pf_offset}s ... " " - clearShm" + clearShm + printf "Done\n" + sleep 2 - # run a basic health check - backup_folder_to_restore="derp" - mariadb -e "create database if not exists $backup_folder_to_restore" - # confirm $backup_folder_to_restore doesnt exist - mariadb $backup_folder_to_restore -e "create table t1 (a int) engine=columnstore" - mariadb $backup_folder_to_restore -e "insert into t1 values (1)" - mariadb $backup_folder_to_restore -e "update t1 set a=1" - mariadb $backup_folder_to_restore -e "delete from t1 where a=1" - mariadb -e "drop database $backup_folder_to_restore" } process_backup() -{ +{ load_default_backup_variables; parse_backup_variables "$@"; print_backup_variables; check_for_dependancies "backup"; validation_prechecks_for_backup; + apply_backup_retention_policy issue_write_locks; run_save_brm; run_backup; @@ -3027,27 +3672,37 @@ process_restore() run_restore; } +print_mcs_bk_mgr_version_info() { + echo "MariaDB Columnstore Backup Manager" + echo "Version: $mcs_bk_manager_version" +} + case "$action" in - 'help' | '--help' | '-help' | '-h') + 'help' | '--help' | '-help' | '-h') print_action_help_text ;; - 'backup') + 'backup') process_backup "$@"; ;; - 'dbrm_backup') + 
'dbrm_backup') process_dbrm_backup "$@"; ;; - 'dbrm_restore') - # for next release + 'dbrm_restore') process_dbrm_restore "$@"; ;; - 'restore') + 'restore') process_restore "$@"; ;; - *) - printf "\nunknown action: $action\n" + '-v' | 'version' ) + print_mcs_bk_mgr_version_info + ;; + 'source' ) + return 0; + ;; + *) + printf "\nunknown action: $action\n" print_action_help_text ;; esac -exit 0; \ No newline at end of file +exit 0; diff --git a/extra/cs_package_manager.sh b/extra/cs_package_manager.sh deleted file mode 100644 index 6346fb789..000000000 --- a/extra/cs_package_manager.sh +++ /dev/null @@ -1,839 +0,0 @@ -#!/bin/bash -# Documentation -# ./ cs_package_manager.sh help - -# Variables -enterprise_token="" -dev_drone_key="" - -if [ ! -f /var/lib/columnstore/local/module ]; then pm="pm1"; else pm=$(cat /var/lib/columnstore/local/module); fi; -pm_number=$(echo "$pm" | tr -dc '0-9') -action=$1 - -print_help_text() { - echo "Version 1.0 - -Example Remove: - bash cs_package_manager.sh remove - bash cs_package_manager.sh remove all - -Example Install: - bash cs_package_manager.sh install [enterprise|community|dev] [version|branch] [build num] - bash cs_package_manager.sh install enterprise 10.6.12-8 - bash cs_package_manager.sh install community 11.1 - bash cs_package_manager.sh install dev develop cron/8629 - bash cs_package_manager.sh install dev develop-23.02 pull_request/7256 - -Example Check: - bash cs_package_manager.sh check community - bash cs_package_manager.sh check enterprise -" -} - -wait_cs_down() { - retries=$1; - - if [ $retries -gt 6 ]; then - printf "\n[!!!] Columnstore is still online ... 
exiting \n\n" - exit 2; - fi; - - # if columnstore still online stop - if [ -z $(pidof PrimProc) ]; then - printf "[+] Confirmation: columnstore OFFLINE \n"; - mcs_offine=true - return 1; - else - printf "\n[+] Columnstore is ONLINE - waiting 5s to retry, attempt: $retries...\n"; - sleep 5 - ((retries++)) - wait_cs_down $retries - fi; -} - -print_and_delete() { - printf " - %-25s ..." "$1" - rm -rf $1 - printf " Done\n" -} - -init_cs_down() { - mcs_offine=false - if [ "$pm_number" == "1" ]; then - if [ -z $(pidof PrimProc) ]; then - # printf "\n[+] Columnstore offline already"; - mcs_offine=true - else - - # Adjust for package manager - cmapi_installed_command="" - case $package_manager in - yum ) - cmapi_installed_command="yum list installed MariaDB-columnstore-cmapi &> /dev/null;"; - ;; - apt ) - cmapi_installed_command="dpkg-query -s mariadb-columnstore-cmapi &> /dev/null;"; - ;; - *) # unknown option - echo "\npackage manager not implemented: $package_manager\n" - exit 2; - esac - - # Check cmapi installed - if eval $cmapi_installed_command ; then - - # Check for edge case - if [ "$(mcs cluster status | jq -r '.num_nodes')" == "0" ]; then - printf "[!!] Noticed cmapi installed but no nodes configured...\n" - add_primary_node_cmapi - sleep 1; - fi - - # Stop columnstore - printf "\n[+] Stopping columnstore ... \n"; - if command -v mcs &> /dev/null; then - if ! mcs cluster stop; then - echo "[!] Failed stopping via mcs ... trying cmapi curl" - stop_cs_cmapi_via_curl - fi - else - stop_cs_cmapi_via_curl - fi - else - stop_cs_via_systemctl - fi - fi - fi; -} - -do_yum_remove() { - - if ! command -v yum &> /dev/null ; then - printf "[!!] Cant access yum\n" - exit 1; - fi - - init_cs_down - - wait_cs_down 0 - - printf "[+] Removing packages - $(date) ... 
\n" - - if yum list installed MariaDB-server &>/dev/null; then - systemctl stop mariadb - fi - - if yum list installed MariaDB-columnstore-cmapi &>/dev/null; then - systemctl stop mariadb-columnstore-cmapi - fi - - # remove any mdb rpms on disk - if ls MariaDB-*.rpm &>/dev/null; then - print_and_delete "MariaDB-*.rpm" - fi - - # remove all current MDB packages - if yum list installed MariaDB-* &>/dev/null; then - yum remove MariaDB-* -y - fi - - # remove offical & custom yum repos - print_and_delete "/etc/yum.repos.d/mariadb.repo" - print_and_delete "/etc/yum.repos.d/drone.repo" -} - -do_apt_remove() { - - if ! command -v apt &> /dev/null ; then - printf "[!!] Cant access apt\n" - exit 1; - fi - - if ! command -v dpkg-query &> /dev/null ; then - printf "[!!] Cant access dpkg-query\n" - exit 1; - fi - - init_cs_down - - wait_cs_down 0 - - printf "[+] Removing packages - $(date) ... \n" - - if dpkg-query -s mariadb-server &>/dev/null; then - systemctl stop mariadb - fi - - if dpkg-query -s mariadb-columnstore-cmapi &>/dev/null; then - systemctl stop mariadb-columnstore-cmapi - fi - - # remove any mdb rpms on disk - if ls mariadb*.deb &>/dev/null; then - print_and_delete "mariadb*.deb" - fi - - # remove all current MDB packages - if [ "$(apt list --installed mariadb-* 2>/dev/null | wc -l)" -gt 1 ]; then - apt remove mariadb-* -y - fi - - # remove offical & custom yum repos - print_and_delete "/lib/systemd/system/mariadb.service" - print_and_delete "/lib/systemd/system/mariadb.service.d" - print_and_delete "/etc/apt/sources.list.d/mariadb.list" - print_and_delete "/etc/apt/sources.list.d/drone.list" - systemctl daemon-reload -} - -do_remove() { - - check_operating_system - check_package_managers - - case $distro_info in - centos | rocky ) - do_yum_remove - ;; - # debian ) - - # ;; - ubuntu ) - do_apt_remove - ;; - *) # unknown option - echo "\nos & version not implemented: $distro_info\n" - exit 2; - esac - - if [ "$2" == "all" ]; then - printf "\n[+] Removing all 
columnstore files & dirs\n" - print_and_delete "/var/lib/mysql/" - print_and_delete "/var/lib/columnstore/" - print_and_delete "/etc/my.cnf.d/*" - print_and_delete "/etc/columnstore/*" - fi; - - printf "\n[+] Done\n\n" -} - -check_package_managers() { - - package_manager=''; - if command -v apt &> /dev/null ; then - if ! command -v dpkg-query &> /dev/null ; then - printf "[!!] Cant access dpkg-query\n" - exit 1; - fi - package_manager="apt"; - fi - - if command -v yum &> /dev/null ; then - package_manager="yum"; - fi - - if [ $package_manager == '' ]; then - echo "[!!] No package manager found: yum or apt must be installed" - exit 1; - fi; -} - -check_operating_system() { - - distro_info=$(awk -F= '/^ID=/{gsub(/"/, "", $2); print $2}' /etc/os-release) - version_id=$(grep 'VERSION_ID=' /etc/os-release | awk -F= '{gsub(/"/, "", $2); print $2}' | awk -F. '{print $1}') - - echo "Distro: $distro_info" - echo "Version: $version_id" - - # distros=(centos7 debian11 debian12 rockylinux8 rockylinux9 ubuntu20.04 ubuntu22.04) - case $distro_info in - centos ) - distro="${distro_info}${version_id}" - ;; - debian ) - distro="${distro_info}${version_id}" - ;; - rocky ) - distro="rockylinux${version_id}" - ;; - ubuntu ) - distro="${distro_info}${version_id}" - ;; - *) # unknown option - printf "\ncheck_operating_system: unknown os & version: $distro_info\n" - exit 2; - esac -} - -check_cpu_architecture() { - - architecture=$(uname -m) - echo "CPU: $architecture" - - # arch=(amd64 arm64) - case $architecture in - x86_64 ) - arch="amd64" - ;; - aarch64 ) - arch="arm64" - ;; - *) # unknown option - echo "Error: Unsupported architecture ($architecture)" - esac -} - -check_no_mdb_installed() { - - packages="" - case $distro_info in - centos ) - packages=$(yum list installed | grep -i mariadb) - ;; - # debian ) - - # ;; - rocky ) - packages=$(yum list installed | grep -i mariadb) - ;; - ubuntu ) - packages=$(apt list --installed mariadb-* 2>/dev/null | grep -i mariadb); - ;; - *) # 
unknown option - printf "\ncheck_no_mdb_installed: os & version not implemented: $distro_info\n" - exit 2; - esac - - if [ -n "$packages" ]; then - printf "\nMariaDB packages are installed. Please uninstall them before continuing.\n" - echo $packages; - printf "Example: bash $0 remove\n\n" - exit 2; - fi; -} - -check_aws_cli_installed() { - - if ! command -v aws &> /dev/null ; then - echo "[!] aws cli - binary could not be found" - echo "[+] Installing aws cli ..." - rm -rf aws awscliv2.zip - yum install unzip -y; - curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"; - unzip awscliv2.zip; - sudo ./aws/install; - mv /usr/local/bin/aws /usr/bin/aws; - aws configure set default.s3.max_concurrent_requests 700 - fi -} - -check_dev_build_exists() { - - if ! aws s3 ls $s3_path --no-sign-request &> /dev/null; then - printf "Defined dev build doesnt exist in aws\n\n" - exit 2; - fi; -} - -do_enterprise_apt_install() { - - # Install MariaDB - apt-get clean - apt install mariadb-server -y - sleep 2 - systemctl daemon-reload - systemctl enable mariadb - systemctl start mariadb - - # Install Columnstore - apt install mariadb-plugin-columnstore mariadb-columnstore-cmapi jq -y - systemctl daemon-reload - systemctl enable mariadb-columnstore-cmapi - systemctl start mariadb-columnstore-cmapi - mariadb -e "show status like '%Columnstore%';" - sleep 2 - - # Startup cmapi - if defined - add_primary_node_cmapi -} - -do_enterprise_yum_install() { - - # Install MariaDB - yum clean all - yum install MariaDB-server -y - sleep 2 - systemctl enable mariadb - systemctl start mariadb - - # Install Columnstore - yum install MariaDB-columnstore-engine MariaDB-columnstore-cmapi -y - systemctl enable mariadb-columnstore-cmapi - systemctl start mariadb-columnstore-cmapi - mariadb -e "show status like '%Columnstore%';" - sleep 1; - - # Startup cmapi - if defined - add_primary_node_cmapi -} - -enterprise_install() { - - version=$3 - if [ -z $enterprise_token ]; then - 
printf "Enterprise token empty: $enterprise_token\n" - printf "edit $0 to add token\n\n" - - exit 1; - fi; - - if [ -z $version ]; then - printf "Version empty: $version\n" - exit 1; - fi; - - echo "Token: $enterprise_token" - echo "MariaDB Version: $version" - - # Download Repo setup - rm -rf mariadb_es_repo_setup - wget https://dlm.mariadb.com/enterprise-release-helpers/mariadb_es_repo_setup ;chmod +x mariadb_es_repo_setup; - if ! bash mariadb_es_repo_setup --token="$enterprise_token" --apply --mariadb-server-version="$version"; then - echo "exiting ..." - exit 2; - fi; - - case $distro_info in - centos | rocky ) - do_enterprise_yum_install "$@" - ;; - # debian ) - - # ;; - ubuntu ) - do_enterprise_apt_install "$@" - ;; - *) # unknown option - printf "\nos & version not implemented: $distro_info\n" - exit 2; - esac -} - -community_install() { - - version=$3 - if [ -z $version ]; then - printf "Version empty: $version\n" - - exit 1; - fi; - - echo "MariaDB Community Version: $version" - - # Download Repo setup - rm -rf mariadb_repo_setup - - if ! curl -sS https://downloads.mariadb.com/MariaDB/mariadb_repo_setup | bash -s -- --mariadb-server-version=mariadb-$version ; then - echo "version bad. exiting ..." - exit 2; - fi; - yum clean all - - # Install MariaDB then Columnstore - yum install MariaDB-server -y - sleep 2; - systemctl enable mariadb - systemctl start mariadb - yum install MariaDB-columnstore-engine MariaDB-columnstore-cmapi -y - systemctl enable mariadb-columnstore-cmapi - systemctl start mariadb-columnstore-cmapi - mariadb -e "show status like '%Columnstore%';" - - add_primary_node_cmapi -} - -get_set_cmapi_key() { - - CMAPI_CNF="/etc/columnstore/cmapi_server.conf" - - if [ ! -f $CMAPI_CNF ]; then - echo "[!!] No cmapi config file found" - exit 1; - fi; - - # Add API Key if missing - if [ -z "$(grep ^x-api-key $CMAPI_CNF)" ]; then - - if ! 
command -v openssl &> /dev/null ; then - api_key="19bb89d77cb8edfe0864e05228318e3dfa58e8f45435fbd9bd12c462a522a1e9" - else - api_key=$(openssl rand -hex 32) - fi - # mcs cluster set api-key --key $primary_ip - echo "[+] Setting API Key:" - if curl -s https://127.0.0.1:8640/cmapi/0.4.0/cluster/status \ - --header 'Content-Type:application/json' \ - --header "x-api-key:$api_key" -k ; then - sleep 2; - else - echo "Failed to set API key" - exit 1; - fi - else - api_key=$(grep ^x-api-key $CMAPI_CNF | cut -d "=" -f 2 | tr -d " ") - fi -} - -add_node_cmapi_via_curl() { - - node_ip=$1 - if [ -z $api_key ]; then get_set_cmapi_key; fi; - - # Add Node - printf "\n[+] Adding primary node: \n" - if curl -k -s -X PUT https://127.0.0.1:8640/cmapi/0.4.0/cluster/node \ - --header 'Content-Type:application/json' \ - --header "x-api-key:$api_key" \ - --data "{\"timeout\": 120, \"node\": \"$node_ip\"}"; then - printf "\n[+] Success adding $node_ip\n" - else - echo "Failed adding node" - exit 1; - fi - -} - -start_cs_via_systemctl() { - if systemctl start mariadb-columnstore ; then - echo "[+] Started Columnstore" - else - echo "[!!] Failed to start columnstore" - exit 1; - fi; -} - -start_cs_cmapi_via_curl() { - - if [ -z $api_key ]; then get_set_cmapi_key; fi; - - if curl -k -s -X PUT https://127.0.0.1:8640/cmapi/0.4.0/cluster/start \ - --header 'Content-Type:application/json' \ - --header "x-api-key:$api_key" \ - --data '{"timeout":20}'; then - echo "[+] Started Columnstore" - else - echo "[!] Failed to start columnstore" - echo "Trying via systemctl ..." - start_cs_via_systemctl - fi; -} - -stop_cs_via_systemctl() { - if systemctl stop mariadb-columnstore ; then - echo "[+] Stopped Columnstore" - else - echo "[!!] 
Failed to stop columnstore" - exit 1; - fi; -} - -stop_cs_cmapi_via_curl() { - - if [ -z $api_key ]; then get_set_cmapi_key; fi; - - if curl -k -s -X PUT https://127.0.0.1:8640/cmapi/0.4.0/cluster/shutdown \ - --header 'Content-Type:application/json' \ - --header "x-api-key:$api_key" \ - --data '{"timeout":20}'; then - echo "[+] Stopped Columnstore" - else - echo "[!] Failed to stop columnstore via cmapi" - echo "Trying via systemctl ..." - stop_cs_via_systemctl - fi; -} - -add_primary_node_cmapi() { - - primary_ip="127.0.0.1" - get_set_cmapi_key - - if ! command -v mcs &> /dev/null ; then - echo "mcs - binary could not be found" - add_node_cmapi_via_curl $primary_ip - echo "[+] Starting Columnstore ..." - start_cs_cmapi_via_curl - - else - if [ "$(mcs cluster status | jq -r '.num_nodes')" == "0" ]; then - - echo "[+] Adding primary node ..." - if timeout 30s mcs cluster node add --node $primary_ip; then - echo "[+] Success adding $primary_ip" - else - echo "[!] Failed ... trying cmapi curl" - add_node_cmapi_via_curl $primary_ip - fi; - fi; - echo "[+] Starting Columnstore ..." 
- mcs cluster start - fi -} - -dev_install() { - - if [ -z $dev_drone_key ]; then printf "Missing dev_drone_key: \n"; exit; fi; - check_aws_cli_installed - - echo "Branch: $3" - echo "Build: $4" - dronePath="s3://$dev_drone_key" - branch="$3" - build="$4" - product="10.6-enterprise" - - # Construct URLs - s3_path="$dronePath/$branch/$build/$product/$arch/$distro" - replace="https://$dev_drone_key.s3.amazonaws.com/" - # Use sed to replace the s3 path to create https path - yum_http=$(echo "$s3_path" | sed "s|s3://$dev_drone_key/|$replace|") - echo "Locations:" - echo "RPM: $s3_path" - echo "Yum: $yum_http" - echo "###################################" - - check_dev_build_exists - - echo "[drone] -name=Drone Repository -baseurl="$yum_http" -gpgcheck=0 -enabled=1 - " > /etc/yum.repos.d/drone.repo - yum clean all - # yum makecache - # yum list --disablerepo="*" --enablerepo="drone" - - # ALL RPMS: aws s3 cp $s3_path/ . --recursive --exclude "debuginfo" --include "*.rpm" - aws s3 cp $s3_path/ . --recursive --exclude "*" --include "MariaDB-server*" --exclude "*debug*" --no-sign-request - - # Confirm Downloaded server rpm - if ! ls MariaDB-server-*.rpm 1> /dev/null 2>&1; then - echo "Error: No MariaDB-server RPMs were found." 
- exit 1 - fi - - yum install MariaDB-server-*.rpm -y - yum install MariaDB-columnstore-engine MariaDB-columnstore-cmapi -y - systemctl start mariadb - systemctl start mariadb-columnstore-cmapi - mariadb -e "show status like '%Columnstore%';" - - add_primary_node_cmapi -} - -do_install() { - - check_operating_system - check_cpu_architecture - check_no_mdb_installed - - repo=$2 - echo "Repository: $repo" - case $repo in - enterprise ) - # pull from public enterprise repo - enterprise_install "$@" ; - ;; - community ) - # pull from public community repo - community_install "$@" ; - ;; - dev ) - dev_install "$@" ; - ;; - *) # unknown option - echo "Unknown repo: $repo\n" - exit 2; - esac - - printf "\nDone - $(date)\n" -} - -do_check() { - - check_operating_system - check_cpu_architecture - - repo=$2 - echo "Repository: $repo" - case $repo in - enterprise ) - - if [ -z $enterprise_token ]; then - printf "Enterprise token empty: $enterprise_token\n" - printf "edit $0 to add token\n\n" - exit 1; - fi; - - url_base="https://dlm.mariadb.com" - url_page="/browse/$enterprise_token/mariadb_enterprise_server/" - ignore="/login" - curl -s "$url_base$url_page" > mdb-tmp.html - major_version_links=$(grep -oP 'href="\K[^"]+' mdb-tmp.html | grep $url_page | grep -v $ignore ) - #echo $major_version_links - for major_link in ${major_version_links[@]} - do - #echo "Major: $major_link" - curl -s "$url_base$major_link" > mdb-tmp.html - minor_version_links=$(grep -oP 'href="\K[^"]+' mdb-tmp.html | grep $url_page | grep -v $ignore ) - for minor_link in ${minor_version_links[@]} - do - if [ "$minor_link" != "$url_page" ]; then - #echo " Minor: $minor_link" - case $distro_info in - centos | rocky ) - path="rpm/rhel/$version_id/$architecture/rpms/" - curl -s "$url_base$minor_link$path" > mdb-tmp.html - package_links=$(grep -oP 'href="\K[^"]+' mdb-tmp.html | grep "$path" | grep "columnstore-engine" | grep -v debug | tail -1 ) - if [ ! 
-z "$package_links" ]; then - #echo "----------" - #echo "$package_links" - - mariadb_version="${package_links#*mariadb-enterprise-server/}" - columnstore_version="${mariadb_version#*columnstore-engine-}" - mariadb_version="$( echo $mariadb_version | awk -F/ '{print $1}' )" - - # unqiue to enterprise - standard_mariadb_version="${mariadb_version//-/_}" - columnstore_version="$( echo $columnstore_version | awk -F"${standard_mariadb_version}_" '{print $2}' | awk -F".el" '{print $1}' )" - printf "%-8s %-12s %-12s %-12s\n" "MariaDB:" "$mariadb_version" "Columnstore:" "$columnstore_version"; - fi; - ;; - ubuntu ) - path="deb/pool/main/m/" - curl -s "$url_base$minor_link$path" > mdb-tmp.html - - # unqiue - this link/path can change - mariadb_version_links=$(grep -oP 'href="\K[^"]+' mdb-tmp.html | grep -v $ignore | grep -v cmapi | grep ^mariadb ) - #echo "$url_base$minor_link$path" - for mariadb_link in ${mariadb_version_links[@]} - do - #echo $mariadb_link - path="deb/pool/main/m/$mariadb_link" - curl -s "$url_base$minor_link$path" > mdb-tmp.html - package_links=$(grep -oP 'href="\K[^"]+' mdb-tmp.html | grep "$path" | grep "columnstore_" | grep -v debug | tail -1 ) - if [ ! 
-z "$package_links" ]; then - # echo "$package_links" - # echo "----------" - mariadb_version="${package_links#*mariadb-enterprise-server/}" - columnstore_version="${mariadb_version#*columnstore-engine-}" - mariadb_version="$( echo $mariadb_version | awk -F/ '{print $1}' )" - columnstore_version="$( echo $columnstore_version | awk -F"columnstore_" '{print $2}' | awk -F"-" '{print $2}' | awk -F"+maria" '{print $1}' )" - # echo "MariaDB: $mariadb_version Columnstore: $columnstore_version" - printf "%-8s %-12s %-12s %-12s\n" "MariaDB:" "$mariadb_version" "Columnstore:" "$columnstore_version"; - fi; - done - - - ;; - *) # unknown option - printf "Not implemented for: $distro_info\n" - exit 2; - esac - - - fi; - done - done - ;; - community ) - - # pull from public community repo - url_base="https://dlm.mariadb.com" - url_page="/browse/mariadb_server/" - ignore="/login" - curl -s "$url_base$url_page" > mdb-tmp.html - major_version_links=$(grep -oP 'href="\K[^"]+' mdb-tmp.html | grep $url_page | grep -v $ignore ) - - for major_link in ${major_version_links[@]} - do - #echo "Major: $major_link" - curl -s "$url_base$major_link" > mdb-tmp.html - minor_version_links=$(grep -oP 'href="\K[^"]+' mdb-tmp.html | grep $url_page | grep -v $ignore ) - for minor_link in ${minor_version_links[@]} - do - if [ "$minor_link" != "$url_page" ]; then - #echo " Minor: $minor_link" - case $distro_info in - centos | rocky ) - path="yum/centos/$version_id/$architecture/rpms/" - curl -s "$url_base$minor_link$path" > mdb-tmp.html - package_links=$(grep -oP 'href="\K[^"]+' mdb-tmp.html | grep "$path" | grep "columnstore-engine" | grep -v debug | tail -1 ) - if [ ! 
-z "$package_links" ]; then - # echo "$package_links" - # echo "----------" - mariadb_version="${package_links#*mariadb-}" - columnstore_version="${mariadb_version#*columnstore-engine-}" - mariadb_version="$( echo $mariadb_version | awk -F/ '{print $1}' )" - columnstore_version="$( echo $columnstore_version | awk -F_ '{print $2}' | awk -F".el" '{print $1}' )" - # echo "MariaDB: $mariadb_version Columnstore: $columnstore_version" - printf "%-8s %-12s %-12s %-12s\n" "MariaDB:" "$mariadb_version" "Columnstore:" "$columnstore_version"; - fi; - ;; - ubuntu ) - path="repo/$distro_info/pool/main/m/mariadb/" - curl -s "$url_base$minor_link$path" > mdb-tmp.html - package_links=$(grep -oP 'href="\K[^"]+' mdb-tmp.html | grep "$path" | grep "columnstore_" | grep -v debug | tail -1 ) - if [ ! -z "$package_links" ]; then - # echo "$package_links" - # echo "----------" - mariadb_version="${package_links#*mariadb-}" - columnstore_version="${mariadb_version#*columnstore-engine-}" - mariadb_version="$( echo $mariadb_version | awk -F/ '{print $1}' )" - columnstore_version="$( echo $columnstore_version | awk -F"columnstore_" '{print $2}' | awk -F"-" '{print $2}' | awk -F"+maria" '{print $1}' )" - # echo "MariaDB: $mariadb_version Columnstore: $columnstore_version" - printf "%-8s %-12s %-12s %-12s\n" "MariaDB:" "$mariadb_version" "Columnstore:" "$columnstore_version"; - fi; - ;; - *) # unknown option - printf "Not implemented for: $distro_info\n" - exit 2; - esac - - - fi; - done - done - ;; - dev ) - printf "Not implemented for: $repo\n" - exit 1; - ;; - *) # unknown option - printf "Unknown repo: $repo\n" - exit 2; - esac -} - - -case $action in - remove ) - do_remove "$@" ; - ;; - install ) - do_install "$@"; - ;; - check ) - do_check "$@" - ;; - help | -h | --help | -help) - print_help_text; - exit 1; - ;; - *) # unknown option - printf "Unknown Action: $1\n" - print_help_text - exit 2; -esac