You've already forked mariadb-columnstore-engine
mirror of
https://github.com/mariadb-corporation/mariadb-columnstore-engine.git
synced 2025-08-08 14:22:09 +03:00
Typo fixes
This commit is contained in:
@@ -220,10 +220,10 @@ if __name__ == '__main__':
|
|||||||
'Something went wrong while trying to detect dbrm protocol.\n'
|
'Something went wrong while trying to detect dbrm protocol.\n'
|
||||||
'Seems "controllernode" process isn\'t started.\n'
|
'Seems "controllernode" process isn\'t started.\n'
|
||||||
'This is just a notification, not a problem.\n'
|
'This is just a notification, not a problem.\n'
|
||||||
'Next detection will started at first node\\cluster '
|
'Next detection will start at first node\\cluster '
|
||||||
'status check.\n'
|
'status check.\n'
|
||||||
f'This can cause extra {SOCK_TIMEOUT} seconds delay while\n'
|
f'This can cause extra {SOCK_TIMEOUT} seconds delay during\n'
|
||||||
'first attempt to get status.',
|
'this first attempt to get the status.',
|
||||||
exc_info=True
|
exc_info=True
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
|
@@ -495,7 +495,7 @@ class ConfigController:
|
|||||||
attempts +=1
|
attempts +=1
|
||||||
if attempts >= 10:
|
if attempts >= 10:
|
||||||
module_logger.debug(
|
module_logger.debug(
|
||||||
'Timed out waiting for node to be ready.'
|
'Timed out while waiting for the node to become ready.'
|
||||||
)
|
)
|
||||||
break
|
break
|
||||||
time.sleep(1)
|
time.sleep(1)
|
||||||
@@ -1204,7 +1204,7 @@ class AppController():
|
|||||||
if AppManager.started:
|
if AppManager.started:
|
||||||
return {'started': True}
|
return {'started': True}
|
||||||
else:
|
else:
|
||||||
raise APIError(503, 'CMAPI not ready to handle requests.')
|
raise APIError(503, 'CMAPI is not ready to handle requests.')
|
||||||
|
|
||||||
|
|
||||||
class NodeProcessController():
|
class NodeProcessController():
|
||||||
@@ -1232,7 +1232,7 @@ class NodeProcessController():
|
|||||||
name='DMLProc', is_primary=True, use_sudo=True, timeout=timeout
|
name='DMLProc', is_primary=True, use_sudo=True, timeout=timeout
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
module_logger.debug('Callling stop DMLproc gracefully.')
|
module_logger.debug('Calling stop DMLproc gracefully.')
|
||||||
try:
|
try:
|
||||||
MCSProcessManager.gracefully_stop_dmlproc()
|
MCSProcessManager.gracefully_stop_dmlproc()
|
||||||
except (ConnectionRefusedError, RuntimeError):
|
except (ConnectionRefusedError, RuntimeError):
|
||||||
|
@@ -346,7 +346,7 @@ def broadcast_new_config(
|
|||||||
# TODO: do not restart cluster when put xml config only with
|
# TODO: do not restart cluster when put xml config only with
|
||||||
# distribute secrets
|
# distribute secrets
|
||||||
if not CEJPasswordHandler.secretsfile_exists():
|
if not CEJPasswordHandler.secretsfile_exists():
|
||||||
logging.debug('No .secrets file found so not distrinuting it.')
|
logging.debug('No .secrets file found so not distributing it.')
|
||||||
else:
|
else:
|
||||||
secrets = CEJPasswordHandler.get_secrets_json()
|
secrets = CEJPasswordHandler.get_secrets_json()
|
||||||
body['secrets'] = secrets
|
body['secrets'] = secrets
|
||||||
@@ -809,7 +809,7 @@ def get_cej_info(config_root):
|
|||||||
):
|
):
|
||||||
logging.error(
|
logging.error(
|
||||||
'CrossengineSupport password seems to be encrypted '
|
'CrossengineSupport password seems to be encrypted '
|
||||||
'but no .secrets file exist. May be it\'s eventually removed.'
|
'but no .secrets file exists. Maybe it was accidentally removed.'
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -819,7 +819,7 @@ def get_cej_info(config_root):
|
|||||||
else:
|
else:
|
||||||
logging.error(
|
logging.error(
|
||||||
'CrossengineSupport password seems to be unencrypted but '
|
'CrossengineSupport password seems to be unencrypted but '
|
||||||
'.secrets file exist. May be .secrets file generated by '
|
'.secrets file exists. Maybe the .secrets file was generated by '
|
||||||
'mistake or password left encrypted after using cskeys '
|
'mistake or the password was left encrypted after using the cskeys '
|
||||||
'utility.'
|
'utility.'
|
||||||
)
|
)
|
||||||
|
@@ -64,7 +64,7 @@ class BaseDispatcher:
|
|||||||
encoding='utf-8'
|
encoding='utf-8'
|
||||||
)
|
)
|
||||||
except Exception:
|
except Exception:
|
||||||
logging.error(f'Failed on run command "{command}".', exc_info=True)
|
logging.error(f'Failed to run command "{command}".', exc_info=True)
|
||||||
# TODO: cmapi have to close with exception here
|
# TODO: cmapi has to close with an exception here
|
||||||
# to stop docker container?
|
# to stop docker container?
|
||||||
# raise
|
# raise
|
||||||
@@ -79,7 +79,7 @@ class BaseDispatcher:
|
|||||||
del proc
|
del proc
|
||||||
result = (True, output)
|
result = (True, output)
|
||||||
else:
|
else:
|
||||||
logging.debug('Waiting for command to finish.')
|
logging.debug('Waiting for the command to finish.')
|
||||||
stdout_str, _ = proc.communicate()
|
stdout_str, _ = proc.communicate()
|
||||||
returncode = proc.wait()
|
returncode = proc.wait()
|
||||||
if stdout_str is not None:
|
if stdout_str is not None:
|
||||||
|
@@ -43,7 +43,7 @@ if __name__ == '__main__':
|
|||||||
if (storage.lower() != 's3' and master_addr != default_addr) or \
|
if (storage.lower() != 's3' and master_addr != default_addr) or \
|
||||||
master_addr == default_addr:
|
master_addr == default_addr:
|
||||||
is_primary = True
|
is_primary = True
|
||||||
print('Multi-node with local-storage detected.')
|
print('Multi-node configuration with local storage detected.')
|
||||||
else:
|
else:
|
||||||
has_requests = False
|
has_requests = False
|
||||||
try:
|
try:
|
||||||
@@ -55,7 +55,7 @@ master_addr == default_addr:
|
|||||||
Please install CMAPI first.')
|
Please install CMAPI first.')
|
||||||
if has_requests is True:
|
if has_requests is True:
|
||||||
try:
|
try:
|
||||||
print('Requesting for the primary node status.')
|
print('Requesting the status of the primary node.')
|
||||||
api_version = get_version()
|
api_version = get_version()
|
||||||
api_port = get_port()
|
api_port = get_port()
|
||||||
url = "https://{}:{}/cmapi/{}/node/primary".format(default_addr, \
|
url = "https://{}:{}/cmapi/{}/node/primary".format(default_addr, \
|
||||||
@@ -64,12 +64,11 @@ master_addr == default_addr:
|
|||||||
verify=False,
|
verify=False,
|
||||||
timeout=REST_REQUEST_TO)
|
timeout=REST_REQUEST_TO)
|
||||||
if (resp.status_code != 200):
|
if (resp.status_code != 200):
|
||||||
print("Error sending GET /node/primary.")
|
print("Error occurred while sending GET /node/primary.")
|
||||||
else:
|
else:
|
||||||
is_primary = resp.json()['is_primary'] == 'True'
|
is_primary = resp.json()['is_primary'] == 'True'
|
||||||
except:
|
except:
|
||||||
print('Failed to request.')
|
print('Failed to send request.')
|
||||||
print(str(e))
|
|
||||||
|
|
||||||
if is_primary is True:
|
if is_primary is True:
|
||||||
try:
|
try:
|
||||||
|
@@ -181,7 +181,7 @@ class AgentComm:
|
|||||||
self.__runner()
|
self.__runner()
|
||||||
except Exception:
|
except Exception:
|
||||||
logger.error(
|
logger.error(
|
||||||
'AgentComm.runner(): got an unrecognised exception.',
|
'AgentComm.runner(): got an unhandled exception.',
|
||||||
exc_info=True
|
exc_info=True
|
||||||
)
|
)
|
||||||
if not self._die:
|
if not self._die:
|
||||||
@@ -227,7 +227,7 @@ class AgentComm:
|
|||||||
|
|
||||||
if needs_transaction:
|
if needs_transaction:
|
||||||
logger.debug(
|
logger.debug(
|
||||||
'Failover starts transaction to run upcoming event.'
|
'Failover is starting a transaction to process the upcoming event.'
|
||||||
)
|
)
|
||||||
(txn_id, nodes) = self._agent.startTransaction(
|
(txn_id, nodes) = self._agent.startTransaction(
|
||||||
extra_nodes=list(nodes_added),
|
extra_nodes=list(nodes_added),
|
||||||
|
@@ -77,7 +77,7 @@ class NodeMonitor:
|
|||||||
)
|
)
|
||||||
if not self._die:
|
if not self._die:
|
||||||
time.sleep(1)
|
time.sleep(1)
|
||||||
self._logger.info("node monitor logic exiting normally...")
|
self._logger.info("Node monitor logic exited normally...")
|
||||||
|
|
||||||
def _monitor(self):
|
def _monitor(self):
|
||||||
"""
|
"""
|
||||||
@@ -138,7 +138,7 @@ class NodeMonitor:
|
|||||||
self._inStandby = False
|
self._inStandby = False
|
||||||
# has it been deactivated?
|
# has it been deactivated?
|
||||||
else:
|
else:
|
||||||
self._logger.trace('Node not in active nodes, do nothing.')
|
self._logger.trace('Node is not in active nodes, no action taken.')
|
||||||
self._inStandby = True
|
self._inStandby = True
|
||||||
continue # wait to be activated
|
continue # wait to be activated
|
||||||
|
|
||||||
@@ -191,7 +191,7 @@ class NodeMonitor:
|
|||||||
# if we are in a cohort that has <= 50% of the desired nodes, enter standby
|
# if we are in a cohort that has <= 50% of the desired nodes, enter standby
|
||||||
if len(activeNodes)/len(desiredNodes) <= 0.5 and len(effectiveActiveNodeList)/len(desiredNodes) <= 0.5:
|
if len(activeNodes)/len(desiredNodes) <= 0.5 and len(effectiveActiveNodeList)/len(desiredNodes) <= 0.5:
|
||||||
if not inStandbyMode:
|
if not inStandbyMode:
|
||||||
msg = "Only {} out of {} nodes are active. At least {} are required. Entering standby mode to protect the system."\
|
msg = "Only {} out of {} nodes are active. At least {} are required. Entering standby mode to protect the system."\
|
||||||
.format(len(activeNodes), len(desiredNodes), int(len(desiredNodes)/2) + 1)
|
.format(len(activeNodes), len(desiredNodes), int(len(desiredNodes)/2) + 1)
|
||||||
self._agentComm.raiseAlarm(msg)
|
self._agentComm.raiseAlarm(msg)
|
||||||
self._logger.critical(msg)
|
self._logger.critical(msg)
|
||||||
|
@@ -23,7 +23,7 @@ def handle_output(func):
|
|||||||
typer.echo(err.message, err=True)
|
typer.echo(err.message, err=True)
|
||||||
logger.error('Error during command execution', exc_info=True)
|
logger.error('Error during command execution', exc_info=True)
|
||||||
except typer.BadParameter as err:
|
except typer.BadParameter as err:
|
||||||
logger.error('Bad command line parameter.')
|
logger.error('Invalid command line parameter.')
|
||||||
raise err
|
raise err
|
||||||
except typer.Exit as err: # if some command used typer.Exit
|
except typer.Exit as err: # if some command used typer.Exit
|
||||||
#TODO: think about universal protocol to return json data and
|
#TODO: think about universal protocol to return json data and
|
||||||
@@ -31,10 +31,10 @@ def handle_output(func):
|
|||||||
return_code = err.exit_code
|
return_code = err.exit_code
|
||||||
except Exception:
|
except Exception:
|
||||||
logger.error(
|
logger.error(
|
||||||
'Undefined error during command execution',
|
'An undefined error occurred during command execution',
|
||||||
exc_info=True
|
exc_info=True
|
||||||
)
|
)
|
||||||
typer.echo('Unknown error, check the log file.', err=True)
|
typer.echo('An unknown error occurred, please check the log file.', err=True)
|
||||||
|
|
||||||
raise typer.Exit(return_code)
|
raise typer.Exit(return_code)
|
||||||
return wrapper
|
return wrapper
|
||||||
|
@@ -41,7 +41,7 @@ class DBRM:
|
|||||||
master_conn_info = node_config.get_dbrm_conn_info(root)
|
master_conn_info = node_config.get_dbrm_conn_info(root)
|
||||||
if master_conn_info is None:
|
if master_conn_info is None:
|
||||||
module_logger.warning(
|
module_logger.warning(
|
||||||
'DBRB.connect: No DBRM info in the Columnstore.xml.'
|
'DBRB.connect: No DBRM info found in the Columnstore.xml.'
|
||||||
)
|
)
|
||||||
dbrm_host = master_conn_info['IPAddr'] or DEFAULT_HOST
|
dbrm_host = master_conn_info['IPAddr'] or DEFAULT_HOST
|
||||||
dbrm_port = int(master_conn_info['Port']) or DEFAULT_PORT
|
dbrm_port = int(master_conn_info['Port']) or DEFAULT_PORT
|
||||||
|
@@ -256,7 +256,7 @@ class NodeConfig:
|
|||||||
# Raise an appropriate exception
|
# Raise an appropriate exception
|
||||||
module_logger.error(
|
module_logger.error(
|
||||||
f'{self.apply_config.__name__} throws an exception.'
|
f'{self.apply_config.__name__} throws an exception.'
|
||||||
'The original config must be restored by '
|
'The original config must be restored by an '
|
||||||
'explicit ROLLBACK command or timeout.',
|
'explicit ROLLBACK command or timeout.',
|
||||||
exc_info=True
|
exc_info=True
|
||||||
)
|
)
|
||||||
|
Reference in New Issue
Block a user