diff --git a/cmapi/cmapi_server/__main__.py b/cmapi/cmapi_server/__main__.py index 32e25a71e..cf55b5c7f 100644 --- a/cmapi/cmapi_server/__main__.py +++ b/cmapi/cmapi_server/__main__.py @@ -220,10 +220,10 @@ if __name__ == '__main__': 'Something went wrong while trying to detect dbrm protocol.\n' 'Seems "controllernode" process isn\'t started.\n' 'This is just a notification, not a problem.\n' - 'Next detection will started at first node\\cluster ' + 'Next detection will start at first node\\cluster ' 'status check.\n' - f'This can cause extra {SOCK_TIMEOUT} seconds delay while\n' - 'first attempt to get status.', + f'This can cause extra {SOCK_TIMEOUT} seconds delay during\n' + 'this first attempt to get the status.', exc_info=True ) else: diff --git a/cmapi/cmapi_server/controllers/endpoints.py b/cmapi/cmapi_server/controllers/endpoints.py index 870e07b35..269337f9d 100644 --- a/cmapi/cmapi_server/controllers/endpoints.py +++ b/cmapi/cmapi_server/controllers/endpoints.py @@ -495,7 +495,7 @@ class ConfigController: attempts +=1 if attempts >= 10: module_logger.debug( - 'Timed out waiting for node to be ready.' + 'Timed out while waiting for the node to become ready.' 
) break time.sleep(1) @@ -1204,7 +1204,7 @@ class AppController(): if AppManager.started: return {'started': True} else: - raise APIError(503, 'CMAPI not ready to handle requests.') + raise APIError(503, 'CMAPI is not ready to handle requests.') class NodeProcessController(): @@ -1232,7 +1232,7 @@ class NodeProcessController(): name='DMLProc', is_primary=True, use_sudo=True, timeout=timeout ) else: - module_logger.debug('Callling stop DMLproc gracefully.') + module_logger.debug('Calling stop DMLproc gracefully.') try: MCSProcessManager.gracefully_stop_dmlproc() except (ConnectionRefusedError, RuntimeError): diff --git a/cmapi/cmapi_server/helpers.py b/cmapi/cmapi_server/helpers.py index 53fb003e7..d9544941c 100644 --- a/cmapi/cmapi_server/helpers.py +++ b/cmapi/cmapi_server/helpers.py @@ -346,7 +346,7 @@ def broadcast_new_config( # TODO: do not restart cluster when put xml config only with # distribute secrets if not CEJPasswordHandler.secretsfile_exists(): - logging.debug('No .secrets file found so not distrinuting it.') + logging.debug('No .secrets file found so not distributing it.') else: secrets = CEJPasswordHandler.get_secrets_json() body['secrets'] = secrets @@ -809,7 +809,7 @@ def get_cej_info(config_root): ): logging.error( 'CrossengineSupport password seems to be encrypted ' - 'but no .secrets file exist. May be it\'s eventually removed.' + 'but no .secrets file exists. It may have been removed at some point.' ) @@ -819,7 +819,7 @@ def get_cej_info(config_root): else: logging.error( 'CrossengineSupport password seems to be unencrypted but ' - '.secrets file exist. May be .secrets file generated by ' + '.secrets file exists. Maybe the .secrets file was generated by ' 'mistake or password left encrypted after using cskeys ' 'utility.' 
) diff --git a/cmapi/cmapi_server/process_dispatchers/base.py b/cmapi/cmapi_server/process_dispatchers/base.py index 84d252d51..f0fa64305 100644 --- a/cmapi/cmapi_server/process_dispatchers/base.py +++ b/cmapi/cmapi_server/process_dispatchers/base.py @@ -64,7 +64,7 @@ class BaseDispatcher: encoding='utf-8' ) except Exception: - logging.error(f'Failed on run command "{command}".', exc_info=True) + logging.error(f'Failed to run command "{command}".', exc_info=True) # TODO: cmapi have to close with exception here # to stop docker container? # raise @@ -79,7 +79,7 @@ class BaseDispatcher: del proc result = (True, output) else: - logging.debug('Waiting for command to finish.') + logging.debug('Waiting for the command to finish.') stdout_str, _ = proc.communicate() returncode = proc.wait() if stdout_str is not None: diff --git a/cmapi/engine_files/mcs-savebrm.py b/cmapi/engine_files/mcs-savebrm.py index dfdbcb652..5ba3fd2f4 100755 --- a/cmapi/engine_files/mcs-savebrm.py +++ b/cmapi/engine_files/mcs-savebrm.py @@ -43,7 +43,7 @@ if __name__ == '__main__': if (storage.lower() != 's3' and master_addr != default_addr) or \ master_addr == default_addr: is_primary = True - print('Multi-node with local-storage detected.') + print('Multi-node configuration with local storage detected.') else: has_requests = False try: @@ -55,7 +55,7 @@ master_addr == default_addr: Please install CMAPI first.') if has_requests is True: try: - print('Requesting for the primary node status.') + print('Requesting the status of the primary node.') api_version = get_version() api_port = get_port() url = "https://{}:{}/cmapi/{}/node/primary".format(default_addr, \ @@ -64,12 +64,11 @@ master_addr == default_addr: verify=False, timeout=REST_REQUEST_TO) if (resp.status_code != 200): - print("Error sending GET /node/primary.") + print("Error occurred while sending GET /node/primary.") else: is_primary = resp.json()['is_primary'] == 'True' except: - print('Failed to request.') - print(str(e)) + print('Failed 
to send request.') if is_primary is True: try: diff --git a/cmapi/failover/agent_comm.py b/cmapi/failover/agent_comm.py index d9e169138..ddbc87659 100644 --- a/cmapi/failover/agent_comm.py +++ b/cmapi/failover/agent_comm.py @@ -181,7 +181,7 @@ class AgentComm: self.__runner() except Exception: logger.error( - 'AgentComm.runner(): got an unrecognised exception.', + 'AgentComm.runner(): got an unhandled exception.', exc_info=True ) if not self._die: @@ -227,7 +227,7 @@ class AgentComm: if needs_transaction: logger.debug( - 'Failover starts transaction to run upcoming event.' + 'Failover is starting a transaction to process the upcoming event.' ) (txn_id, nodes) = self._agent.startTransaction( extra_nodes=list(nodes_added), diff --git a/cmapi/failover/node_monitor.py b/cmapi/failover/node_monitor.py index cbcf758de..74aaf03c8 100644 --- a/cmapi/failover/node_monitor.py +++ b/cmapi/failover/node_monitor.py @@ -77,7 +77,7 @@ class NodeMonitor: ) if not self._die: time.sleep(1) - self._logger.info("node monitor logic exiting normally...") + self._logger.info("Node monitor logic exited normally...") def _monitor(self): """ @@ -138,7 +138,7 @@ class NodeMonitor: self._inStandby = False # has it been deactivated? else: - self._logger.trace('Node not in active nodes, do nothing.') + self._logger.trace('Node is not in active nodes, no action taken.') self._inStandby = True continue # wait to be activated @@ -191,7 +191,7 @@ class NodeMonitor: # if we are in a cohort that has <= 50% of the desired nodes, enter standby if len(activeNodes)/len(desiredNodes) <= 0.5 and len(effectiveActiveNodeList)/len(desiredNodes) <= 0.5: if not inStandbyMode: - msg = "Only {} out of {} nodes are active. At least {} are required. Entering standby mode to protect the system."\ + msg = "Only {} out of {} nodes are active. At least {} are required. 
Entering standby mode to protect the system."\ .format(len(activeNodes), len(desiredNodes), int(len(desiredNodes)/2) + 1) self._agentComm.raiseAlarm(msg) self._logger.critical(msg) diff --git a/cmapi/mcs_cluster_tool/decorators.py b/cmapi/mcs_cluster_tool/decorators.py index e26c6fac8..4a7a322d9 100644 --- a/cmapi/mcs_cluster_tool/decorators.py +++ b/cmapi/mcs_cluster_tool/decorators.py @@ -23,7 +23,7 @@ def handle_output(func): typer.echo(err.message, err=True) logger.error('Error during command execution', exc_info=True) except typer.BadParameter as err: - logger.error('Bad command line parameter.') + logger.error('Invalid command line parameter.') raise err except typer.Exit as err: # if some command used typer.Exit #TODO: think about universal protocol to return json data and @@ -31,10 +31,10 @@ def handle_output(func): return_code = err.exit_code except Exception: logger.error( - 'Undefined error during command execution', + 'An undefined error occurred during command execution', exc_info=True ) - typer.echo('Unknown error, check the log file.', err=True) + typer.echo('An unknown error occurred, please check the log file.', err=True) raise typer.Exit(return_code) return wrapper diff --git a/cmapi/mcs_node_control/models/dbrm.py b/cmapi/mcs_node_control/models/dbrm.py index f0d64cc2e..b67c84300 100644 --- a/cmapi/mcs_node_control/models/dbrm.py +++ b/cmapi/mcs_node_control/models/dbrm.py @@ -41,7 +41,7 @@ class DBRM: master_conn_info = node_config.get_dbrm_conn_info(root) if master_conn_info is None: module_logger.warning( - 'DBRB.connect: No DBRM info in the Columnstore.xml.' + 'DBRB.connect: No DBRM info found in the Columnstore.xml.' 
) dbrm_host = master_conn_info['IPAddr'] or DEFAULT_HOST dbrm_port = int(master_conn_info['Port']) or DEFAULT_PORT diff --git a/cmapi/mcs_node_control/models/node_config.py b/cmapi/mcs_node_control/models/node_config.py index 7dac18bce..8525a86b0 100644 --- a/cmapi/mcs_node_control/models/node_config.py +++ b/cmapi/mcs_node_control/models/node_config.py @@ -256,7 +256,7 @@ class NodeConfig: # Raise an appropriate exception module_logger.error( f'{self.apply_config.__name__} throws an exception.' - 'The original config must be restored by ' + ' The original config must be restored by an ' 'explicit ROLLBACK command or timeout.', exc_info=True )