mariadb-columnstore-engine
mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git

Commit MCOL-520
@@ -8799,13 +8799,17 @@ int Oam::glusterctl(GLUSTER_COMMANDS command, std::string argument1, std::string
 
     string glustercmd = "gluster ";
 
-    errmsg = "";
+    string SUDO = "";
+    if ( user != "root" )
+        SUDO = "sudo ";
+
+    string errmsg = "";
 
     switch ( command )
     {
         case (oam::GLUSTER_STATUS):
         {
-            string command = glustercmd + "volume status";
+            string command = SUDO + glustercmd + "volume status";
 
             char buffer[128];
             string result = "";
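Every hunk in this commit applies the same fix: the gluster shell commands built in Oam::glusterctl() (and later in postConfigure) are prefixed with a SUDO string that is empty for root and "sudo " for any other user, and the result of system() is still checked through WEXITSTATUS. A minimal, self-contained sketch of that pattern; the command, log path and variable names below are illustrative, not the exact ones from liboamcpp:

#include <cstdlib>      // system()
#include <sys/wait.h>   // WEXITSTATUS
#include <unistd.h>     // getuid()
#include <iostream>
#include <string>

int main()
{
    // Empty prefix for root, "sudo " for everyone else -- mirrors the SUDO
    // variable the commit threads through the gluster command strings.
    std::string SUDO = (getuid() == 0) ? "" : "sudo ";

    std::string glustercmd = "gluster ";
    std::string command = SUDO + glustercmd + "volume status >> /tmp/glusterCommands.log 2>&1";

    int status = system(command.c_str());

    // system() returns the raw wait status; WEXITSTATUS extracts the
    // command's exit code, which is what the patched code tests against 0.
    if (status == -1 || WEXITSTATUS(status) != 0)
    {
        std::cerr << "gluster command failed: " << command << std::endl;
        return 1;
    }

    return 0;
}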
@@ -9010,7 +9014,7 @@ int Oam::glusterctl(GLUSTER_COMMANDS command, std::string argument1, std::string
             }
 
             sleep(5);
-            command = glustercmd + "peer status " + " >> " + tmpdir + "/glusterCommands.log 2>&1";
+            command = SUDO + glustercmd + "peer status " + " >> " + tmpdir + "/glusterCommands.log 2>&1";
             status = system(command.c_str());
 
             if (WEXITSTATUS(status) != 0 )

@@ -9032,7 +9036,7 @@ int Oam::glusterctl(GLUSTER_COMMANDS command, std::string argument1, std::string
             {
                 int newDbrootID = db + 1;
 
-                command = glustercmd + "volume create dbroot" + itoa(newDbrootID) + " transport tcp replica " + itoa(dataRedundancyCopies) + " ";
+                command = SUDO + glustercmd + "volume create dbroot" + itoa(newDbrootID) + " transport tcp replica " + itoa(dataRedundancyCopies) + " ";
 
                 vector<int>::iterator dbrootPmIter = dbrootPms[db].begin();
 

@@ -9074,7 +9078,7 @@ int Oam::glusterctl(GLUSTER_COMMANDS command, std::string argument1, std::string
                     }
                 }
 
                 command = glustercmd + "volume start dbroot" + itoa(newDbrootID) + " >> " + tmpdir + "/glusterCommands.log 2>&1";
-                command = glustercmd + "volume start dbroot" + itoa(newDbrootID) + " >> " + tmpdir + "/glusterCommands.log 2>&1";
+                command = SUDO + glustercmd + "volume start dbroot" + itoa(newDbrootID) + " >> " + tmpdir + "/glusterCommands.log 2>&1";
                 status = system(command.c_str());
 
                 if (WEXITSTATUS(status) != 0 )

@@ -9121,7 +9125,7 @@ int Oam::glusterctl(GLUSTER_COMMANDS command, std::string argument1, std::string
             int status;
             writeLog("glusterctl: GLUSTER_DELETE: dbroot = " + dbrootID, LOG_TYPE_DEBUG );
 
-            command = glustercmd + "--mode=script volume stop dbroot" + dbrootID + " >> " + tmpdir + "/glusterCommands.log 2>&1";
+            command = SUDO + glustercmd + "--mode=script volume stop dbroot" + dbrootID + " >> " + tmpdir + "/glusterCommands.log 2>&1";
 
             status = system(command.c_str());
 

@@ -9134,7 +9138,7 @@ int Oam::glusterctl(GLUSTER_COMMANDS command, std::string argument1, std::string
             // give time for transaction to finish after stopping
             sleep(10);
 
-            command = glustercmd + " --mode=script volume delete dbroot" + dbrootID + " >> " + tmpdir + "/glusterCommands.log 2>&1";
+            command = SUDO + glustercmd + " --mode=script volume delete dbroot" + dbrootID + " >> " + tmpdir + "/glusterCommands.log 2>&1";
 
             status = system(command.c_str());
 
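For GLUSTER_DELETE the patched sequence stops the volume with --mode=script (so gluster skips its interactive confirmation prompt), sleeps to let the stop settle, then deletes the volume, each step again behind the SUDO prefix. A hedged sketch of that sequence; runCmd, the log path and the dbroot suffix are illustrative, not taken from the patched file:

#include <cstdlib>
#include <sys/wait.h>
#include <unistd.h>     // sleep(), getuid()
#include <string>

// Illustrative helper: run a shell command and return its exit code.
static int runCmd(const std::string& cmd)
{
    int status = system(cmd.c_str());
    return (status == -1) ? -1 : WEXITSTATUS(status);
}

int main()
{
    const std::string SUDO = (getuid() == 0) ? "" : "sudo ";
    const std::string log = "/tmp/glusterCommands.log";
    const std::string dbrootID = "1";   // illustrative volume suffix

    // --mode=script suppresses gluster's interactive confirmation prompts,
    // which matters because these commands run from system() with no tty.
    if (runCmd(SUDO + "gluster --mode=script volume stop dbroot" + dbrootID +
               " >> " + log + " 2>&1") != 0)
        return 1;

    // Give the stop time to settle before deleting, as the patched code does.
    sleep(10);

    if (runCmd(SUDO + "gluster --mode=script volume delete dbroot" + dbrootID +
               " >> " + log + " 2>&1") != 0)
        return 1;

    return 0;
}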
@@ -9154,7 +9158,7 @@ int Oam::glusterctl(GLUSTER_COMMANDS command, std::string argument1, std::string
             string command = "";
             int status;
 
-            command = glustercmd + "peer probe " + ipAddress + " >> " + tmpdir + "/glusterCommands.log 2>&1";
+            command = SUDO + glustercmd + "peer probe " + ipAddress + " >> " + tmpdir + "/glusterCommands.log 2>&1";
 
             cout << "gluster peer probe " + ipAddress << endl;
             status = system(command.c_str());
@@ -198,13 +198,14 @@ string DataFileEnvFile;
 string installDir;
 string tmpDir;
 string HOME = "/root";
 
+string SUDO = "";
 extern string pwprompt;
 string mysqlpw = oam::UnassignedName;
 
 extern const char* pcommand;
 extern string prompt;
 
 /* create thread argument struct for thr_func() */
 typedef struct _thread_data_t
 {
@@ -274,8 +275,12 @@ int main(int argc, char* argv[])
     user = getuid();
     usergroup = getgid();
 
-    if (user != 0)
+    SUDO = "";
+    if (user != 0) {
+
         rootUser = false;
+        SUDO = "sudo ";
+    }
 
     char* p = getenv("USER");
 
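postConfigure makes the root/non-root decision once, at the top of main(), from getuid(). A small sketch of that check, assuming globals equivalent to the rootUser flag and SUDO prefix in the hunk above:

#include <unistd.h>   // getuid(), getgid()
#include <iostream>
#include <string>

int main()
{
    bool rootUser = true;
    std::string SUDO = "";

    // uid 0 is root; any other uid means the installer runs unprivileged
    // and has to go through sudo for the gluster/system commands it issues.
    uid_t user = getuid();
    gid_t usergroup = getgid();

    if (user != 0)
    {
        rootUser = false;
        SUDO = "sudo ";
    }

    std::cout << "uid=" << user << " gid=" << usergroup
              << " rootUser=" << std::boolalpha << rootUser
              << " prefix='" << SUDO << "'" << std::endl;
    return 0;
}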
@@ -5104,7 +5109,7 @@ bool storageSetup(bool amazonInstall)
     //check if gluster is installed
     int rtnCode = 1;
 
-    string cmd = "gluster --version > " + tmpDir + "/gluster.log 2>&1";
+    string cmd = SUDO + "gluster --version > " + tmpDir + "/gluster.log 2>&1";
     rtnCode = system(cmd.c_str());
 
     if (rtnCode == 0)
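storageSetup() treats a zero exit status from "gluster --version" as evidence that the gluster CLI is present, keeping the output only for the log. A sketch of that probe; the temp directory and the exit-status handling here are an assumption, not the exact postConfigure logic:

#include <cstdlib>
#include <sys/wait.h>
#include <unistd.h>
#include <iostream>
#include <string>

int main()
{
    const std::string SUDO = (getuid() == 0) ? "" : "sudo ";
    const std::string tmpDir = "/tmp";   // assumed temp directory

    // A zero exit code from "gluster --version" is taken as "gluster is
    // installed"; the version text itself only goes to the log file.
    std::string cmd = SUDO + "gluster --version > " + tmpDir + "/gluster.log 2>&1";
    int rtnCode = system(cmd.c_str());

    if (rtnCode != -1 && WEXITSTATUS(rtnCode) == 0)
        std::cout << "gluster is installed" << std::endl;
    else
        std::cout << "gluster not found" << std::endl;

    return 0;
}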
@@ -5515,7 +5520,7 @@ bool storageSetup(bool amazonInstall)
     // if gluster
     if ( storageType == "3" )
     {
-        string command = "stat /var/run/glusterd.pid > /dev/null 2>&1";
+        string command = SUDO + "stat /var/run/glusterd.pid > /dev/null 2>&1";
         int status = system(command.c_str());
 
         if (WEXITSTATUS(status) != 0 )
@@ -6704,7 +6709,7 @@ bool glusterSetup(string password)
 
     string glusterCommandsLog = tmpDir + "/glusterCommands.log";
 
-    command = "gluster peer status " + glusterCommandsLog + "2>&1";
+    command = SUDO + "gluster peer status " + glusterCommandsLog + "2>&1";
 
     status = system(command.c_str());
 

@@ -6729,7 +6734,7 @@ bool glusterSetup(string password)
     {
         int dbrootID = db + 1;
 
-        command = "gluster volume create dbroot" + oam.itoa(dbrootID) + " transport tcp replica " + oam.itoa(dataRedundancyCopies) + " ";
+        command = SUDO + "gluster volume create dbroot" + oam.itoa(dbrootID) + " transport tcp replica " + oam.itoa(dataRedundancyCopies) + " ";
 
         vector<int>::iterator dbrootPmIter = dbrootPms[db].begin();
 

@@ -6788,7 +6793,7 @@ bool glusterSetup(string password)
     {
         int user = getuid();
         int group = getgid();
-        command = "gluster volume set dbroot" + oam.itoa(dbrootID) + " storage.owner-uid " + oam.itoa(user) + " >> " + glusterCommandsLog + " 2>&1";
+        command = SUDO + "gluster volume set dbroot" + oam.itoa(dbrootID) + " storage.owner-uid " + oam.itoa(user) + " >> " + glusterCommandsLog + " 2>&1";
         status = system(command.c_str());
 
         if (WEXITSTATUS(status) != 0 )

@@ -6797,7 +6802,7 @@ bool glusterSetup(string password)
            exit(1);
         }
 
-        command = "gluster volume set dbroot" + oam.itoa(dbrootID) + " storage.owner-gid " + oam.itoa(group) + " >> " + glusterCommandsLog + " 2>&1";
+        command = SUDO + "gluster volume set dbroot" + oam.itoa(dbrootID) + " storage.owner-gid " + oam.itoa(group) + " >> " + glusterCommandsLog + " 2>&1";
         status = system(command.c_str());
 
         if (WEXITSTATUS(status) != 0 )
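After creating each dbroot volume as root (directly or through sudo), glusterSetup() hands ownership back to the install user by setting gluster's storage.owner-uid and storage.owner-gid options to the current uid/gid. A sketch of that step; runCmd, std::to_string (standing in for oam.itoa), the log path and the volume name are illustrative:

#include <cstdlib>
#include <sys/wait.h>
#include <unistd.h>
#include <string>

// Illustrative helper: run a shell command and return its exit code.
static int runCmd(const std::string& cmd)
{
    int status = system(cmd.c_str());
    return (status == -1) ? -1 : WEXITSTATUS(status);
}

int main()
{
    const std::string SUDO    = (getuid() == 0) ? "" : "sudo ";
    const std::string logFile = "/tmp/glusterCommands.log";   // assumed path
    const std::string volume  = "dbroot1";                    // illustrative name

    // The volume bricks would otherwise stay owned by root; these options
    // hand ownership of the volume to the (possibly non-root) install user.
    const std::string uid = std::to_string(getuid());
    const std::string gid = std::to_string(getgid());

    if (runCmd(SUDO + "gluster volume set " + volume + " storage.owner-uid " + uid +
               " >> " + logFile + " 2>&1") != 0)
        return 1;

    if (runCmd(SUDO + "gluster volume set " + volume + " storage.owner-gid " + gid +
               " >> " + logFile + " 2>&1") != 0)
        return 1;

    return 0;
}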
@@ -6806,7 +6811,7 @@ bool glusterSetup(string password)
            exit(1);
         }
 
-        command = "gluster volume start dbroot" + oam.itoa(dbrootID) + " >> " + glusterCommandsLog + " 2>&1";
+        command = SUDO + "gluster volume start dbroot" + oam.itoa(dbrootID) + " >> " + glusterCommandsLog + " 2>&1";
         status = system(command.c_str());
 
         if (WEXITSTATUS(status) != 0 )
@@ -1334,7 +1334,7 @@ void processMSG(messageqcpp::IOSocket* cfIos)
     //now stop local module
     processManager.stopModule(config.moduleName(), graceful, manualFlag );
 
-    //run save.brm script
+    //run save brm script
     processManager.saveBRM(false);
 
     log.writeLog(__LINE__, "Stop System Completed Success", LOG_TYPE_INFO);