Merge pull request #237 from mariadb-corporation/MCOL-519-Fixes
Mcol 519 fixes
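Summary derived from the diff below: the patch renames the Gluster-era configuration keys GlusterConfig, GlusterCopies, and GlusterStorageType to DataRedundancyConfig, DataRedundancyCopies, and DataRedundancyStorageType in the XML configuration template and at every call site, renames the DataRedundancyConfig_s struct and DataRedundancyConfig typedef to DataRedundancySetup_s/DataRedundancySetup, restricts the DataRedundancy storage option to multi-server installs, and reworks glusterSetup() (copy-count handling for 2-PM systems, per-PM brick numbering, gluster command output appended to /tmp/glusterCommands.txt). The lookup pattern applied throughout is sketched here as an illustration only; it assumes an Oam instance named oam, as in the surrounding code, and is not itself part of the patch.

    // Sketch only: the read pattern the diff applies at every call site, with the
    // key renamed from "GlusterConfig" to "DataRedundancyConfig" and "n" kept as
    // the fallback when the key is missing or unreadable.
    string DataRedundancyConfig = "n";
    try {
        oam.getSystemConfig("DataRedundancyConfig", DataRedundancyConfig);
    }
    catch(...)
    {
        DataRedundancyConfig = "n";
    }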
@@ -458,9 +458,9 @@
 <AmazonElasticIPCount>0</AmazonElasticIPCount>
 <AmazonElasticIPAddr1>unassigned</AmazonElasticIPAddr1>
 <AmazonElasticModule1>unassigned</AmazonElasticModule1>
-<GlusterConfig>n</GlusterConfig>
-<GlusterCopies>0</GlusterCopies>
-<GlusterStorageType>unassigned</GlusterStorageType>
+<DataRedundancyConfig>n</DataRedundancyConfig>
+<DataRedundancyCopies>0</DataRedundancyCopies>
+<DataRedundancyStorageType>unassigned</DataRedundancyStorageType>
 <CoreFileFlag>n</CoreFileFlag>
 <MySQLPort>3306</MySQLPort>
 <MySQLPasswordConfig>unassigned</MySQLPasswordConfig>

@@ -5203,13 +5203,13 @@ namespace oam
 
     writeLog("manualMovePmDbroot: " + dbrootIDs + " from " + residePM + " to " + toPM, LOG_TYPE_DEBUG );
 
-    string GlusterConfig = "n";
+    string DataRedundancyConfig = "n";
     try {
-        getSystemConfig( "GlusterConfig", GlusterConfig);
+        getSystemConfig( "DataRedundancyConfig", DataRedundancyConfig);
     }
     catch(...)
     {
-        GlusterConfig = "n";
+        DataRedundancyConfig = "n";
     }
 
     boost::char_separator<char> sep(", ");

@@ -5219,7 +5219,7 @@ namespace oam
         ++it)
     {
         //if gluster, check if there are copies on the to-pm
-        if ( GlusterConfig == "y")
+        if ( DataRedundancyConfig == "y")
         {
             string pmList = "";
             try {

@@ -5381,7 +5381,7 @@ namespace oam
     }
 
     //if Gluster, do the assign command
-    if ( GlusterConfig == "y")
+    if ( DataRedundancyConfig == "y")
     {
         try {
             string errmsg;

@@ -5477,16 +5477,16 @@ namespace oam
     }
     catch(...) {}
 
-    string GlusterConfig = "n";
+    string DataRedundancyConfig = "n";
     try {
-        getSystemConfig( "GlusterConfig", GlusterConfig);
+        getSystemConfig( "DataRedundancyConfig", DataRedundancyConfig);
     }
     catch(...)
     {
-        GlusterConfig = "n";
+        DataRedundancyConfig = "n";
     }
 
-    if (DBRootStorageType == "internal" && GlusterConfig == "n")
+    if (DBRootStorageType == "internal" && DataRedundancyConfig == "n")
         return 1;
 
     // get current Module name

@@ -5621,7 +5621,7 @@ namespace oam
         exceptionControl("autoMovePmDbroot", API_FAILURE);
     }
 
-    if ( GlusterConfig == "y")
+    if ( DataRedundancyConfig == "y")
     {
         try {
             string errmsg;

@@ -5667,7 +5667,7 @@ namespace oam
     {
         //if Gluster, get it's list for DBroot and move to one of those
         string toPmID;
-        if ( GlusterConfig == "y")
+        if ( DataRedundancyConfig == "y")
         {
             string pmList = "";
             try {

@@ -5892,16 +5892,16 @@ namespace oam
     }
     catch(...) {}
 
-    string GlusterConfig = "n";
+    string DataRedundancyConfig = "n";
     try {
-        getSystemConfig( "GlusterConfig", GlusterConfig);
+        getSystemConfig( "DataRedundancyConfig", DataRedundancyConfig);
     }
     catch(...)
     {
-        GlusterConfig = "n";
+        DataRedundancyConfig = "n";
     }
 
-    if (DBRootStorageType == "internal" && GlusterConfig == "n")
+    if (DBRootStorageType == "internal" && DataRedundancyConfig == "n")
         return 1;
 
     //store in move dbroot transaction file

@@ -6480,13 +6480,13 @@ namespace oam
     cout << endl << "Changes being applied" << endl << endl;
 
     //added entered dbroot IDs to to-PM list and do Gluster assign if needed
-    string GlusterConfig = "n";
+    string DataRedundancyConfig = "n";
     try {
-        getSystemConfig( "GlusterConfig", GlusterConfig);
+        getSystemConfig( "DataRedundancyConfig", DataRedundancyConfig);
     }
     catch(...)
     {
-        GlusterConfig = "n";
+        DataRedundancyConfig = "n";
     }
 
     DBRootConfigList::iterator pt3 = dbrootlist.begin();

@@ -6494,7 +6494,7 @@ namespace oam
     {
         todbrootConfigList.push_back(*pt3);
 
-/* if ( GlusterConfig == "y")
+/* if ( DataRedundancyConfig == "y")
         {
             try {
                 string errmsg;

@@ -6828,12 +6828,12 @@ namespace oam
     int SystemDBRootCount = 0;
     string cloud;
     string DBRootStorageType;
-    string GlusterConfig = "n";
+    string DataRedundancyConfig = "n";
     try {
         getSystemConfig("DBRootCount", SystemDBRootCount);
         getSystemConfig("Cloud", cloud);
         getSystemConfig("DBRootStorageType", DBRootStorageType);
-        getSystemConfig("GlusterConfig", GlusterConfig);
+        getSystemConfig("DataRedundancyConfig", DataRedundancyConfig);
     }
     catch(...) {}
 

@@ -6919,7 +6919,7 @@ namespace oam
     }
 
     // if gluster, request volume delete
-    if ( GlusterConfig == "y")
+    if ( DataRedundancyConfig == "y")
     {
         try {
             string errmsg1;

@@ -8284,7 +8284,7 @@ namespace oam
     int numberDBRootsPerPM = numberNewDBRoots/numberNewPMs;
 
     std::vector<int> dbrootPms[dbrootCount];
-    DataRedundancyConfig DataRedundancyConfigs[numberPMs];
+    DataRedundancySetup DataRedundancyConfigs[numberPMs];
     int startDBRootID = dbrootID;
 
     for (int pm=(pmID-1); pm < numberPMs; pm++,startDBRootID++)

@@ -9605,18 +9605,18 @@ namespace oam
     }
     catch(...) {}
 
-    string GlusterConfig = "n";
+    string DataRedundancyConfig = "n";
     try {
-        getSystemConfig( "GlusterConfig", GlusterConfig);
+        getSystemConfig( "DataRedundancyConfig", DataRedundancyConfig);
     }
     catch(...)
     {
-        GlusterConfig = "n";
+        DataRedundancyConfig = "n";
     }
 
-    if ( (DBRootStorageType == "external" && GlusterConfig == "n")
+    if ( (DBRootStorageType == "external" && DataRedundancyConfig == "n")
         ||
-        (GlusterConfig == "y" && !mount) )
+        (DataRedundancyConfig == "y" && !mount) )
     {
         dbrootList::iterator pt3 = dbrootConfigList.begin();
         for( ; pt3 != dbrootConfigList.end() ; pt3++)
@@ -1304,7 +1304,7 @@ namespace oam
     };
     typedef std::vector<DataRedundancyStorageSetup> DataRedundancyStorage;
 
-    struct DataRedundancyConfig_s
+    struct DataRedundancySetup_s
     {
         int pmID;
         std::string pmHostname;

@@ -1312,7 +1312,7 @@ namespace oam
         std::vector<int> dbrootCopies;
         DataRedundancyStorage storageLocations;
     };
-    typedef struct DataRedundancyConfig_s DataRedundancyConfig;
+    typedef struct DataRedundancySetup_s DataRedundancySetup;
 
     // username / password for smbclient use
     const std::string USERNAME = "oamuser";
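A plausible reason for the struct/typedef rename above (inferred, not stated in the diff): the patch introduces string variables named DataRedundancyConfig throughout the code, which would share a name with the old typedef; renaming the type to DataRedundancySetup keeps the two distinct. A minimal, hypothetical sketch of the clash:

    // Hypothetical illustration only: a local string named after the old typedef
    // hides the type name inside this scope.
    #include <string>
    typedef struct DataRedundancyConfig_s { int pmID; } DataRedundancyConfig;
    void example()
    {
        std::string DataRedundancyConfig = "n";   // the name the patch now uses for the config flag
        // Here "DataRedundancyConfig" refers to the string, not the struct type;
        // renaming the type to DataRedundancySetup avoids that ambiguity.
    }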
@@ -1876,21 +1876,21 @@ int processCommand(string* arguments)
         }
     }
 
-    string GlusterConfig;
+    string DataRedundancyConfig;
     string DataRedundancyCopies;
     string DataRedundancyStorageType;
     try {
-        oam.getSystemConfig("GlusterConfig", GlusterConfig);
+        oam.getSystemConfig("DataRedundancyConfig", DataRedundancyConfig);
         oam.getSystemConfig("DataRedundancyCopies", DataRedundancyCopies);
         oam.getSystemConfig("DataRedundancyStorageType", DataRedundancyStorageType);
     }
     catch(...) {}
 
-    if ( GlusterConfig == "y" )
+    if ( DataRedundancyConfig == "y" )
     {
         cout << endl << "Data Redundant Configuration" << endl << endl;
         cout << "Copies Per DBroot = " << DataRedundancyCopies << endl;
-        cout << "Storage Type = " << DataRedundancyStorageType << endl;
+        //cout << "Storage Type = " << DataRedundancyStorageType << endl;
 
     oamModuleInfo_t st;
     string moduleType;

@@ -1952,14 +1952,14 @@ int processCommand(string* arguments)
 
     case 14: // addDbroot parameters: dbroot-number
     {
-        string GlusterConfig = "n";
+        string DataRedundancyConfig = "n";
         try {
-            oam.getSystemConfig( "GlusterConfig", GlusterConfig);
+            oam.getSystemConfig( "DataRedundancyConfig", DataRedundancyConfig);
         }
         catch(...)
         {}
 
-        if (GlusterConfig == "y") {
+        if (DataRedundancyConfig == "y") {
             cout << endl << "**** addDbroot Not Supported on Data Redundancy Configured System, use addModule command to expand your capacity" << endl;
             break;
         }

@@ -2042,9 +2042,9 @@ int processCommand(string* arguments)
     case 15: // removeDbroot parameters: dbroot-list
     {
 
-        string GlusterConfig = "n";
+        string DataRedundancyConfig = "n";
         try {
-            oam.getSystemConfig( "GlusterConfig", GlusterConfig);
+            oam.getSystemConfig( "DataRedundancyConfig", DataRedundancyConfig);
         }
         catch(...)
         {}

@@ -3262,15 +3262,15 @@ int processCommand(string* arguments)
         }
         catch(...) {}
 
-        string GlusterConfig = "n";
+        string DataRedundancyConfig = "n";
         try
         {
-            oam.getSystemConfig( "GlusterConfig", GlusterConfig);
+            oam.getSystemConfig( "DataRedundancyConfig", DataRedundancyConfig);
         }
         catch(...)
         {}
 
-        if (DBRootStorageType == "internal" && GlusterConfig == "n")
+        if (DBRootStorageType == "internal" && DataRedundancyConfig == "n")
         {
             cout << endl << "**** switchParentOAMModule Failed : DBRoot Storage type = internal/non-data-replication" << endl;
             break;

@@ -3307,7 +3307,7 @@ int processCommand(string* arguments)
         }
 
         //check for gluster system is do-able
-        if (GlusterConfig == "y")
+        if (DataRedundancyConfig == "y")
         {
             // get to-module assigned DBRoots and see if current active PM
             // has a copy

@@ -3574,17 +3574,17 @@ int processCommand(string* arguments)
             cout << endl << "**** getSystemStatus Failed = " << e.what() << endl;
         }
 
-        string GlusterConfig;
+        string DataRedundancyConfig;
         string DataRedundancyCopies;
         string DataRedundancyStorageType;
         try {
-            oam.getSystemConfig("GlusterConfig", GlusterConfig);
+            oam.getSystemConfig("DataRedundancyConfig", DataRedundancyConfig);
             oam.getSystemConfig("DataRedundancyCopies", DataRedundancyCopies);
             oam.getSystemConfig("DataRedundancyStorageType", DataRedundancyStorageType);
         }
         catch(...) {}
 
-        if ( GlusterConfig == "y" )
+        if ( DataRedundancyConfig == "y" )
         {
             string arg1 = "";
             string arg2 = "";

@@ -3754,13 +3754,13 @@ int processCommand(string* arguments)
         }
 
         //if gluster, check if toPM is has a copy
-        string GlusterConfig;
+        string DataRedundancyConfig;
         try {
-            oam.getSystemConfig("GlusterConfig", GlusterConfig);
+            oam.getSystemConfig("DataRedundancyConfig", DataRedundancyConfig);
         }
         catch(...) {}
 
-        if ( GlusterConfig == "y" )
+        if ( DataRedundancyConfig == "y" )
         {
             string pmList = "";
             try {

@@ -4224,16 +4224,16 @@ int processCommand(string* arguments)
         oam.getSystemConfig("DBRootStorageType", DBRootStorageType);
 
         if (DBRootStorageType == "external" ){
-            string GlusterConfig = "n";
+            string DataRedundancyConfig = "n";
             string cloud = oam::UnassignedName;
             try {
                 oam.getSystemConfig("Cloud", cloud);
-                oam.getSystemConfig( "GlusterConfig", GlusterConfig);
+                oam.getSystemConfig( "DataRedundancyConfig", DataRedundancyConfig);
             }
             catch(...)
             {}
 
-            if ( GlusterConfig == "n" && cloud == oam::UnassignedName)
+            if ( DataRedundancyConfig == "n" && cloud == oam::UnassignedName)
                 cout << " REMINDER: Update the /etc/fstab on " << toPM << " to include these dbroot mounts" << endl << endl;
             break;
 

@@ -5103,7 +5103,7 @@ int processCommand(string* arguments)
             }
         }
 
-        string GlusterConfig = "n";
+        string DataRedundancyConfig = "n";
         int DataRedundancyCopies;
         string cloud = oam::UnassignedName;
         int DataRedundancyNetworkType;

@@ -5113,7 +5113,7 @@ int processCommand(string* arguments)
         try {
             oam.getSystemConfig("Cloud", cloud);
             oam.getSystemConfig("AmazonVPCNextPrivateIP", AmazonVPCNextPrivateIP);
-            oam.getSystemConfig("GlusterConfig", GlusterConfig);
+            oam.getSystemConfig("DataRedundancyConfig", DataRedundancyConfig);
             oam.getSystemConfig("DataRedundancyCopies", DataRedundancyCopies);
             oam.getSystemConfig("DataRedundancyNetworkType", DataRedundancyNetworkType);
             oam.getSystemConfig("DataRedundancyStorageType", DataRedundancyStorageType);

@@ -5206,7 +5206,7 @@ int processCommand(string* arguments)
             break;
         }
 
-        if ( GlusterConfig == "y" && moduleType == "pm" ) {
+        if ( DataRedundancyConfig == "y" && moduleType == "pm" ) {
             if ( localModule != parentOAMModule ) {
                 // exit out since not on active module
                 cout << endl << "**** addModule Failed : Can only run command on Active OAM Parent Module (" << parentOAMModule << ")." << endl;

@@ -5435,7 +5435,7 @@ int processCommand(string* arguments)
         devicenetworkconfig.hostConfigList.clear();
         moduleName.clear();
 
-        if ( GlusterConfig == "y" && DataRedundancyNetworkType == 2 && moduleType == "pm")
+        if ( DataRedundancyConfig == "y" && DataRedundancyNetworkType == 2 && moduleType == "pm")
         {
             string DataRedundancyIPAddress = sysConfig->getConfig("DataRedundancyConfig",dataDupIPaddr);
             string DataRedundancyHostname = sysConfig->getConfig("DataRedundancyConfig",dataDupHostName);

@@ -5463,7 +5463,7 @@ int processCommand(string* arguments)
         storageDeviceList storagedevicelist;
         string deviceType;
 
-        if ( GlusterConfig == "y" && moduleType == "pm")
+        if ( DataRedundancyConfig == "y" && moduleType == "pm")
         {
             cout << endl << "System is configured with Data Redundancy, DBRoot Storage will" << endl;
             cout << "will be created with the Modules during this command." << endl;

@@ -5528,7 +5528,7 @@ int processCommand(string* arguments)
 
         cout << "Add Module(s) successfully completed" << endl;
 
-        if ( GlusterConfig == "y" && moduleType == "pm" ) {
+        if ( DataRedundancyConfig == "y" && moduleType == "pm" ) {
 
         {
             //send messages to update fstab to new modules, if needed
@@ -177,7 +177,7 @@ bool noPrompting = false;
 bool rootUser = true;
 string USER = "root";
 bool hdfs = false;
-bool gluster = false;
+bool DataRedundancy = false;
 bool pmwithum = false;
 bool mysqlRep = false;
 string MySQLRep = "y";

@@ -1461,7 +1461,12 @@ int main(int argc, char *argv[])
                 exit(1);
             continue;
         }
+        if ( moduleType == "pm" && DataRedundancy && moduleCount == 1) {
+            cout << endl << "ERROR: DataRedundancy requires " + moduleType + " module type to be 2 or greater, please re-enter or select a different data storage type." << endl << endl;
+            if ( noPrompting )
+                exit(1);
+            continue;
+        }
         //update count
         try {
             string ModuleCountParm = "ModuleCount" + oam.itoa(i+1);

@@ -3173,13 +3178,13 @@ int main(int argc, char *argv[])
     }
 
     //configure data redundancy
-    if (gluster )
+    if (DataRedundancy )
     {
         cout << endl;
         string start = "y";
         if ( reuseConfig == "y" )
             start = "n";
-
+/*
         while(true)
         {
             pcommand = callReadline("Would you like to configure MariaDB ColumnStore Data Redundancy? [y,n] (" + start + ") > ");

@@ -3196,7 +3201,7 @@ int main(int argc, char *argv[])
             if ( noPrompting )
                 exit(1);
         }
-
+*/
         if ( start == "y" ) {
             cout << endl << "===== Configuring MariaDB ColumnStore Data Redundancy Functionality =====" << endl << endl;
             if (!glusterSetup(password))

@@ -3429,7 +3434,7 @@ int main(int argc, char *argv[])
     cout << " DONE" << endl;
 
     // IF gluster is enabled we need to modify fstab on remote systems.
-    if (gluster )
+    if (DataRedundancy )
     {
         int numberDBRootsPerPM = DBRootCount/pmNumber;
         for (int pm=0; pm < pmNumber; pm++)

@@ -4119,8 +4124,8 @@ bool storageSetup(bool amazonInstall)
     if ( DBRootStorageType == "hdfs")
         hdfs = true;
 
-    if ( DBRootStorageType == "gluster")
-        gluster = true;
+    if ( DBRootStorageType == "DataRedundancy")
+        DataRedundancy = true;
 
     if ( reuseConfig == "y" ) {
         cout << "===== Storage Configuration = " + DBRootStorageType + " =====" << endl << endl;

@@ -4463,7 +4468,7 @@ bool storageSetup(bool amazonInstall)
         storageType = "1";
     if ( DBRootStorageType == "external" )
         storageType = "2";
-    if ( DBRootStorageType == "gluster" )
+    if ( DBRootStorageType == "DataRedundancy" )
         storageType = "3";
     if ( DBRootStorageType == "hdfs" )
         storageType = "4";

@@ -4476,7 +4481,7 @@ bool storageSetup(bool amazonInstall)
         prompt = "Select the type of Data Storage [1=internal, 2=external] (" + storageType + ") > ";
     }
 
-    if ( glusterInstalled == "y" && hadoopInstalled == "n" )
+    if ( (glusterInstalled == "y" && singleServerInstall != "1") && hadoopInstalled == "n" )
     {
         cout << "There are 3 options when configuring the storage: internal, external, or DataRedundancy" << endl << endl;
         prompt = "Select the type of Data Storage [1=internal, 2=external, 3=DataRedundancy] (" + storageType + ") > ";

@@ -4488,7 +4493,7 @@ bool storageSetup(bool amazonInstall)
         prompt = "Select the type of Data Storage [1=internal, 2=external, 4=hdfs] (" + storageType + ") > ";
     }
 
-    if ( glusterInstalled == "y" && hadoopInstalled == "y" )
+    if ( (glusterInstalled == "y" && singleServerInstall != "1") && hadoopInstalled == "y" )
    {
         cout << "There are 5 options when configuring the storage: internal, external, DataRedundancy, or hdfs" << endl << endl;
         prompt = "Select the type of Data Storage [1=internal, 2=external, 3=DataRedundancy, 4=hdfs] (" + storageType + ") > ";

@@ -4499,7 +4504,7 @@ bool storageSetup(bool amazonInstall)
     cout << " 'external' - This is specified when the DBRoot directories are mounted." << endl;
     cout << " High Availability Server Failover is Supported in this mode." << endl << endl;
 
-    if ( glusterInstalled == "y" )
+    if ( glusterInstalled == "y" && singleServerInstall != "1")
     {
         cout << " 'DataRedundancy' - This is specified when gluster is installed and you want" << endl;
         cout << " the DBRoot directories to be controlled by ColumnStore Data Redundancy." << endl;

@@ -4531,7 +4536,7 @@ bool storageSetup(bool amazonInstall)
             exit(1);
     }
 
-    if ( glusterInstalled == "y" && hadoopInstalled == "n" )
+    if ( (glusterInstalled == "y" && singleServerInstall != "1") && hadoopInstalled == "n" )
     {
         if ( storageType == "1" || storageType == "2" || storageType == "3")
             break;

@@ -4550,7 +4555,7 @@ bool storageSetup(bool amazonInstall)
             exit(1);
     }
 
-    if ( glusterInstalled == "y" && hadoopInstalled == "y" )
+    if ( (glusterInstalled == "y" && singleServerInstall != "1") && hadoopInstalled == "y" )
     {
         if ( storageType == "1" || storageType == "2" || storageType == "3" || storageType == "4")
             break;

@@ -4560,6 +4565,34 @@ bool storageSetup(bool amazonInstall)
         }
     }
 
+    if (storageType != "3" && DataRedundancy)
+    {
+        cout << "WARNING: This system was configured with ColumnStore DataRedundancy" << endl;
+        cout << " The selection to change from DataRedundancy to a different" << endl;
+        cout << " storage type will require to cleanup. Exit and refer to" << endl;
+        cout << " ColumnStore documentation for procedures or continue." << endl;
+
+        cout << endl;
+        string continueInstall = "y";
+        while(true)
+        {
+            pcommand = callReadline("Would you like to continue with this storage setting? [y,n] (" + continueInstall + ") > ");
+            if (pcommand)
+            {
+                if (strlen(pcommand) > 0) continueInstall = pcommand;
+                callFree(pcommand);
+            }
+            if ( continueInstall == "y" || continueInstall == "n" )
+                break;
+            else
+                cout << "Invalid Entry, please enter 'y' for yes or 'n' for no" << endl;
+            continueInstall = "y";
+            if ( noPrompting )
+                exit(1);
+        }
+        if ( continueInstall == "n")
+            exit(1);
+    }
     switch ( atoi(storageType.c_str()) ) {
         case (1):
         {

@@ -4573,7 +4606,7 @@ bool storageSetup(bool amazonInstall)
         }
         case (3):
         {
-            DBRootStorageType = "gluster";
+            DBRootStorageType = "DataRedundancy";
             break;
         }
         case (4):

@@ -4762,14 +4795,14 @@ bool storageSetup(bool amazonInstall)
     // if gluster
     if ( storageType == "3" )
     {
-        gluster = true;
-        sysConfig->setConfig(InstallSection, "GlusterConfig", "y");
+        DataRedundancy = true;
+        sysConfig->setConfig(InstallSection, "DataRedundancyConfig", "y");
         sysConfig->setConfig("PrimitiveServers", "DirectIO", "n");
     }
     else
     {
-        gluster = false;
-        sysConfig->setConfig(InstallSection, "GlusterConfig", "n");
+        DataRedundancy = false;
+        sysConfig->setConfig(InstallSection, "DataRedundancyConfig", "n");
         sysConfig->setConfig("PrimitiveServers", "DirectIO", "y");
     }
 
@@ -5400,42 +5433,56 @@ bool glusterSetup(string password) {
     int numberDBRootsPerPM = DBRootCount/pmNumber;
     int numberBricksPM = 0;
     std::vector<int> dbrootPms[DBRootCount];
-    DataRedundancyConfig DataRedundancyConfigs[pmNumber];
+    DataRedundancySetup DataRedundancyConfigs[pmNumber];
     string command = "";
     string remoteCommand = installDir + "/bin/remote_command.sh ";
     // how many copies?
-    cout << endl;
-    cout << "Setup the Number of Copies: This is the total number of copies of the data" << endl;
-    cout << "in the system and a non-redundant system has 1 copy, so choose 2 or more," << endl;
-    cout << "but not more than the number of PMs which is " + oam.itoa(pmNumber) + "." << endl;
-
-    while(dataRedundancyCopies < 2 || dataRedundancyCopies > pmNumber)
-    {
-        dataRedundancyCopies = 2;
-        prompt = "Enter Number of Copies [2-" + oam.itoa(pmNumber) + "] ("+ oam.itoa(dataRedundancyCopies) +") > ";
-        pcommand = callReadline(prompt.c_str());
-        if (pcommand) {
-            if (strlen(pcommand) > 0) dataRedundancyCopies = atoi(pcommand);
-            callFree(pcommand);
-        }
-
-        if ( dataRedundancyCopies < 2 || dataRedundancyCopies > pmNumber ) {
-            cout << endl << "ERROR: Invalid Copy Count '" + oam.itoa(dataRedundancyCopies) + "', please re-enter" << endl << endl;
-            if ( noPrompting )
-                exit(1);
-            continue;
-        }
-
-        //update count
-        try {
-            sysConfig->setConfig(InstallSection, "DataRedundancyCopies", oam.itoa(dataRedundancyCopies));
-        }
-        catch(...)
-        {
-            cout << "ERROR: Problem setting DataRedundancyCopies in the MariaDB ColumnStore System Configuration file" << endl;
-            exit(1);
-        }
-    }
+    if (pmNumber > 2)
+    {
+        cout << endl;
+        cout << "Setup the Number of Copies: This is the total number of copies of the data" << endl;
+        cout << "in the system. At least 2, but not more than the number of PMs(" + oam.itoa(pmNumber) + "), are required." << endl;
+        while(dataRedundancyCopies < 2 || dataRedundancyCopies > pmNumber)
+        {
+            dataRedundancyCopies = 2; //minimum 2 copies
+            prompt = "Enter Number of Copies [2-" + oam.itoa(pmNumber) + "] ("+ oam.itoa(dataRedundancyCopies) +") > ";
+            pcommand = callReadline(prompt.c_str());
+            if (pcommand) {
+                if (strlen(pcommand) > 0) dataRedundancyCopies = atoi(pcommand);
+                callFree(pcommand);
+            }
+
+            if ( dataRedundancyCopies < 2 || dataRedundancyCopies > pmNumber ) {
+                cout << endl << "ERROR: Invalid Copy Count '" + oam.itoa(dataRedundancyCopies) + "', please re-enter" << endl << endl;
+                if ( noPrompting )
+                    exit(1);
+                continue;
+            }
+        }
+    }
+    else if (pmNumber == 2)
+    {
+        dataRedundancyCopies = 2; //minimum 2 copies
+        cout << endl;
+        cout << "Only 2 PMs configured. Setting number of copies at 2." << endl;
+    }
+    else
+    {
+        // This should never happen
+        cout << endl;
+        cout << "ERROR: Invalid value for pm count Data Redundancy could not be configured." << endl;
+        exit(1);
+    }
+
+    //update count
+    try {
+        sysConfig->setConfig(InstallSection, "DataRedundancyCopies", oam.itoa(dataRedundancyCopies));
+    }
+    catch(...)
+    {
+        cout << "ERROR: Problem setting DataRedundancyCopies in the MariaDB ColumnStore System Configuration file" << endl;
+        exit(1);
+    }
 
     numberBricksPM = numberDBRootsPerPM * dataRedundancyCopies;
 
|
|||||||
DataRedundancyConfigs[pm].pmHostname = sysConfig->getConfig("SystemModuleConfig",pmHostName);
|
DataRedundancyConfigs[pm].pmHostname = sysConfig->getConfig("SystemModuleConfig",pmHostName);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
/*
|
||||||
cout << endl;
|
cout << endl;
|
||||||
cout << "OK. You have " + oam.itoa(pmNumber) + " PMs, " + oam.itoa(DBRootCount) + " DBRoots, and you have chosen to keep " + oam.itoa(dataRedundancyCopies) << endl;
|
cout << "OK. You have " + oam.itoa(pmNumber) + " PMs, " + oam.itoa(DBRootCount) + " DBRoots, and you have chosen to keep " + oam.itoa(dataRedundancyCopies) << endl;
|
||||||
cout << "copies of the data. You can choose to place the copies in " << endl;
|
cout << "copies of the data. You can choose to place the copies in " << endl;
|
||||||
@@ -5617,7 +5665,7 @@ bool glusterSetup(string password) {
|
|||||||
while( dataRedundancyStorage != 1 && dataRedundancyStorage != 2 )
|
while( dataRedundancyStorage != 1 && dataRedundancyStorage != 2 )
|
||||||
{
|
{
|
||||||
dataRedundancyStorage = 1;
|
dataRedundancyStorage = 1;
|
||||||
prompt = "Select the data redundancy network [1=directory, 2=storage] (" + oam.itoa(dataRedundancyStorage) + ") > ";
|
prompt = "Select the data redundancy storage device [1=directory, 2=storage] (" + oam.itoa(dataRedundancyStorage) + ") > ";
|
||||||
pcommand = callReadline(prompt.c_str());
|
pcommand = callReadline(prompt.c_str());
|
||||||
if (pcommand)
|
if (pcommand)
|
||||||
{
|
{
|
||||||
@@ -5654,6 +5702,12 @@ bool glusterSetup(string password) {
|
|||||||
//loop through pms and get storage locations for each
|
//loop through pms and get storage locations for each
|
||||||
for (int pm=0; pm < pmNumber; pm++)
|
for (int pm=0; pm < pmNumber; pm++)
|
||||||
{
|
{
|
||||||
|
vector<int>::iterator dbrootID = DataRedundancyConfigs[pm].dbrootCopies.begin();
|
||||||
|
for (; dbrootID < DataRedundancyConfigs[pm].dbrootCopies.end(); dbrootID++ )
|
||||||
|
{
|
||||||
|
int brick = (*dbrootID);
|
||||||
|
cout << "PM#" + oam.itoa(DataRedundancyConfigs[pm].pmID) + " brick#" + oam.itoa(brick) + " : " << endl;
|
||||||
|
}
|
||||||
for (int brick=0; brick < numberBricksPM; brick++)
|
for (int brick=0; brick < numberBricksPM; brick++)
|
||||||
{
|
{
|
||||||
prompt = "Enter a storage locations for PM#" + oam.itoa(DataRedundancyConfigs[pm].pmID) + " brick#" + oam.itoa(brick) + " : ";
|
prompt = "Enter a storage locations for PM#" + oam.itoa(DataRedundancyConfigs[pm].pmID) + " brick#" + oam.itoa(brick) + " : ";
|
||||||
@@ -5683,7 +5737,7 @@ bool glusterSetup(string password) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
*/
|
||||||
// User config complete setup the gluster bricks
|
// User config complete setup the gluster bricks
|
||||||
// This will distribute DBRootCopies evenly across PMs
|
// This will distribute DBRootCopies evenly across PMs
|
||||||
for (int pm=0; pm < pmNumber; pm++)
|
for (int pm=0; pm < pmNumber; pm++)
|
||||||
@@ -5751,6 +5805,7 @@ bool glusterSetup(string password) {
|
|||||||
cout << "ERROR: command failed: " << command << endl;
|
cout << "ERROR: command failed: " << command << endl;
|
||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
|
/*
|
||||||
if (dataRedundancyStorage == 2)
|
if (dataRedundancyStorage == 2)
|
||||||
{
|
{
|
||||||
//walk data storage locations and modify fstab to reflect the storage locations entered by user
|
//walk data storage locations and modify fstab to reflect the storage locations entered by user
|
||||||
@@ -5808,15 +5863,17 @@ bool glusterSetup(string password) {
|
|||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
*/
|
||||||
}
|
}
|
||||||
if (rootUser)
|
if (rootUser)
|
||||||
{
|
{
|
||||||
command = "gluster peer probe " + DataRedundancyConfigs[pm].pmIpAddr;
|
command = "gluster peer probe " + DataRedundancyConfigs[pm].pmIpAddr + " >> /tmp/glusterCommands.txt 2>&1";
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
command = "sudo gluster peer probe " + DataRedundancyConfigs[pm].pmIpAddr;
|
command = "sudo gluster peer probe " + DataRedundancyConfigs[pm].pmIpAddr + " >> /tmp/glusterCommands.txt 2>&1";
|
||||||
}
|
}
|
||||||
|
cout << "gluster peer probe " + DataRedundancyConfigs[pm].pmIpAddr << endl;
|
||||||
status = system(command.c_str());
|
status = system(command.c_str());
|
||||||
if (WEXITSTATUS(status) != 0 )
|
if (WEXITSTATUS(status) != 0 )
|
||||||
{
|
{
|
||||||
@@ -5825,7 +5882,7 @@ bool glusterSetup(string password) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
sleep(5);
|
sleep(5);
|
||||||
command = "gluster peer status ";
|
command = "gluster peer status >> /tmp/glusterCommands.txt 2>&1";
|
||||||
status = system(command.c_str());
|
status = system(command.c_str());
|
||||||
if (WEXITSTATUS(status) != 0 )
|
if (WEXITSTATUS(status) != 0 )
|
||||||
{
|
{
|
||||||
@@ -5836,6 +5893,11 @@ bool glusterSetup(string password) {
|
|||||||
//TODO: figureout a cleaner way to do this.
|
//TODO: figureout a cleaner way to do this.
|
||||||
sleep(10);
|
sleep(10);
|
||||||
// Build the gluster volumes and start them for each dbroot
|
// Build the gluster volumes and start them for each dbroot
|
||||||
|
int pmnextbrick[pmNumber];
|
||||||
|
for (int pm=0; pm < pmNumber; pm++)
|
||||||
|
{
|
||||||
|
pmnextbrick[pm]=1;
|
||||||
|
}
|
||||||
for (int db=0; db < DBRootCount; db++)
|
for (int db=0; db < DBRootCount; db++)
|
||||||
{
|
{
|
||||||
int dbrootID = db + 1;
|
int dbrootID = db + 1;
|
||||||
@@ -5851,9 +5913,11 @@ bool glusterSetup(string password) {
|
|||||||
for (; dbrootPmIter < dbrootPms[db].end(); dbrootPmIter++ )
|
for (; dbrootPmIter < dbrootPms[db].end(); dbrootPmIter++ )
|
||||||
{
|
{
|
||||||
int pm = (*dbrootPmIter) - 1;
|
int pm = (*dbrootPmIter) - 1;
|
||||||
command += DataRedundancyConfigs[pm].pmIpAddr + ":" + installDir +"/gluster/brick" + oam.itoa(dbrootID) + " ";
|
command += DataRedundancyConfigs[pm].pmIpAddr + ":" + installDir +"/gluster/brick" + oam.itoa(pmnextbrick[pm]) + " ";
|
||||||
|
pmnextbrick[pm]++;
|
||||||
}
|
}
|
||||||
command += "force";
|
command += "force >> /tmp/glusterCommands.txt 2>&1";
|
||||||
|
cout << "Gluster create and start volume dbroot" << oam.itoa(dbrootID) << "...";
|
||||||
status = system(command.c_str());
|
status = system(command.c_str());
|
||||||
if (WEXITSTATUS(status) != 0 )
|
if (WEXITSTATUS(status) != 0 )
|
||||||
{
|
{
|
||||||
@@ -5862,11 +5926,11 @@ bool glusterSetup(string password) {
|
|||||||
}
|
}
|
||||||
if (rootUser)
|
if (rootUser)
|
||||||
{
|
{
|
||||||
command = "gluster volume start dbroot" + oam.itoa(dbrootID);
|
command = "gluster volume start dbroot" + oam.itoa(dbrootID) + " >> /tmp/glusterCommands.txt 2>&1";
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
command = "sudo gluster volume start dbroot" + oam.itoa(dbrootID);
|
command = "sudo gluster volume start dbroot" + oam.itoa(dbrootID) + " >> /tmp/glusterCommands.txt 2>&1";
|
||||||
}
|
}
|
||||||
status = system(command.c_str());
|
status = system(command.c_str());
|
||||||
if (WEXITSTATUS(status) != 0 )
|
if (WEXITSTATUS(status) != 0 )
|
||||||
@@ -5874,6 +5938,7 @@ bool glusterSetup(string password) {
|
|||||||
cout << "ERROR: command failed: " << command << endl;
|
cout << "ERROR: command failed: " << command << endl;
|
||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
|
cout << "DONE" << endl;
|
||||||
}
|
}
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
|
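Context for the glusterSetup() hunks above: numberBricksPM = numberDBRootsPerPM * dataRedundancyCopies (for example, 2 DBRoots per PM with 2 copies gives 4 bricks per PM), and the patch replaces the DBRoot ID with a per-PM counter when naming brick directories, so each PM's bricks are numbered 1..numberBricksPM; the gluster commands issued here now also append their output to /tmp/glusterCommands.txt. A rough, simplified sketch of the changed loop follows; volumeCreatePrefix() is a hypothetical stand-in for the command prefix the real function assembles before this loop, and the sketch is an illustration rather than the patch itself.

    // Rough sketch only, simplified from the patch: per-PM brick numbering.
    vector<int> pmnextbrick(pmNumber, 1);             // next brick index on each PM
    for (int db = 0; db < DBRootCount; db++)
    {
        int dbrootID = db + 1;
        string command = volumeCreatePrefix(dbrootID);   // hypothetical helper, see lead-in
        vector<int>::iterator dbrootPmIter = dbrootPms[db].begin();
        for (; dbrootPmIter < dbrootPms[db].end(); dbrootPmIter++)
        {
            int pm = (*dbrootPmIter) - 1;
            command += DataRedundancyConfigs[pm].pmIpAddr + ":" + installDir + "/gluster/brick" + oam.itoa(pmnextbrick[pm]) + " ";
            pmnextbrick[pm]++;                        // before this patch, every PM reused oam.itoa(dbrootID) here
        }
        command += "force >> /tmp/glusterCommands.txt 2>&1";
        cout << "Gluster create and start volume dbroot" << oam.itoa(dbrootID) << "...";
        // system(command.c_str()) and the matching "gluster volume start dbrootN" call follow, as in the diff
    }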
@@ -92,13 +92,13 @@ void diskMonitor()
     }
 
     //get Gluster Config setting
-    string GlusterConfig = "n";
+    string DataRedundancyConfig = "n";
     try {
-        oam.getSystemConfig( "GlusterConfig", GlusterConfig);
+        oam.getSystemConfig( "DataRedundancyConfig", DataRedundancyConfig);
     }
     catch(...)
     {
-        GlusterConfig = "n";
+        DataRedundancyConfig = "n";
     }
 
     int diskSpaceCheck = 0;

@@ -322,7 +322,7 @@ void diskMonitor()
 
     //check for external file systems/devices
     if (Externalflag ||
-        (!Externalflag && GlusterConfig == "y" && moduleType == "pm") ){
+        (!Externalflag && DataRedundancyConfig == "y" && moduleType == "pm") ){
         try
         {
             DBRootConfigList dbrootConfigList;

@@ -565,7 +565,7 @@ void diskMonitor()
     }
 
     //do Gluster status check, if configured
-    if ( GlusterConfig == "y")
+    if ( DataRedundancyConfig == "y")
     {
         bool pass = true;
         string errmsg = "unknown";

@@ -51,7 +51,7 @@ bool amazon = false;
 string PMInstanceType;
 string UMInstanceType;
 string AmazonPMFailover = "y";
-string GlusterConfig = "n";
+string DataRedundancyConfig = "n";
 bool rootUser = true;
 string USER = "root";
 bool HDFS = false;

@@ -153,11 +153,11 @@ int main(int argc, char **argv)
 
     //get gluster config
     try {
-        oam.getSystemConfig( "GlusterConfig", GlusterConfig);
+        oam.getSystemConfig( "DataRedundancyConfig", DataRedundancyConfig);
     }
     catch(...)
     {
-        GlusterConfig = "n";
+        DataRedundancyConfig = "n";
     }
 
     //hdfs / hadoop config
@@ -48,7 +48,7 @@ extern bool runStandby;
 extern string iface_name;
 extern string PMInstanceType;
 extern string UMInstanceType;
-extern string GlusterConfig;
+extern string DataRedundancyConfig;
 extern bool rootUser;
 extern string USER;
 extern bool HDFS;

@@ -8539,7 +8539,7 @@ int ProcessManager::switchParentOAMModule(std::string newActiveModuleName)
 
     log.writeLog(__LINE__, "switchParentOAMModule Function Started", LOG_TYPE_DEBUG);
 
-    if ( DBRootStorageType == "internal" && GlusterConfig == "n") {
+    if ( DBRootStorageType == "internal" && DataRedundancyConfig == "n") {
         log.writeLog(__LINE__, "ERROR: DBRootStorageType = internal", LOG_TYPE_ERROR);
         pthread_mutex_unlock(&THREAD_LOCK);
         return API_INVALID_PARAMETER;

@@ -9138,7 +9138,7 @@ int ProcessManager::OAMParentModuleChange()
 
     }
 
-    if ( DBRootStorageType == "internal" && failover && GlusterConfig == "n")
+    if ( DBRootStorageType == "internal" && failover && DataRedundancyConfig == "n")
     {
         log.writeLog(__LINE__, "DBRoot Storage configured for internal, don't do standby-active failover", LOG_TYPE_DEBUG);
 

@@ -9537,7 +9537,7 @@ std::string ProcessManager::getStandbyModule()
 
     //check if gluster, if so then find PMs that have copies of DBROOT #1
     string pmList = "";
-    if (GlusterConfig == "y") {
+    if (DataRedundancyConfig == "y") {
 
         try {
             string errmsg;

@@ -10024,7 +10024,7 @@ int ProcessManager::mountDBRoot(std::string dbrootID)
     ProcessManager processManager(config, log);
     Oam oam;
 
-    if (GlusterConfig == "y")
+    if (DataRedundancyConfig == "y")
         return oam::API_SUCCESS;
 
     //get pm assigned to that dbroot
@@ -47,7 +47,7 @@ static void mysqlMonitorThread(MonitorConfig config);
 string systemOAM;
 string dm_server;
 string cloud;
-string GlusterConfig = "n";
+string DataRedundancyConfig = "n";
 bool HDFS = false;
 
 void updateShareMemory(processStatusList* aPtr);

@@ -207,14 +207,14 @@ int main(int argc, char **argv)
 
     //get gluster config
     try {
-        oam.getSystemConfig( "GlusterConfig", GlusterConfig);
+        oam.getSystemConfig( "DataRedundancyConfig", DataRedundancyConfig);
     }
     catch(...)
     {
-        GlusterConfig = "n";
+        DataRedundancyConfig = "n";
     }
 
-    if ( GlusterConfig == "y" ) {
+    if ( DataRedundancyConfig == "y" ) {
         system("mount -a > /dev/null 2>&1");
     }
 

@@ -524,7 +524,7 @@ int main(int argc, char **argv)
 
     //check if gluster, if so then find PMs that have copies of DBROOT #1
     string pmList = "";
-    if (GlusterConfig == "y") {
+    if (DataRedundancyConfig == "y") {
 
         try {
             string errmsg;

@@ -2021,7 +2021,7 @@ static void statusControlThread()
 
     std::vector<string>dbrootList;
     if ( DBRootStorageType == "external" ||
-        GlusterConfig == "y") {
+        DataRedundancyConfig == "y") {
         //get system dbroots
         DBRootConfigList dbrootConfigList;
         try
@@ -46,7 +46,7 @@ extern bool runStandby;
 extern bool processInitComplete;
 extern int fmoduleNumber;
 extern string cloud;
-extern string GlusterConfig;
+extern string DataRedundancyConfig;
 extern bool rootUser;
 extern string USER;
 extern bool HDFS;

@@ -1615,7 +1615,7 @@ void ProcessMonitor::processMessage(messageqcpp::ByteStream msg, messageqcpp::IO
     flushInodeCache();
 
     int return_status = API_SUCCESS;
-    if (GlusterConfig == "n")
+    if (DataRedundancyConfig == "n")
     {
         int retry = 1;
         for ( ; retry < 5 ; retry++)

@@ -1689,7 +1689,7 @@ void ProcessMonitor::processMessage(messageqcpp::ByteStream msg, messageqcpp::IO
     log.writeLog(__LINE__, "MSG RECEIVED: Mount DBRoot: " + dbrootID);;
 
     int return_status = API_SUCCESS;
-    if (GlusterConfig == "n")
+    if (DataRedundancyConfig == "n")
     {
         string cmd = "export LC_ALL=C;mount " + startup::StartUp::installDir() + "/data" + dbrootID + " > /tmp/mount.txt 2>&1";
         system(cmd.c_str());

@@ -2461,7 +2461,7 @@ pid_t ProcessMonitor::startProcess(string processModuleType, string processName,
     }
 
     // now delete the dbrm data from local disk
-    if ( !gOAMParentModuleFlag && !HDFS && GlusterConfig == "n") {
+    if ( !gOAMParentModuleFlag && !HDFS && DataRedundancyConfig == "n") {
         string cmd = "rm -f " + DBRMDir + "/*";
         system(cmd.c_str());
         log.writeLog(__LINE__, "removed DBRM file with command: " + cmd, LOG_TYPE_DEBUG);

@@ -5719,12 +5719,12 @@ void ProcessMonitor::unmountExtraDBroots()
         oam.getSystemConfig("DBRootStorageType", DBRootStorageType);
 
         if ( DBRootStorageType == "hdfs" ||
-            ( DBRootStorageType == "internal" && GlusterConfig == "n") )
+            ( DBRootStorageType == "internal" && DataRedundancyConfig == "n") )
             return;
     }
     catch(...) {}
 
-// if (GlusterConfig == "y")
+// if (DataRedundancyConfig == "y")
 // return;
 
     try

@@ -5755,7 +5755,7 @@ void ProcessMonitor::unmountExtraDBroots()
 
         if (config.moduleID() != moduleID)
         {
-            if ( GlusterConfig == "n" )
+            if ( DataRedundancyConfig == "n" )
             {
                 string cmd = "umount " + startup::StartUp::installDir() + "/data" + oam.itoa(id) + " > /dev/null 2>&1";
                 system(cmd.c_str());

@@ -5863,7 +5863,7 @@ int ProcessMonitor::checkDataMount()
     catch(...) {}
 
     //asign DBRoot is gluster
-    if (GlusterConfig == "y")
+    if (DataRedundancyConfig == "y")
     {
         vector<string>::iterator p = dbrootList.begin();
         while ( p != dbrootList.end() )

@@ -5886,7 +5886,7 @@ int ProcessMonitor::checkDataMount()
     }
 
     if ( DBRootStorageType == "hdfs" ||
-        (DBRootStorageType == "internal" && GlusterConfig == "n") ) {
+        (DBRootStorageType == "internal" && DataRedundancyConfig == "n") ) {
         //create OAM-Test-Flag
         vector<string>::iterator p = dbrootList.begin();
         while ( p != dbrootList.end() )

@@ -5921,7 +5921,7 @@ int ProcessMonitor::checkDataMount()
         string dbroot = installDir + "/data" + *p;
         string fileName = dbroot + "/OAMdbrootCheck";
 
-        if ( GlusterConfig == "n" ) {
+        if ( DataRedundancyConfig == "n" ) {
             //remove any local check flag for starters
             string cmd = "umount " + dbroot + " > /tmp/umount.txt 2>&1";
             system(cmd.c_str());
@@ -201,21 +201,21 @@ int main(int argc, char *argv[])
     }
 
     //set gluster flag if it exists
-    string GlusterConfig;
-    string GlusterCopies;
-    string GlusterStorageType;
+    string DataRedundancyConfig;
+    string DataRedundancyCopies;
+    string DataRedundancyStorageType;
     try {
-        GlusterConfig = sysConfigOld->getConfig(InstallSection, "GlusterConfig");
-        GlusterCopies = sysConfigOld->getConfig(InstallSection, "GlusterCopies");
-        GlusterStorageType = sysConfigOld->getConfig(InstallSection, "GlusterStorageType");
+        DataRedundancyConfig = sysConfigOld->getConfig(InstallSection, "DataRedundancyConfig");
+        DataRedundancyCopies = sysConfigOld->getConfig(InstallSection, "DataRedundancyCopies");
+        DataRedundancyStorageType = sysConfigOld->getConfig(InstallSection, "DataRedundancyStorageType");
     }
     catch(...)
     {}
-    if ( !GlusterConfig.empty() ) {
+    if ( !DataRedundancyConfig.empty() ) {
         try {
-            sysConfigNew->setConfig(InstallSection, "GlusterConfig", GlusterConfig);
-            sysConfigNew->setConfig(InstallSection, "GlusterCopies", GlusterCopies);
-            sysConfigNew->setConfig(InstallSection, "GlusterStorageType", GlusterStorageType);
+            sysConfigNew->setConfig(InstallSection, "DataRedundancyConfig", DataRedundancyConfig);
+            sysConfigNew->setConfig(InstallSection, "DataRedundancyCopies", DataRedundancyCopies);
+            sysConfigNew->setConfig(InstallSection, "DataRedundancyStorageType", DataRedundancyStorageType);
         }
         catch(...)
         {}
|
Reference in New Issue
Block a user