Mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git (synced 2025-07-30 19:23:07 +03:00)

Move config files

This patch:

* Moves config files from /usr/local/mariadb/columnstore/etc to
ENGINE_SYSCONFDIR/columnstore (ENGINE_SYSCONFDIR is /etc by default)
* Sets a define called MCSSYSCONFDIR which contains the
ENGINE_SYSCONFDIR compile-time setting
* Modifies scripts and code to use the new paths
* Removes a whole bunch of files we don't use
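
The code diffs below repeat one mechanical substitution: paths built from the runtime install prefix become paths rooted at the compiled-in MCSSYSCONFDIR. A minimal sketch of the before/after shape (function names are illustrative; the macro comes from the generated config.h shown in a later hunk):

```cpp
#include <string>

#include "config.h"  // generated by CMake; provides MCSSYSCONFDIR

// Old pattern (pre-patch): config files lived under the install prefix.
std::string oldConfigPath(const std::string& installDir)
{
    return installDir + "/etc/Columnstore.xml";
}

// New pattern: config files live under ENGINE_SYSCONFDIR/columnstore,
// i.e. /etc/columnstore/Columnstore.xml with the default setting.
std::string newConfigPath()
{
    return std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml";
}
```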
Author: Andrew Hutchings
Date: 2019-09-05 18:13:23 +01:00
parent 016523ef47
commit 97bda78c3b
67 changed files with 535 additions and 5808 deletions

View File

@@ -123,19 +123,7 @@ ENDIF("${isSystemDir}" STREQUAL "-1")
INCLUDE (configureEngine)
# releasenum is used by external scripts for various tasks. Leave it alone.
CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/build/releasenum.in ${CMAKE_CURRENT_BINARY_DIR}/build/releasenum IMMEDIATE)
INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/build/releasenum DESTINATION ${INSTALL_ENGINE} COMPONENT platform)
CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/columnstoreversion.h.in ${CMAKE_CURRENT_SOURCE_DIR}/columnstoreversion.h)
CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/config.h.in ${CMAKE_CURRENT_BINARY_DIR}/config.h)
exec_program("git"
${CMAKE_CURRENT_SOURCE_DIR}
ARGS "describe --match=NeVeRmAtCh --always --dirty"
OUTPUT_VARIABLE GIT_VERSION)
CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/gitversionEngine.in ${CMAKE_CURRENT_BINARY_DIR}/gitversionEngine IMMEDIATE)
INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/gitversionEngine DESTINATION ${INSTALL_ENGINE} COMPONENT platform)
FIND_PROGRAM(LEX_EXECUTABLE flex DOC "path to the flex executable")
if(NOT LEX_EXECUTABLE)
@@ -204,8 +192,7 @@ SET (ENGINE_LDFLAGS "-Wl,--no-as-needed -Wl,--add-needed")
SET (ENGINE_LIBDIR "${INSTALL_ENGINE}/lib")
SET (ENGINE_BINDIR "${INSTALL_ENGINE}/bin")
SET (ENGINE_INCDIR "${INSTALL_ENGINE}/include")
SET (ENGINE_ETCDIR "${INSTALL_ENGINE}/etc")
SET (ENGINE_SYSCONFDIR "${INSTALL_ENGINE}/etc")
SET (ENGINE_SYSCONFDIR "/etc")
SET (ENGINE_MANDIR "${INSTALL_ENGINE}/man")
SET (ENGINE_SBINDIR "${INSTALL_ENGINE}/sbin")
SET (ENGINE_SHAREDIR "${INSTALL_ENGINE}/share")
@@ -329,5 +316,19 @@ IF( WITH_SHARED_COMP_TESTS )
ADD_SUBDIRECTORY(writeengine/shared)
ENDIF( WITH_SHARED_COMP_TESTS )
# releasenum is used by external scripts for various tasks. Leave it alone.
CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/build/releasenum.in ${CMAKE_CURRENT_BINARY_DIR}/build/releasenum IMMEDIATE)
INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/build/releasenum DESTINATION ${INSTALL_ENGINE} COMPONENT platform)
CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/columnstoreversion.h.in ${CMAKE_CURRENT_SOURCE_DIR}/columnstoreversion.h)
CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/config.h.in ${CMAKE_CURRENT_BINARY_DIR}/config.h)
exec_program("git"
${CMAKE_CURRENT_SOURCE_DIR}
ARGS "describe --match=NeVeRmAtCh --always --dirty"
OUTPUT_VARIABLE GIT_VERSION)
CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/gitversionEngine.in ${CMAKE_CURRENT_BINARY_DIR}/gitversionEngine IMMEDIATE)
INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/gitversionEngine DESTINATION ${INSTALL_ENGINE} COMPONENT platform)
INCLUDE(cpackEngineRPM)
INCLUDE(cpackEngineDEB)

View File

@@ -387,4 +387,6 @@
code using `volatile' can become incorrect without. Disable with care. */
#cmakedefine volatile
#define MCSSYSCONFDIR "${ENGINE_SYSCONFDIR}"
#endif
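
Since CONFIGURE_FILE substitutes ${ENGINE_SYSCONFDIR} at configure time, the installed header carries a plain string literal. A hypothetical excerpt of the generated header under the default setting, plus the literal-concatenation trick a string-literal macro permits:

```cpp
// Hypothetical generated config.h excerpt, assuming the default
// ENGINE_SYSCONFDIR of "/etc":
#define MCSSYSCONFDIR "/etc"

// Because the macro expands to a string literal, adjacent-literal
// concatenation builds full paths at compile time, with no allocation:
static const char* kColumnstoreXml = MCSSYSCONFDIR "/columnstore/Columnstore.xml";
```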

View File

@@ -1,494 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- $Id: config-dec.xml 3499 2007-10-28 02:13:13Z rdempsey $ -->
<Calpont Version="V1.0.0">
<PrimitiveServers>
<Count>5</Count>
<ServerThreads>1</ServerThreads>
<ServerQueueSize>100</ServerQueueSize>
<ProcessorThreads>1</ProcessorThreads> <!-- 30 -->
<ProcessorQueueSize>100</ProcessorQueueSize>
<ReadThreads>200</ReadThreads>
<ReadQueueSize>400</ReadQueueSize>
<DebugLevel>1</DebugLevel>
<LBID_Mask>31</LBID_Mask>
<LBID_Shift>10</LBID_Shift>
<DataElementsPerMessage>1024</DataElementsPerMessage>
<JoinElementsPerMessage>1024</JoinElementsPerMessage>
<MinNumDeliverable>1000</MinNumDeliverable>
<MinNumExecutable>1000</MinNumExecutable>
<ThrottleToCompletion>1</ThrottleToCompletion>
</PrimitiveServers>
<PrimitiveServer1>
<PM1>PMS1</PM1>
<PM2>PMS2</PM2>
<PM3>PMS3</PM3>
<PM4>PMS4</PM4>
</PrimitiveServer1>
<PrimitiveServer2>
<PM1>PMS5</PM1>
<PM2>PMS6</PM2>
<PM3>PMS7</PM3>
<PM4>PMS8</PM4>
</PrimitiveServer2>
<PrimitiveServer3>
<PM1>PMS9</PM1>
<PM2>PMS10</PM2>
<PM3>PMS11</PM3>
<PM4>PMS12</PM4>
</PrimitiveServer3>
<PrimitiveServer4>
<PM1>PMS13</PM1>
<PM2>PMS14</PM2>
<PM3>PMS15</PM3>
<PM4>PMS16</PM4>
</PrimitiveServer4>
<PrimitiveServer5>
<PM1>PMS17</PM1>
<PM2>PMS18</PM2>
<PM3>PMS19</PM3>
<PM4>PMS20</PM4>
</PrimitiveServer5>
<PMS1>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS1>
<PMS2>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS2>
<PMS3>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS3>
<PMS4>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS4>
<PMS5>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS5>
<PMS6>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS6>
<PMS7>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS7>
<PMS8>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS8>
<PMS9>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS9>
<PMS10>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS10>
<PMS11>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS11>
<PMS12>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS12>
<PMS13>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS13>
<PMS14>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS14>
<PMS15>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS15>
<PMS16>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS16>
<PMS17>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS17>
<PMS18>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS18>
<PMS19>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS19>
<PMS20>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS20>
<PMS21>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS21>
<PMS22>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS22>
<PMS23>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS23>
<PMS24>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS24>
<PMS25>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS25>
<PMS26>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS26>
<PMS27>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS27>
<PMS28>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS28>
<PMS29>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS29>
<PMS30>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS30>
<PMS31>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS31>
<PMS32>
<IPAddr>127.0.0.1</IPAddr>
<Port>18620</Port>
</PMS32>
<MessageLog>
<MessageLogFile>/home/rdempsey/Calpont/etc/MessageFile.txt</MessageLogFile>
</MessageLog>
<Messageq>
<Dir>/var/tmp</Dir>
</Messageq>
<ExeMgr1>
<IPAddr>127.0.0.1</IPAddr>
<Port>18601</Port>
<ServerThreads>50</ServerThreads>
<ServerQueueSize>100</ServerQueueSize>
</ExeMgr1>
<ExeMgr2>
<IPAddr>127.0.0.1</IPAddr>
<Port>18601</Port>
<ServerThreads>50</ServerThreads>
<ServerQueueSize>100</ServerQueueSize>
</ExeMgr2>
<JobProc>
<IPAddr>127.0.0.1</IPAddr>
<Port>18602</Port>
</JobProc>
<ProcMgr>
<IPAddr>127.0.0.1</IPAddr>
<Port>18604</Port>
</ProcMgr>
<CalpontConsole>
<IPAddr>127.0.0.1</IPAddr>
<Port>18606</Port>
</CalpontConsole>
<dm1_ProcessMonitor>
<IPAddr>127.0.0.1</IPAddr>
<Port>18630</Port>
</dm1_ProcessMonitor>
<dm2_ProcessMonitor>
<IPAddr>127.0.0.1</IPAddr>
<Port>18632</Port>
</dm2_ProcessMonitor>
<um1_ProcessMonitor>
<IPAddr>127.0.0.1</IPAddr>
<Port>18634</Port>
</um1_ProcessMonitor>
<um2_ProcessMonitor>
<IPAddr>127.0.0.1</IPAddr>
<Port>18636</Port>
</um2_ProcessMonitor>
<um3_ProcessMonitor>
<IPAddr>127.0.0.1</IPAddr>
<Port>18638</Port>
</um3_ProcessMonitor>
<pm1_ProcessMonitor>
<IPAddr>127.0.0.1</IPAddr>
<Port>18640</Port>
</pm1_ProcessMonitor>
<pm2_ProcessMonitor>
<IPAddr>127.0.0.1</IPAddr>
<Port>18642</Port>
</pm2_ProcessMonitor>
<pm3_ProcessMonitor>
<IPAddr>127.0.0.1</IPAddr>
<Port>18644</Port>
</pm3_ProcessMonitor>
<ms1_ProcessMonitor>
<IPAddr>127.0.0.1</IPAddr>
<Port>18650</Port>
</ms1_ProcessMonitor>
<DDLProc>
<IPAddr>127.0.0.1</IPAddr>
<Port>18612</Port>
</DDLProc>
<DMLProc>
<IPAddr>127.0.0.1</IPAddr>
<Port>18614</Port>
</DMLProc>
<ProcStatusControl>
<IPAddr>127.0.0.1</IPAddr>
<Port>18625</Port>
</ProcStatusControl>
<SystemConfig>
<SystemVersion>V1.0.0.0</SystemVersion>
<ModuleHeartbeatPeriod>5</ModuleHeartbeatPeriod>
<ModuleHeartbeatCount>3</ModuleHeartbeatCount>
<ProcessHeartbeatPeriod>60</ProcessHeartbeatPeriod>
<DBRoot1>/home/rdempsey/Calpont/data1</DBRoot1>
<!--
<DBRoot>/home/rdempsey/Calpont/data1</DBRoot>
<DBRoot2>/home/rdempsey/Calpont/data2</DBRoot2>
<DBRoot3>/home/rdempsey/Calpont/data3</DBRoot3>
<DBRoot4>/home/rdempsey/Calpont/data4</DBRoot4>
-->
<DBRMRoot>/home/rdempsey/Calpont/dbrm/BRM_saves</DBRMRoot>
<RAIDCriticalThreshold>90</RAIDCriticalThreshold>
<RAIDMajorThreshold>80</RAIDMajorThreshold>
<RAIDMinorThreshold>70</RAIDMinorThreshold>
<ParentOAMModuleName>dm1</ParentOAMModuleName>
<!-- these Temp items are to support the DataSwapSpace disk/memory management -->
<TempDiskMaxTotal>2048MB</TempDiskMaxTotal>
<TempDiskMinSpace>128MB</TempDiskMinSpace>
<TempDiskMaxSpace>2048MB</TempDiskMaxSpace>
<TempMemoryMapBlock>32</TempMemoryMapBlock>
<TempDiskPath>/var/tmp</TempDiskPath>
<!-- MemoryDebugLevel values 1 through 4 get copious quantities of information logged -->
<MemoryDebugLevel>0</MemoryDebugLevel>
<!-- <WorkingDir>/tmp/working</WorkingDir> -->
</SystemConfig>
<BackupConfig>
<BackupDestination>NONE</BackupDestination>
<BackupSource>NONE</BackupSource>
</BackupConfig>
<SystemModuleConfig>
<ModuleType1>dm</ModuleType1>
<ModuleDesc1>Director Module</ModuleDesc1>
<ModuleCount1>1</ModuleCount1>
<ModuleIPAddr1-1>127.0.0.1</ModuleIPAddr1-1>
<ModuleHostName1-1>Unassigned</ModuleHostName1-1>
<ModuleCPUCriticalThreshold1>90</ModuleCPUCriticalThreshold1>
<ModuleCPUMajorThreshold1>80</ModuleCPUMajorThreshold1>
<ModuleCPUMinorThreshold1>70</ModuleCPUMinorThreshold1>
<ModuleCPUMinorClearThreshold1>60</ModuleCPUMinorClearThreshold1>
<ModuleDiskCriticalThreshold1>90</ModuleDiskCriticalThreshold1>
<ModuleDiskMajorThreshold1>80</ModuleDiskMajorThreshold1>
<ModuleDiskMinorThreshold1>70</ModuleDiskMinorThreshold1>
<ModuleMemCriticalThreshold1>90</ModuleMemCriticalThreshold1>
<ModuleMemMajorThreshold1>80</ModuleMemMajorThreshold1>
<ModuleMemMinorThreshold1>70</ModuleMemMinorThreshold1>
<ModuleSwapCriticalThreshold1>90</ModuleSwapCriticalThreshold1>
<ModuleSwapMajorThreshold1>80</ModuleSwapMajorThreshold1>
<ModuleSwapMinorThreshold1>70</ModuleSwapMinorThreshold1>
<ModuleDiskMonitorFileSystem1-1>/</ModuleDiskMonitorFileSystem1-1>
<ModuleType2>um</ModuleType2>
<ModuleDesc2>User Module</ModuleDesc2>
<ModuleCount2>1</ModuleCount2>
<ModuleIPAddr1-2>127.0.0.1</ModuleIPAddr1-2>
<ModuleHostName1-2>Unassigned</ModuleHostName1-2>
<ModuleCPUCriticalThreshold2>90</ModuleCPUCriticalThreshold2>
<ModuleCPUMajorThreshold2>80</ModuleCPUMajorThreshold2>
<ModuleCPUMinorThreshold2>70</ModuleCPUMinorThreshold2>
<ModuleCPUMinorClearThreshold2>60</ModuleCPUMinorClearThreshold2>
<ModuleDiskCriticalThreshold2>90</ModuleDiskCriticalThreshold2>
<ModuleDiskMajorThreshold2>80</ModuleDiskMajorThreshold2>
<ModuleDiskMinorThreshold2>70</ModuleDiskMinorThreshold2>
<ModuleMemCriticalThreshold2>90</ModuleMemCriticalThreshold2>
<ModuleMemMajorThreshold2>80</ModuleMemMajorThreshold2>
<ModuleMemMinorThreshold2>70</ModuleMemMinorThreshold2>
<ModuleSwapCriticalThreshold2>90</ModuleSwapCriticalThreshold2>
<ModuleSwapMajorThreshold2>80</ModuleSwapMajorThreshold2>
<ModuleSwapMinorThreshold2>70</ModuleSwapMinorThreshold2>
<ModuleDiskMonitorFileSystem1-2>/</ModuleDiskMonitorFileSystem1-2>
<ModuleType3>pm</ModuleType3>
<ModuleDesc3>Performance Module</ModuleDesc3>
<ModuleCount3>1</ModuleCount3>
<ModuleIPAddr1-3>127.0.0.1</ModuleIPAddr1-3>
<ModuleHostName1-3>Unassigned</ModuleHostName1-3>
<ModuleCPUCriticalThreshold3>90</ModuleCPUCriticalThreshold3>
<ModuleCPUMajorThreshold3>80</ModuleCPUMajorThreshold3>
<ModuleCPUMinorThreshold3>70</ModuleCPUMinorThreshold3>
<ModuleCPUMinorClearThreshold3>60</ModuleCPUMinorClearThreshold3>
<ModuleDiskCriticalThreshold3>90</ModuleDiskCriticalThreshold3>
<ModuleDiskMajorThreshold3>80</ModuleDiskMajorThreshold3>
<ModuleDiskMinorThreshold3>70</ModuleDiskMinorThreshold3>
<ModuleMemCriticalThreshold3>90</ModuleMemCriticalThreshold3>
<ModuleMemMajorThreshold3>80</ModuleMemMajorThreshold3>
<ModuleMemMinorThreshold3>70</ModuleMemMinorThreshold3>
<ModuleSwapCriticalThreshold3>90</ModuleSwapCriticalThreshold3>
<ModuleSwapMajorThreshold3>80</ModuleSwapMajorThreshold3>
<ModuleSwapMinorThreshold3>70</ModuleSwapMinorThreshold3>
<ModuleDiskMonitorFileSystem1-3>/</ModuleDiskMonitorFileSystem1-3>
<ModuleType4>mm</ModuleType4>
<ModuleDesc4>Management Module</ModuleDesc4>
<ModuleCount4>0</ModuleCount4>
<ModuleIPAddr1-4>127.0.0.1</ModuleIPAddr1-4>
<ModuleHostName1-4>Unassigned</ModuleHostName1-4>
<ModuleCPUCriticalThreshold4>90</ModuleCPUCriticalThreshold4>
<ModuleCPUMajorThreshold4>80</ModuleCPUMajorThreshold4>
<ModuleCPUMinorThreshold4>70</ModuleCPUMinorThreshold4>
<ModuleCPUMinorClearThreshold4>60</ModuleCPUMinorClearThreshold4>
<ModuleDiskCriticalThreshold4>90</ModuleDiskCriticalThreshold4>
<ModuleDiskMajorThreshold4>80</ModuleDiskMajorThreshold4>
<ModuleDiskMinorThreshold4>70</ModuleDiskMinorThreshold4>
<ModuleMemCriticalThreshold4>90</ModuleMemCriticalThreshold4>
<ModuleMemMajorThreshold4>80</ModuleMemMajorThreshold4>
<ModuleMemMinorThreshold4>70</ModuleMemMinorThreshold4>
<ModuleSwapCriticalThreshold4>90</ModuleSwapCriticalThreshold4>
<ModuleSwapMajorThreshold4>80</ModuleSwapMajorThreshold4>
<ModuleSwapMinorThreshold4>70</ModuleSwapMinorThreshold4>
<ModuleDiskMonitorFileSystem1-4>/</ModuleDiskMonitorFileSystem1-4>
</SystemModuleConfig>
<SystemSwitchConfig>
<SwitchType1>es</SwitchType1>
<SwitchDesc1>Ethernet Switch</SwitchDesc1>
<SwitchCount1>1</SwitchCount1>
<SwitchIPAddr1-1>127.0.0.1</SwitchIPAddr1-1>
<SwitchHostName1-1>Unassigned</SwitchHostName1-1>
<SwitchType2>fs</SwitchType2>
<SwitchDesc2>Fiber Channel Switch</SwitchDesc2>
<SwitchCount2>1</SwitchCount2>
<SwitchIPAddr1-2>127.0.0.1</SwitchIPAddr1-2>
<SwitchHostName1-2>Unassigned</SwitchHostName1-2>
</SystemSwitchConfig>
<SystemStorageConfig>
<StorageType1>cu</StorageType1>
<StorageDesc1>Controller Unit</StorageDesc1>
<StorageCount1>1</StorageCount1>
<StorageIPAddr1-1>127.0.0.1</StorageIPAddr1-1>
<StorageHostName1-1>Unassigned</StorageHostName1-1>
<StorageType2>eu</StorageType2>
<StorageDesc2>Expansion Unit</StorageDesc2>
<StorageCount2>1</StorageCount2>
<StorageIPAddr1-2>127.0.0.1</StorageIPAddr1-2>
<StorageHostName1-2>Unassigned</StorageHostName1-2>
</SystemStorageConfig>
<SessionManager>
<MaxConcurrentTransactions>1000</MaxConcurrentTransactions>
<SharedMemoryTmpFile>/tmp/CalpontShm</SharedMemoryTmpFile>
<TxnIDFile>/home/rdempsey/Calpont/dbrm/SMTxnID</TxnIDFile>
</SessionManager>
<VersionBuffer>
<NumVersionBufferFiles>4</NumVersionBufferFiles>
<VersionBufferFileSize>1073741824</VersionBufferFileSize><!-- 1GB/file -->
<VersionBufferOID0>0</VersionBufferOID0>
<VersionBufferOID1>1</VersionBufferOID1>
<VersionBufferOID2>2</VersionBufferOID2>
<VersionBufferOID3>3</VersionBufferOID3>
</VersionBuffer>
<OIDManager>
<!-- XXXPAT: This is located in tmp so we all have R/W access.
It should be relocated to /usr/local/Calpont/share/oidbitmap
on the production machine-->
<OIDBitmapFile>/home/rdempsey/Calpont/dbrm/oidbitmap</OIDBitmapFile>
<FirstOID>3000</FirstOID>
</OIDManager>
<OracleConnector>
<OnDisconnect>Commit</OnDisconnect> <!-- or Rollback -->
</OracleConnector>
<WriteEngine>
<BulkRoot>/home/rdempsey/Calpont/bulk</BulkRoot>
</WriteEngine>
<!-- The DBRM configuration entries should look like this.
<DBRM_Master>
<NumSlaves>2</NumSlaves>
<IPAddr>10.100.4.69</IPAddr>
<Port>60310</Port>
</DBRM_Master>
<DBRM_Slave1>
<IPAddr>10.100.4.69</IPAddr>
<Port>60311</Port>
</DBRM_Slave1>
<DBRM_Slave2>
<IPAddr>10.100.4.68</IPAddr>
<Port>60311</Port>
</DBRM_Slave2>
-->
<!-- This is a one-node localhost configuration to use
for testing & integration -->
<DBRM_Master>
<NumSlaves>1</NumSlaves>
<IPAddr>127.0.0.1</IPAddr>
<Port>60310</Port>
</DBRM_Master>
<DBRM_Slave1>
<IPAddr>127.0.0.1</IPAddr>
<Port>60311</Port>
<Module>dm1</Module>
</DBRM_Slave1>
<DBRM_Slave2>
<IPAddr>127.0.0.1</IPAddr>
<Port>60311</Port>
<Module>dm1</Module>
</DBRM_Slave2>
<DBRM_Slave3>
<IPAddr>127.0.0.1</IPAddr>
<Port>60311</Port>
<Module>dm1</Module>
</DBRM_Slave3>
<DBRM_Slave4>
<IPAddr>127.0.0.1</IPAddr>
<Port>60311</Port>
<Module>dm1</Module>
</DBRM_Slave4>
<DBRM_Slave5>
<IPAddr>127.0.0.1</IPAddr>
<Port>60311</Port>
<Module>dm1</Module>
</DBRM_Slave5>
<DBRM_Slave6>
<IPAddr>127.0.0.1</IPAddr>
<Port>60311</Port>
<Module>dm1</Module>
</DBRM_Slave6>
<DBRM_Slave7>
<IPAddr>127.0.0.1</IPAddr>
<Port>60311</Port>
<Module>dm1</Module>
</DBRM_Slave7>
<DBRM_Slave8>
<IPAddr>127.0.0.1</IPAddr>
<Port>60311</Port>
<Module>dm1</Module>
</DBRM_Slave8>
<DBRM_Slave9>
<IPAddr>127.0.0.1</IPAddr>
<Port>60311</Port>
<Module>dm1</Module>
</DBRM_Slave9>
<DBRM_Slave10>
<IPAddr>127.0.0.1</IPAddr>
<Port>60311</Port>
<Module>dm1</Module>
</DBRM_Slave10>
</Calpont>

View File

@@ -3,4 +3,4 @@ install(FILES AlarmConfig.xml
Columnstore.xml
ProcessConfig.xml
ConsoleCmds.xml
DESTINATION ${ENGINE_ETCDIR} COMPONENT platform)
DESTINATION ${ENGINE_SYSCONFDIR}/columnstore COMPONENT platform)

View File

@@ -1,3 +1,10 @@
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/syslogSetup.sh.in" "${CMAKE_CURRENT_SOURCE_DIR}/syslogSetup.sh" @ONLY)
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/post-install.in" "${CMAKE_CURRENT_SOURCE_DIR}/post-install" @ONLY)
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/columnstore.in" "${CMAKE_CURRENT_SOURCE_DIR}/columnstore" @ONLY)
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/columnstoreInstall.sh.in" "${CMAKE_CURRENT_SOURCE_DIR}/columnstoreInstall.sh" @ONLY)
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/pre-uninstall.in" "${CMAKE_CURRENT_SOURCE_DIR}/pre-uninstall" @ONLY)
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/columnstoreLogRotate.in" "${CMAKE_CURRENT_SOURCE_DIR}/columnstoreLogRotate" @ONLY)
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/postInstall.sh.in" "${CMAKE_CURRENT_SOURCE_DIR}/postInstall.sh" @ONLY)
install(PROGRAMS post-install
pre-uninstall

View File

@@ -79,7 +79,7 @@ tmpDir=`$InstallDir/bin/getConfig SystemConfig SystemTempFileDir`
mkdir $tmpDir >/dev/null 2>&1
checkInstallSetup() {
InitialInstallFlag=`$InstallDir/bin/getConfig -c $InstallDir/etc/Columnstore.xml Installation InitialInstallFlag`
InitialInstallFlag=`$InstallDir/bin/getConfig -c @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml Installation InitialInstallFlag`
if [ $InitialInstallFlag != "y" ]; then
echo "Please run the postConfigure install script, check the Installation Guide"
echo "for additional details"
@@ -108,7 +108,7 @@ start() {
fi
#checkInstallSetup
CoreFileFlag=`$InstallDir/bin/getConfig -c $InstallDir/etc/Columnstore.xml Installation CoreFileFlag`
CoreFileFlag=`$InstallDir/bin/getConfig -c @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml Installation CoreFileFlag`
if [ $CoreFileFlag = "y" ]; then
#columnstore core files
ulimit -c unlimited > /dev/null 2>&1

View File

@@ -34,7 +34,7 @@ while true {
send_user " password - root password on the servers being installed'\n"
send_user " package-type - Package Type being installed (rpm, deb, or binary)\n"
send_user " config-file - Optional: Columnstore.xml config file with directory location, i.e. /root/Columnstore.xml\n"
send_user " Default version is $INSTALLDIR/etc/Columnstore.xml.rpmsave\n"
send_user " Default version is @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml.rpmsave\n"
send_user " mysql-password - MySQL password on the servers being installed'\n"
send_user " -d - Debug flag, output verbose information\n"
exit 0
@@ -78,16 +78,16 @@ expect {
}
if { $CONFIGFILE == " " } {
set CONFIGFILE $INSTALLDIR/etc/Columnstore.xml.rpmsave
set CONFIGFILE @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml.rpmsave
}
if { [catch { open $CONFIGFILE "r"} handle ] } {
puts "Calpont Config file not found: $CONFIGFILE"; exit 1
}
exec rm -f $INSTALLDIR/etc/Columnstore.xml.new > /dev/null 2>&1
exec mv -f $INSTALLDIR/etc/Columnstore.xml $INSTALLDIR/etc/Columnstore.xml.new > /dev/null 2>&1
exec /bin/cp -f $CONFIGFILE $INSTALLDIR/etc/Columnstore.xml > /dev/null 2>&1
exec rm -f @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml.new > /dev/null 2>&1
exec mv -f @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml.new > /dev/null 2>&1
exec /bin/cp -f $CONFIGFILE @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml > /dev/null 2>&1
set timeout 2
set INSTALL 2

View File

@@ -12,11 +12,11 @@
olddir /var/log/mariadb/columnstore/archive
su root root
}
/usr/local/mariadb/columnstore/etc/Columnstore.xml {
@ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml {
daily
dateext
copy
olddir /usr/local/mariadb/columnstore/etc/
olddir @ENGINE_SYSCONFDIR@/columnstore
}
/usr/local/mariadb/columnstore/mysql/db/*.err {
missingok

View File

@@ -68,8 +68,8 @@ if [ $is64bitpkg -eq 1 -a $is64bitos -ne 1 ]; then
exit 1
fi
if [ ! -f $installdir/etc/Columnstore.xml ]; then
echo "$installdir/etc/Columnstore.xml not found, exiting"
if [ ! -f @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml ]; then
echo "@ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml not found, exiting"
exit 1
fi
@@ -172,7 +172,7 @@ test -d $installdir/data1/systemFiles/dataTransaction || rmdir $installdir/data1
test -d $installdir/data1/systemFiles/dataTransaction/archive || rmdir $installdir/data1/systemFiles/dataTransaction/archive >/dev/null 2>&1
chmod 1755 $installdir/data1 >/dev/null 2>&1
chmod -R 1755 $installdir/data1/systemFiles >/dev/null 2>&1
chmod 1755 $installdir/etc > /dev/null 2>&1
chmod 1755 @ENGINE_SYSCONFDIR@/columnstore > /dev/null 2>&1
#create the bulk-load dirs
mkdir -p $installdir/data/bulk/data/import >/dev/null 2>&1
@@ -275,7 +275,7 @@ else
sed -i -e s@prefix=/home/guest@prefix=$prefix@g $installdir/bin/*
chown $user:$user $installdir/etc/Columnstore.xml
chown $user:$user @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml
cat <<EOD
@@ -307,7 +307,7 @@ rm -f $lockdir/mysql-Columnstore
rm -f $lockdir/columnstore
#backup copy of Alarm Config File
/bin/cp -f $installdir/etc/AlarmConfig.xml $installdir/etc/AlarmConfig.xml.installSave > /dev/null 2>&1
/bin/cp -f @ENGINE_SYSCONFDIR@/columnstore/AlarmConfig.xml @ENGINE_SYSCONFDIR@/columnstore/AlarmConfig.xml.installSave > /dev/null 2>&1
#check and get amazon env variables
aws=`which aws 2>/dev/null`

View File

@@ -62,7 +62,7 @@ log_user $DEBUG
if { $RPMPACKAGE == " " || $PASSWORD == " "} {puts "please enter both RPM and password, enter ./postInstaller.sh -h for additional info"; exit -1}
if { $CONFIGFILE == " " } {
set CONFIGFILE $INSTALLDIR/etc/Columnstore.xml.rpmsave
set CONFIGFILE @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml.rpmsave
}
if { [catch { open $CONFIGFILE "r"} handle ] } {
puts "Calpont Config file not found: $CONFIGFILE"; exit -1
@@ -113,8 +113,8 @@ expect {
}
expect -re "# "
log_user 0
exec mv -f $INSTALLDIR/etc/Columnstore.xml $INSTALLDIR/etc/Columnstore.xml.new > /dev/null 2>&1
exec mv -f $CONFIGFILE $INSTALLDIR/etc/Columnstore.xml > /dev/null 2>&1
exec mv -f @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml.new > /dev/null 2>&1
exec mv -f $CONFIGFILE @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml > /dev/null 2>&1
send_user "\n"
set timeout 380

View File

@@ -130,10 +130,10 @@ fi
if [ $quiet != 1 ]; then
#make copy of Columnstore.xml
/bin/cp -f $installdir/etc/Columnstore.xml $installdir/etc/Columnstore.xml.rpmsave > /dev/null 2>&1
/bin/cp -f @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml.rpmsave > /dev/null 2>&1
/bin/cp -f $installdir/mysql/my.cnf $installdir/mysql/my.cnf.rpmsave > /dev/null 2>&1
cp $installdir/bin/myCnf-include-args.text $installdir/bin/myCnf-include-args.text.rpmsave >& /dev/null
rm -f $installdir/etc/AlarmConfig.xml.installSave
rm -f @ENGINE_SYSCONFDIR@/columnstore/AlarmConfig.xml.installSave
fi
#remove OAMdbrootCheck file

View File

@@ -191,7 +191,7 @@ checkSyslog
if [ ! -z "$syslog_conf" ] ; then
$installdir/bin/setConfig -d Installation SystemLogConfigFile ${syslog_conf} >/dev/null 2>&1
if [ $non_root_user == "yes" ]; then
chown $user:$user $installdir/etc/Columnstore.xml*
chown $user:$user @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml*
fi
rm -f ${syslog_conf}.columnstoreSave

View File

@@ -105,34 +105,13 @@ void handleControlC(int i)
Oam::Oam()
{
// Assigned pointers to Config files
string calpontfiledir;
const char* cf = 0;
InstallDir = startup::StartUp::installDir();
calpontfiledir = InstallDir + "/etc";
//FIXME: we should not use this anymore. Everything should be based off the install dir
//If CALPONT_HOME is set, use it for etc directory
#ifdef _MSC_VER
cf = 0;
string cfStr = IDBreadRegistry("CalpontHome");
CalpontConfigFile = std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml";
if (!cfStr.empty())
cf = cfStr.c_str();
AlarmConfigFile = std::string(MCSSYSCONFDIR) + "/columnstore/AlarmConfig.xml";
#else
cf = getenv("CALPONT_HOME");
#endif
if (cf != 0 && *cf != 0)
calpontfiledir = cf;
CalpontConfigFile = calpontfiledir + "/Columnstore.xml";
AlarmConfigFile = calpontfiledir + "/AlarmConfig.xml";
ProcessConfigFile = calpontfiledir + "/ProcessConfig.xml";
ProcessConfigFile = std::string(MCSSYSCONFDIR) + "/columnstore/ProcessConfig.xml";
if (UseHdfs == 0)
{
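
Condensed, the constructor after this hunk assigns all three OAM config paths from MCSSYSCONFDIR; the CALPONT_HOME override and the per-install etc directory are gone. A sketch of the result (path assignments only, surrounding logic elided):

```cpp
Oam::Oam()
{
    InstallDir = startup::StartUp::installDir();  // still used by other members

    // All config files now resolve under the compiled-in sysconf dir.
    CalpontConfigFile = std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml";
    AlarmConfigFile   = std::string(MCSSYSCONFDIR) + "/columnstore/AlarmConfig.xml";
    ProcessConfigFile = std::string(MCSSYSCONFDIR) + "/columnstore/ProcessConfig.xml";

    // ... UseHdfs handling and the rest of the constructor unchanged ...
}
```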

View File

@@ -1,3 +1,3 @@
install(PROGRAMS functions test-001.sh test-002.sh test-003.sh test-004.sh DESTINATION ${ENGINE_POSTDIR} COMPONENT platform)
install(PROGRAMS functions DESTINATION ${ENGINE_POSTDIR} COMPONENT platform)

View File

@@ -1,87 +0,0 @@
#!/bin/bash
#
# $Id: test-001.sh 3704 2013-08-07 03:33:20Z bwilkinson $
prefix=/usr/local
USER=`whoami 2>/dev/null`
if [ $USER != "root" ]; then
prefix=$HOME
fi
if [ $USER != "root" ]; then
if [ -f $prefix/.bash_profile ]; then
profileFile=$prefix/.bash_profile
elif [ -f $prefix/.profile ]; then
profileFile=$prefix/.profile
else
profileFile=$prefix/.bashrc
fi
. $profileFile
fi
# Source function library.
if [ -f /etc/init.d/functions ]; then
. /etc/init.d/functions
fi
if [ -z "$COLUMNSTORE_INSTALL_DIR" ]; then
COLUMNSTORE_INSTALL_DIR=/usr/local/mariadb/columnstore
fi
export COLUMNSTORE_INSTALL_DIR=$COLUMNSTORE_INSTALL_DIR
test -f $COLUMNSTORE_INSTALL_DIR/post/functions && . $COLUMNSTORE_INSTALL_DIR/post/functions
scrname=`basename $0`
tname="check-syscat-oids"
mt=`module_type`
#These tests only for PM
if [ "$mt" != "pm" ]; then
exit 0
fi
#check for dbrm and data1, don't run if missing both
if firstboot; then
if [ -d $COLUMNSTORE_INSTALL_DIR/data1/000.dir ]; then
cplogger -c 50 $scrname "$tname" "missing dbrm data with existing 000.dir"
exit 1
else
exit 0
fi
else
#check for oidbitmap file
if oidbitmapfile; then
cplogger -c 50 $scrname "$tname" "missing oidbitmapfile with existing current file"
exit 1
fi
fi
#check for both current file and OIDBITMAP file
#Make sure all syscat OIDs are present (N.B. only works for shared-everything)
cplogger -i 48 $scrname "$tname"
catoids=
catoids="$catoids 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010"
catoids="$catoids 2001 2004"
catoids="$catoids 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040"
catoids="$catoids 2061 2064 2067 2070 2073 2076"
# TODO-this doesn't work with HDFS file system
#for oid in $catoids; do
# if [ ! -s `oid2file $oid` ]; then
# cplogger -c 50 $scrname "$tname" "could not find file for OID $oid"
# exit 1
# fi
#done
cplogger -i 52 $scrname "$tname"
exit 0

View File

@@ -1,64 +0,0 @@
#!/bin/bash
#
# $Id: test-002.sh 2937 2012-05-30 18:17:09Z rdempsey $
prefix=/usr/local
USER=`whoami 2>/dev/null`
if [ $USER != "root" ]; then
prefix=$HOME
fi
if [ $USER != "root" ]; then
if [ -f $prefix/.bash_profile ]; then
profileFile=$prefix/.bash_profile
elif [ -f $prefix/.profile ]; then
profileFile=$prefix/.profile
else
profileFile=$prefix/.bashrc
fi
. $profileFile
fi
# Source function library.
if [ -f /etc/init.d/functions ]; then
. /etc/init.d/functions
fi
if [ -z "$COLUMNSTORE_INSTALL_DIR" ]; then
COLUMNSTORE_INSTALL_DIR=/usr/local/mariadb/columnstore
fi
export COLUMNSTORE_INSTALL_DIR=$COLUMNSTORE_INSTALL_DIR
test -f $COLUMNSTORE_INSTALL_DIR/post/functions && . $COLUMNSTORE_INSTALL_DIR/post/functions
scrname=`basename $0`
tname="check-brm"
#Don't run on first boot
if firstboot; then
exit 0
fi
#Make sure BRM is read-write
cplogger -i 48 $scrname "$tname"
#turn this test off for now...it doesn't work if the DBRM isn't started, and these tests run too early
# we need a way to run some tests at different stages of system startup...
#dbrmctl status 2>&1 | egrep -qsi '^ok'
/bin/true
rc=$?
if [ $rc -ne 0 ]; then
cplogger -c 50 $scrname "$tname" "the BRM is read only"
exit 1
fi
cplogger -i 52 $scrname "$tname"
exit 0

View File

@@ -1,66 +0,0 @@
#!/bin/bash
#
# $Id: test-003.sh 2937 2012-05-30 18:17:09Z rdempsey $
prefix=/usr/local
USER=`whoami 2>/dev/null`
if [ $USER != "root" ]; then
prefix=$HOME
fi
if [ $USER != "root" ]; then
if [ -f $prefix/.bash_profile ]; then
profileFile=$prefix/.bash_profile
elif [ -f $prefix/.profile ]; then
profileFile=$prefix/.profile
else
profileFile=$prefix/.bashrc
fi
. $profileFile
fi
# Source function library.
if [ -f /etc/init.d/functions ]; then
. /etc/init.d/functions
fi
if [ -z "$COLUMNSTORE_INSTALL_DIR" ]; then
COLUMNSTORE_INSTALL_DIR=/usr/local/mariadb/columnstore
fi
export COLUMNSTORE_INSTALL_DIR=$COLUMNSTORE_INSTALL_DIR
test -f $COLUMNSTORE_INSTALL_DIR/post/functions && . $COLUMNSTORE_INSTALL_DIR/post/functions
scrname=`basename $0`
tname="check-oid-bitmap"
#Don't run on first boot
if firstboot; then
exit 0
fi
#Make sure there is an oid bitmap file if there are any EM entries
cplogger -i 48 $scrname "$tname"
obmfile=$(getConfig OIDManager OIDBitmapFile)
emcnt=$(editem -o 2001 | wc -l)
rc=1
if [ -f $obmfile -o $emcnt -eq 0 ]; then
rc=0
fi
if [ $rc -ne 0 ]; then
cplogger -c 50 $scrname "$tname" "there is no OID bitmap file but there are Extent Map entries"
exit 1
fi
cplogger -i 52 $scrname "$tname"
exit 0

View File

@@ -1,70 +0,0 @@
#!/bin/bash
#
# $Id: test-004.sh 1538 2009-07-22 18:57:04Z dhill $
#
# Validates that FilesPerColumnPartition setting is not set lower than existing extents.
#
prefix=/usr/local
USER=`whoami 2>/dev/null`
if [ $USER != "root" ]; then
prefix=$HOME
fi
if [ $USER != "root" ]; then
if [ -f $prefix/.bash_profile ]; then
profileFile=$prefix/.bash_profile
elif [ -f $prefix/.profile ]; then
profileFile=$prefix/.profile
else
profileFile=$prefix/.bashrc
fi
. $profileFile
fi
# Source function library.
if [ -f /etc/init.d/functions ]; then
. /etc/init.d/functions
fi
if [ -z "$COLUMNSTORE_INSTALL_DIR" ]; then
COLUMNSTORE_INSTALL_DIR=/usr/local/mariadb/columnstore
fi
export COLUMNSTORE_INSTALL_DIR=$COLUMNSTORE_INSTALL_DIR
test -f $COLUMNSTORE_INSTALL_DIR/post/functions && . $COLUMNSTORE_INSTALL_DIR/post/functions
scrname=`basename $0`
tname="validate-partition-size"
#Don't run on first boot
if firstboot; then
exit 0
fi
exit 0
cplogger -i 48 $scrname "$tname"
# Get the FilesPerColumnPartition setting from Columnstore.xml.
filesPer=$(getConfig ExtentMap FilesPerColumnPartition)
# Get the maximum segment number for all column files.
maxSeg=$(editem -i | awk -F '|' -v max=0 '{if($7>max)max=$7}END{print max+1}')
# Error and out if the maximum existing segment number is higher than the FilesPerColumnPartition setting.
if [ $maxSeg -gt $filesPer ]; then
cplogger -c 50 $scrname "$tname" "One or more tables were populated with FilesPerColumnPartition higher than the current setting."
exit 1
fi
cplogger -i 52 $scrname "$tname"
exit 0

View File

@@ -29,6 +29,7 @@
#include <netdb.h>
#include <readline/readline.h>
#include "config.h"
#include "liboamcpp.h"
#include "configcpp.h"
#include "installdir.h"
@@ -326,11 +327,11 @@ void reportThread(string reporttype)
system(cmd.c_str());
cmd = "echo ' ' >> " + outputFile;
system(cmd.c_str());
cmd = "echo '################# cat /etc/Columnstore.xml ################# ' >> " + outputFile;
cmd = "echo '################# cat /etc/columnstore/Columnstore.xml ################# ' >> " + outputFile;
system(cmd.c_str());
cmd = "echo ' ' >> " + outputFile;
system(cmd.c_str());
cmd = "cat " + installDir + "/etc/Columnstore.xml >> " + outputFile;
cmd = "cat " + std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml >> " + outputFile;
system(cmd.c_str());
}

View File

@@ -28,10 +28,12 @@ extern int h_errno;
#include "columnstoreversion.h"
#include "mcsadmin.h"
#include "boost/filesystem/operations.hpp"
#include "boost/filesystem/path.hpp"
#include "boost/scoped_ptr.hpp"
#include "boost/tokenizer.hpp"
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/path.hpp>
#include <boost/scoped_ptr.hpp>
#include <boost/tokenizer.hpp>
#include "config.h"
#include "sessionmanager.h"
#include "dbrm.h"
#include "messagequeue.h"
@@ -200,7 +202,7 @@ int main(int argc, char* argv[])
tmpDir = startup::StartUp::tmpDir();
installDir = startup::StartUp::installDir();
string cf = installDir + "/etc/" + ConsoleCmdsFile;
string cf = std::string(MCSSYSCONFDIR) + "/columnstore/" + ConsoleCmdsFile;
fConfig = Config::makeConfig(cf);
// setupSignalHandlers();
@@ -9497,7 +9499,7 @@ void printModuleDisk(ModuleDisk moduledisk)
cout << "Mount Point Total Blocks Used Blocks Usage %" << endl;
cout << "----------------------------- ------------ ------------ -------" << endl;
string etcdir = installDir + "/etc";
string etcdir = std::string(MCSSYSCONFDIR) + "/columnstore";
for ( unsigned int i = 0 ; i < moduledisk.diskusage.size(); i++)
{
@@ -9538,7 +9540,7 @@ void printModuleDisk(ModuleDisk moduledisk)
void printModuleResources(TopProcessCpuUsers topprocesscpuusers, ModuleCpu modulecpu, TopProcessMemoryUsers topprocessmemoryusers, ModuleMemory modulememory, ModuleDisk moduledisk)
{
Oam oam;
string etcdir = installDir + "/etc";
string etcdir = std::string(MCSSYSCONFDIR) + "/columnstore";
cout << endl << "Module '" + topprocesscpuusers.ModuleName + "' Resource Usage" << endl << endl;

View File

@@ -476,8 +476,8 @@ int main(int argc, char* argv[])
}
//backup current Columnstore.xml
string configFile = installDir + "/etc/Columnstore.xml";
string saveFile = installDir + "/etc/Columnstore.xml.save";
string configFile = MCSSYSCONFDIR + "/columnstore/Columnstore.xml";
string saveFile = MCSSYSCONFDIR + "/columnstore/Columnstore.xml.save";
string cmd = "rm -f " + saveFile;
system(cmd.c_str());
cmd = "cp " + configFile + " " + saveFile;
@@ -2427,9 +2427,9 @@ int main(int argc, char* argv[])
}
//copy Columnstore.xml to Columnstore.xml.rpmsave for postConfigure no-prompt option
cmd = "rm -f " + installDir + "/etc/Columnstore.xml.rpmsave";
cmd = "rm -f " + MCSSYSCONFDIR + "/columnstore/Columnstore.xml.rpmsave";
system(cmd.c_str());
cmd = "cp " + installDir + "/etc/Columnstore.xml " + installDir + "/etc/Columnstore.xml.rpmsave";
cmd = "cp " + MCSSYSCONFDIR + "/columnstore/Columnstore.xml " + MCSSYSCONFDIR + "/columnstore/Columnstore.xml.rpmsave";
int rtnCode = system(cmd.c_str());
if (WEXITSTATUS(rtnCode) != 0)

View File

@@ -25,6 +25,7 @@
#include <readline/readline.h>
#include "config.h"
#include "configcpp.h"
using namespace config;
@@ -100,7 +101,7 @@ bool waitForActive()
void dbrmDirCheck()
{
const string fname = installDir + "/etc/Columnstore.xml.rpmsave";
const string fname = std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml.rpmsave";
ifstream oldFile (fname.c_str());
if (!oldFile) return;

View File

@@ -47,6 +47,7 @@
#include <netdb.h>
#include <sys/sysinfo.h>
#include "config.h"
#include "liboamcpp.h"
#include "configcpp.h"
@@ -202,9 +203,9 @@ int main(int argc, char* argv[])
//copy Columnstore.xml.rpmsave if upgrade option is selected
if ( installType == "upgrade" )
{
cmd = "/bin/cp -f " + installDir + "/etc/Columnstore.xml " + installDir + "/etc/Columnstore.xml.new 2>&1";
cmd = "/bin/cp -f " + std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml " + std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml.new 2>&1";
system(cmd.c_str());
cmd = "/bin/cp -f " + installDir + "/etc/Columnstore.xml.rpmsave " + installDir + "/etc/Columnstore.xml 2>&1";
cmd = "/bin/cp -f " + std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml.rpmsave " + std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml 2>&1";
system(cmd.c_str());
}
@@ -1157,7 +1158,7 @@ bool updateProcessConfig(int serverTypeInstall)
}
}
string fileName = installDir + "/etc/ProcessConfig.xml";
string fileName = std::string(MCSSYSCONFDIR) + "/columnstore/ProcessConfig.xml";
//Save a copy of the original version
cmd = "/bin/cp -f " + fileName + " " + fileName + ".columnstoreSave > /dev/null 2>&1";
@@ -1351,7 +1352,7 @@ bool makeRClocal(string moduleName, int IserverTypeInstall)
*/
bool uncommentCalpontXml( string entry)
{
string fileName = installDir + "/etc/Columnstore.xml";
string fileName = std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml";
ifstream oldFile (fileName.c_str());

View File

@@ -65,10 +65,11 @@
#include <readline/readline.h>
#include <readline/history.h>
#include "boost/filesystem/operations.hpp"
#include "boost/filesystem/path.hpp"
#include "boost/tokenizer.hpp"
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/path.hpp>
#include <boost/tokenizer.hpp>
#include "config.h"
#include "columnstoreversion.h"
#include "liboamcpp.h"
#include "configcpp.h"
@@ -559,7 +560,7 @@ int main(int argc, char* argv[])
}
if ( oldFileName == oam::UnassignedName )
oldFileName = installDir + "/etc/Columnstore.xml.rpmsave";
oldFileName = std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml.rpmsave";
cout << endl;
cout << "This is the MariaDB ColumnStore System Configuration and Installation tool." << endl;
@@ -4281,8 +4282,8 @@ int main(int argc, char* argv[])
*/
bool checkSaveConfigFile()
{
string rpmFileName = installDir + "/etc/Columnstore.xml";
string newFileName = installDir + "/etc/Columnstore.xml.new";
string rpmFileName = std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml";
string newFileName = std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml.new";
string extentMapCheckOnly = " ";
@@ -4432,7 +4433,7 @@ bool checkSaveConfigFile()
return false;
}
cmd = "cd " + installDir + "/etc/;../bin/autoConfigure " + extentMapCheckOnly;
cmd = "cd " + std::string(MCSSYSCONFDIR) + "/columnstore;" + installDir + "/bin/autoConfigure " + extentMapCheckOnly;
rtnCode = system(cmd.c_str());
if (WEXITSTATUS(rtnCode) != 0)
@@ -4567,7 +4568,7 @@ bool updateProcessConfig()
string newModule = ">pm";
oldModule.push_back(">um");
string fileName = installDir + "/etc/ProcessConfig.xml";
string fileName = std::string(MCSSYSCONFDIR) + "/columnstore/ProcessConfig.xml";
//Save a copy of the original version
string cmd = "/bin/cp -f " + fileName + " " + fileName + ".columnstoreSave > /dev/null 2>&1";
@@ -4639,7 +4640,7 @@ bool updateProcessConfig()
*/
bool uncommentCalpontXml( string entry)
{
string fileName = installDir + "/etc/Columnstore.xml";
string fileName = std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml";
ifstream oldFile (fileName.c_str());
@@ -5314,7 +5315,7 @@ bool storageSetup(bool amazonInstall)
hadoopInstalled = "y";
// check whether StorageManager is installed
Config *processConfig = Config::makeConfig((installDir + "/etc/ProcessConfig.xml").c_str());
Config *processConfig = Config::makeConfig((std::string(MCSSYSCONFDIR) + "/columnstore/ProcessConfig.xml").c_str());
string storageManagerLocation;
bool storageManagerInstalled = false;
// search the 'PROCESSCONFIG#' entries for the StorageManager entry

View File

@@ -25,6 +25,7 @@
#include <cassert>
#include "columnstoreversion.h"
#include "config.h"
#include "processmanager.h"
#include "installdir.h"
#include "dbrm.h"
@@ -8778,7 +8779,7 @@ void ProcessManager::clearNICAlarms(std::string hostName)
******************************************************************************************/
bool ProcessManager::updateExtentMap()
{
string fileName = startup::StartUp::installDir() + "/etc/Columnstore.xml";
string fileName = std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml";
ifstream oldFile (fileName.c_str());
@@ -9018,7 +9019,7 @@ int ProcessManager::distributeConfigFile(std::string name, std::string file)
log.writeLog(__LINE__, "distributeConfigFile called for " + name + " file = " + file, LOG_TYPE_DEBUG);
string dirName = startup::StartUp::installDir() + "/etc/";
string dirName = std::string(MCSSYSCONFDIR) + "/columnstore/";
string fileName = dirName + file;
ifstream in (fileName.c_str());
@@ -10204,7 +10205,7 @@ int ProcessManager::OAMParentModuleChange()
else
{
// update the Columnstore.xml with the new IP Address
string cmd = "sed -i s/" + downOAMParentIPAddress + "/" + currentIPAddr + "/g " + startup::StartUp::installDir() + "/etc/Columnstore.xml";
string cmd = "sed -i s/" + downOAMParentIPAddress + "/" + currentIPAddr + "/g " + MCSSYSCONFDIR + "/columnstore/Columnstore.xml";
system(cmd.c_str());
// get parent hostname and IP address in case it changed

View File

@@ -28,6 +28,7 @@
#include <boost/uuid/uuid_io.hpp>
#include "columnstoreversion.h"
#include "config.h"
#include "IDBDataFile.h"
#include "IDBPolicy.h"
#include "processmonitor.h"
@@ -5885,7 +5886,7 @@ bool ProcessMonitor::amazonIPCheck()
log.writeLog(__LINE__, "Module is Running: '" + moduleName + "' / Instance '" + instanceID + "' current IP being reconfigured in Columnstore.xml. old = " + IPAddr + ", new = " + currentIPAddr, LOG_TYPE_DEBUG);
// update the Columnstore.xml with the new IP Address
string cmd = "sed -i s/" + IPAddr + "/" + currentIPAddr + "/g /usr/local/mariadb/columnstore/etc/Columnstore.xml";
string cmd = "sed -i s/" + IPAddr + "/" + currentIPAddr + "/g " + MCSSYSCONFDIR + "/columnstore/Columnstore.xml";
system(cmd.c_str());
}
else

View File

@@ -157,6 +157,6 @@ install(PROGRAMS
)
install(FILES storagemanager.cnf
DESTINATION ${INSTALL_ENGINE}/etc
DESTINATION ${ENGINE_SYSCONFDIR}/columnstore
COMPONENT platform)

View File

@@ -17,6 +17,10 @@
#include "Config.h"
// This one is the build system config
#include "config.h"
#include <boost/thread/mutex.hpp>
#include <boost/property_tree/ini_parser.hpp>
#include <boost/filesystem.hpp>
@@ -69,7 +73,7 @@ Config::Config() : die(false)
// the paths to search in order
paths.push_back(".");
if (cs_install_dir)
paths.push_back(string(cs_install_dir) + "/etc");
paths.push_back(string(MCSSYSCONFDIR) + "/columnstore");
paths.push_back("/etc");
for (uint i = 0; i < paths.size(); i++)
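
The resulting search order is: current directory, then MCSSYSCONFDIR/columnstore, then /etc (the cs_install_dir conditional is elided here). A self-contained sketch of that lookup; the helper name and target file name are assumptions, only the path order comes from the hunk:

```cpp
#include <fstream>
#include <string>
#include <vector>

#include "config.h"  // provides MCSSYSCONFDIR

// Return the first readable copy of `filename` along the search path,
// or an empty string if none is found.
std::string findConfigFile(const std::string& filename)
{
    std::vector<std::string> paths;
    paths.push_back(".");                                          // cwd first
    paths.push_back(std::string(MCSSYSCONFDIR) + "/columnstore");  // compiled-in dir
    paths.push_back("/etc");                                       // last resort

    for (size_t i = 0; i < paths.size(); i++)
    {
        const std::string candidate = paths[i] + "/" + filename;
        if (std::ifstream(candidate).good())  // first readable hit wins
            return candidate;
    }
    return std::string();
}
```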

View File

@@ -39,6 +39,7 @@
#include <readline/readline.h>
#include <readline/history.h>
#include "config.h"
#include "liboamcpp.h"
#include "configcpp.h"
@@ -623,7 +624,7 @@ int main(int argc, char* argv[])
for ( int retry = 0 ; retry < 5 ; retry++ )
{
cmd = "./remote_scp_get.sh " + installParentModuleIPAddr + " " + password + " " + installDir + "" + installLocation + "/etc/Columnstore.xml " + systemUser + " " + debug_flag;
cmd = "./remote_scp_get.sh " + installParentModuleIPAddr + " " + password + " " + std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml " + systemUser + " " + debug_flag;
rtnCode = system(cmd.c_str());
sleep(2);
@@ -685,7 +686,7 @@ RPMSAVE:
RPMSAVE:
//try Columnstore.xml.rpmsave
cout << "Get System Columnstore.xml.rpmsave " << flush;
cmd = "./remote_scp_get.sh " + installParentModuleIPAddr + " " + password + " " + installDir + "" + installLocation + "/etc/Columnstore.xml.rpmsave " + systemUser + " " + debug_flag;
cmd = "./remote_scp_get.sh " + installParentModuleIPAddr + " " + password + " " + std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml.rpmsave " + systemUser + " " + debug_flag;
rtnCode = system(cmd.c_str());
if (rtnCode == 0)

View File

@@ -1,219 +0,0 @@
#!/usr/bin/expect
#
# $Id: beetlejuice_installer.sh 421 2007-04-05 15:46:55Z dhill $
#
# Beetlejuice Installer
# Argument 0 - Server IP address
# Argument 1 - Root Password
# Argument 2 - Debug flag 1 for on, 0 for off
set timeout 30
set USERNAME root
set SERVER [lindex $argv 0]
set PASSWORD [lindex $argv 1]
set PACKAGE [lindex $argv 2]
set RELEASE [lindex $argv 3]
set DEBUG [lindex $argv 4]
log_user $DEBUG
spawn -noecho /bin/bash
#
# get the package
#
send_user "Get Calpont Package "
send "rm -f $PACKAGE\n"
#expect -re "#"
send "smbclient //cal6500/shared -Wcalpont -Uoamuser%Calpont1 -c 'cd Iterations/$RELEASE/;prompt OFF;mget $PACKAGE'\n"
expect {
-re "NT_STATUS_NO_SUCH_FILE" { send_user "FAILED: $PACKAGE not found in //cal6500/shared/Iterations/$RELEASE/\n" ; exit -1 }
-re "getting" { send_user "DONE" } abort
}
send_user "\n"
#
# send the DM package
#
expect -re "#"
send_user "Copy Calpont Package "
send "ssh $USERNAME@$SERVER 'rm -f /root/calpont*.rpm'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
expect -re "#"
send "scp $PACKAGE $USERNAME@$SERVER:/root/.\n"
expect {
-re "authenticity" { send "yes\n"
expect {
-re "word: " { send "$PASSWORD\n" } abort
}
}
-re "service not known" { send_user "FAILED: Invalid Host\n" ; exit -1 }
-re "word: " { send "$PASSWORD\n" } abort
}
expect {
-re "100%" { send_user "DONE" } abort
-re "scp" { send_user "FAILED\n" ;
send_user "\n*** Installation Failed\n" ;
exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
-re "No such file or directory" { send_user "FAILED: Invalid package\n" ; exit -1 }
}
send "rm -f $PACKAGE\n"
#
# backup custom os files
#
send_user "\n"
expect -re "#"
send_user "Backup Custom OS Files "
send "ssh $USERNAME@$SERVER 'rm -f /etc/*.calpont;cp /etc/inittab /etc/inittab.calpont;cp /etc/syslog.conf /etc/syslog.conf.calpont'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "#" { send_user "DONE" } abort
-re "cp" { send_user "FAILED" ; exit -1 }
}
send_user "\n"
#
# unmount disk
#
expect -re "#"
send_user "Unmount disk "
send "ssh $USERNAME@$SERVER 'umount -a'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "#" { send_user "DONE" } abort
}
send_user "\n"
#
# erase package
#
expect -re "#"
send_user "Erase Old Calpont-oracle Package "
send "ssh $USERNAME@$SERVER ' rpm -e --nodeps calpont-oracle'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "uninstall completed" { send_user "DONE" } abort
-re "#" { send_user "DONE" } abort
-re "not installed" { send_user "WARNING: Package not installed" } abort
-re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
}
send_user "\n"
expect -re "#"
send_user "Erase Old Calpont-Mysql Package "
send "ssh $USERNAME@$SERVER ' rpm -e --nodeps calpont-mysql'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "uninstall completed" { send_user "DONE" } abort
-re "#" { send_user "DONE" } abort
-re "not installed" { send_user "WARNING: Package not installed" } abort
-re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
}
send_user "\n"
expect -re "#"
send_user "Erase Old Calpont Package "
send "ssh $USERNAME@$SERVER ' rpm -e --nodeps calpont'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "uninstall completed" { send_user "DONE" } abort
-re "#" { send_user "DONE" } abort
-re "not installed" { send_user "WARNING: Package not installed" } abort
-re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
}
send_user "\n"
expect -re "#"
#
# install package
#
expect -re "#"
set timeout 120
send_user "Install New Calpont Package "
send "ssh $USERNAME@$SERVER ' rpm -ivh /root/$PACKAGE'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "completed" { send_user "DONE" } abort
-re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ;
send_user "\n*** Installation Failed\n" ;
exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
}
send_user "\n"
#
# Restore custom os files
#
set timeout 30
expect -re "#"
send_user "Restore Custom OS Files "
send "ssh $USERNAME@$SERVER 'mv -f /etc/inittab.calpont /etc/inittab;mv -f /etc/syslog.conf.calpont /etc/syslog.conf'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "#" { send_user "DONE" } abort
-re "mv: cannot" { send_user "FAILED" ; exit -1 }
}
send_user "\n"
#
# mount disk
#
expect -re "#"
send_user "Mount disk "
send "ssh $USERNAME@$SERVER 'mount -a'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "#" { send_user "DONE" } abort
}
send_user "\n"
#
# restart syslog
#
expect -re "#"
send_user "Restart syslog service "
send "ssh $USERNAME@$SERVER 'service syslog restart'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "Starting kernel logger" { send_user "DONE" } abort
-re "service " { send_user "WARNING: service not available" } abort
}
send_user "\n"
#
# startup ProcMons
#
expect -re "#"
send_user "Startup ProcMon's "
send "ssh $USERNAME@$SERVER 'kill -HUP 1'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "#" { send_user "DONE" } abort
}
send_user "\n"
#
exit

View File

@@ -1,259 +0,0 @@
#!/usr/bin/expect
#
# $Id: beetlejuice_installer.sh 421 2007-04-05 15:46:55Z dhill $
#
# Beetlejuice Installer
# Argument 0 - Server IP address
# Argument 1 - Root Password
# Argument 2 - Debug flag 1 for on, 0 for off
set timeout 30
set USERNAME root
set SERVER [lindex $argv 0]
set PASSWORD [lindex $argv 1]
set SYSTEMRPM [lindex $argv 2]
set CALPONTRPMNAME [lindex $argv 3]
set CONNECTORRPM1NAME [lindex $argv 4]
set CONNECTORRPM2NAME [lindex $argv 5]
set RELEASE [lindex $argv 6]
set DEBUG [lindex $argv 7]
set CALPONTRPM $CALPONTRPMNAME"-1"$SYSTEMRPM
set CONNECTORRPM1 $CONNECTORRPM1NAME"-1"$SYSTEMRPM
set CONNECTORRPM2 $CONNECTORRPM2NAME"-1"$SYSTEMRPM
#set SHARED "//cal6500/shared"
set SHARED "//calweb/shared"
log_user $DEBUG
spawn -noecho /bin/bash
#
# get the package
#
send_user "Get Calpont Packages "
send "rm -f $SYSTEMRPM\n"
#expect -re "#"
send "smbclient $SHARED -Wcalpont -Uoamuser%Calpont1 -c 'cd Iterations/$RELEASE/;prompt OFF;mget $SYSTEMRPM'\n"
expect {
-re "NT_STATUS_NO_SUCH_FILE" { send_user "FAILED: $SYSTEMRPM not found in $SHARED/Iterations/$RELEASE/\n" ; exit -1 }
-re "getting" { send_user "DONE" } abort
}
send_user "\n"
#
# send the DM Package
#
expect -re "#"
send_user "Copy Calpont Packages "
send "ssh $USERNAME@$SERVER 'rm -f /root/$SYSTEMRPM'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
expect -re "#"
send "scp $SYSTEMRPM $USERNAME@$SERVER:/root/.\n"
expect {
-re "authenticity" { send "yes\n"
expect {
-re "word: " { send "$PASSWORD\n" } abort
}
}
-re "service not known" { send_user "FAILED: Invalid Host\n" ; exit -1 }
-re "word: " { send "$PASSWORD\n" } abort
}
expect {
-re "100%" { send_user "DONE" } abort
-re "scp" { send_user "FAILED\n" ;
send_user "\n*** Installation Failed\n" ;
exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
-re "No such file or directory" { send_user "FAILED: Invalid Package\n" ; exit -1 }
}
send "rm -f $SYSTEMRPM\n"
#
# backup custom os files
#
send_user "\n"
expect -re "#"
send_user "Backup Custom OS Files "
send "ssh $USERNAME@$SERVER 'rm -f /etc/*.calpont;cp /etc/inittab /etc/inittab.calpont;cp /etc/syslog.conf /etc/syslog.conf.calpont'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "#" { send_user "DONE" } abort
-re "cp" { send_user "FAILED" ; exit -1 }
}
send_user "\n"
#
# unmount disk
#
expect -re "#"
send_user "Unmount disk "
send "ssh $USERNAME@$SERVER 'umount -a'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "#" { send_user "DONE" } abort
}
send_user "\n"
#
# erase Package
#
expect -re "#"
send_user "Erase Old $CONNECTORRPM1NAME Package "
send "ssh $USERNAME@$SERVER ' rpm -e --nodeps $CONNECTORRPM1NAME'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "uninstall completed" { send_user "DONE" } abort
-re "#" { send_user "DONE" } abort
-re "not installed" { send_user "WARNING: Package not installed" } abort
-re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
}
send_user "\n"
expect -re "#"
send_user "Erase Old $CONNECTORRPM2NAME Package "
send "ssh $USERNAME@$SERVER ' rpm -e --nodeps $CONNECTORRPM2NAME'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "uninstall completed" { send_user "DONE" } abort
-re "#" { send_user "DONE" } abort
-re "not installed" { send_user "WARNING: Package not installed" } abort
-re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
}
send_user "\n"
expect -re "#"
send_user "Erase Old $CALPONTRPMNAME Package "
send "ssh $USERNAME@$SERVER ' rpm -e --nodeps $CALPONTRPMNAME'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "uninstall completed" { send_user "DONE" } abort
-re "#" { send_user "DONE" } abort
-re "not installed" { send_user "WARNING: Package not installed" } abort
-re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
}
send_user "\n"
#
# install Package
#
expect -re "#"
set timeout 120
send_user "Install New $CALPONTRPMNAME Package "
send "ssh $USERNAME@$SERVER ' rpm -ivh /root/$CALPONTRPM'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "completed" { send_user "DONE" } abort
-re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ;
send_user "\n*** Installation Failed\n" ;
exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
}
send_user "\n"
expect -re "#"
send_user "Install New $CONNECTORRPM1NAME Package "
send "ssh $USERNAME@$SERVER ' rpm -ivh /root/$CONNECTORRPM1'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "completed" { send_user "DONE" } abort
-re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ;
send_user "\n*** Installation Failed\n" ;
exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
}
send_user "\n"
expect -re "#"
send_user "Install New $CONNECTORRPM2NAME Package "
send "ssh $USERNAME@$SERVER ' rpm -ivh /root/$CONNECTORRPM2'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "completed" { send_user "DONE" } abort
-re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ;
send_user "\n*** Installation Failed\n" ;
exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
}
send_user "\n"
#
# Restore custom os files
#
set timeout 30
expect -re "#"
send_user "Restore Custom OS Files "
send "ssh $USERNAME@$SERVER 'mv -f /etc/inittab.calpont /etc/inittab;mv -f /etc/syslog.conf.calpont /etc/syslog.conf'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "#" { send_user "DONE" } abort
-re "mv: cannot" { send_user "FAILED" ; exit -1 }
}
send_user "\n"
#
# mount disk
#
expect -re "#"
send_user "Mount disk "
send "ssh $USERNAME@$SERVER 'mount -a'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "#" { send_user "DONE" } abort
}
send_user "\n"
#
# restart syslog
#
expect -re "#"
send_user "Restart syslog service "
send "ssh $USERNAME@$SERVER 'service syslog restart'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "Starting kernel logger" { send_user "DONE" } abort
-re "service " { send_user "WARNING: service not available" } abort
}
send_user "\n"
#
# startup ProcMons
#
expect -re "#"
send_user "Startup ProcMon's "
send "ssh $USERNAME@$SERVER 'kill -HUP 1'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "#" { send_user "DONE" } abort
}
send_user "\n"
#
exit

View File

@@ -234,7 +234,7 @@ int main(int argc, char* argv[])
exit (-1);
}
cmd = "./remote_scp_get.sh " + parentOAMModuleIPAddr + " " + password + " /usr/local/mariadb/columnstore/etc/Columnstore.xml 0 ";
cmd = "./remote_scp_get.sh " + parentOAMModuleIPAddr + " " + password + " " + MCSSYSCONFDIR + "/columnstore/Columnstore.xml 0 ";
rtnCode = system(cmd.c_str());
if (rtnCode == 0)
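The hunk above shows the pattern this patch applies across the tree: paths are built from the compile-time MCSSYSCONFDIR define rather than the old install prefix. A minimal sketch of the new path construction, assuming config.h is on the include path:

#include <string>
#include "config.h"  // provides MCSSYSCONFDIR ("/etc" unless ENGINE_SYSCONFDIR overrides it)

std::string configFile = std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml";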

View File

@ -1,365 +0,0 @@
#!/usr/bin/expect
#
# $Id: parent_installer.sh 421 2007-04-05 15:46:55Z dhill $
#
# Parent OAM Installer, copy RPM's and custom OS files from postConfigure script
# Argument 0 - Parent OAM IP address
# Argument 1 - Root Password of Parent OAM Module
# Argument 2 - Calpont Config File
# Argument 3 - Debug flag 1 for on, 0 for off
set timeout 40
set USERNAME root
set SERVER [lindex $argv 0]
set PASSWORD [lindex $argv 1]
set PACKAGE [lindex $argv 2]
set RELEASE [lindex $argv 3]
set CONFIGFILE [lindex $argv 4]
set DEBUG [lindex $argv 5]
set CALPONTPACKAGE infinidb-platform-$PACKAGE
set CALPONTPACKAGE0 infinidb-0$PACKAGE
set CALPONTPACKAGE1 infinidb-1$PACKAGE
set ORACLEPACKAGE infinidb-oracle$PACKAGE
set MYSQLPACKAGE infinidb-storage-engine-$PACKAGE
set MYSQLDPACKAGE infinidb-mysql-$PACKAGE
set SHARED "//calweb/shared"
log_user $DEBUG
spawn -noecho /bin/bash
send "rm -f $PACKAGE,$CALPONTPACKAGE0,$CALPONTPACKAGE1,$ORACLEPACKAGE,$MYSQLPACKAGE,$MYSQLDPACKAGE\n"
#
# delete and erase all old packages from Director Module
#
send "ssh $USERNAME@$SERVER 'rm -f /root/calpont*.rpm'\n"
expect {
-re "authenticity" { send "yes\n"
expect {
-re "word: " { send "$PASSWORD\n" } abort
}
}
-re "service not known" { send_user "FAILED: Invalid Host\n" ; exit -1 }
-re "word: " { send "$PASSWORD\n" } abort
}
expect {
-re "#" { } abort
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
}
#
# erase calpont-oracle package
#
expect -re "# "
send_user "Erase Old Calpont-Oracle Connector Package "
send "ssh $USERNAME@$SERVER ' rpm -e --nodeps --allmatches calpont-oracle'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "uninstall completed" { send_user "DONE" } abort
-re "# " { send_user "DONE" } abort
-re "not installed" { send_user "WARNING: Package not installed" } abort
-re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
}
send_user "\n"
#
# erase infinidb-mysql package
#
expect -re "# "
send_user "Erase Old Calpont-Mysqld Connector Package "
send "ssh $USERNAME@$SERVER 'pkill -9 mysqld'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "# " { } abort
}
send "ssh $USERNAME@$SERVER ' rpm -e --nodeps --allmatches infinidb-mysql'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "uninstall completed" { send_user "DONE" } abort
-re "# " { send_user "DONE" } abort
-re "not installed" { send_user "WARNING: Package not installed" } abort
-re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
}
send_user "\n"
#
# erase infinidb-storage-engine package
#
expect -re "# "
send_user "Erase Old Calpont-Mysql Connector Package "
send "ssh $USERNAME@$SERVER ' rpm -e --nodeps --allmatches infinidb-storage-engine'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "uninstall completed" { send_user "DONE" } abort
-re "# " { send_user "DONE" } abort
-re "not installed" { send_user "WARNING: Package not installed" } abort
-re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
}
send_user "\n"
send "rm -f $PACKAGE\n"
#
# erase calpont package
#
expect -re "# "
send_user "Erase Old Calpont Packages "
send "ssh $USERNAME@$SERVER ' rpm -e --nodeps --allmatches infinidb-libs infinidb-platform infinidb-enterprise'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "uninstall completed" { send_user "DONE" } abort
-re "# " { send_user "DONE" } abort
-re "not installed" { send_user "WARNING: Package not installed" } abort
-re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
}
send_user "\n"
#
# get the calpont package
#
expect -re "# "
send_user "Get Calpont Package "
send "smbclient $SHARED -Wcalpont -Uoamuser%Calpont1 -c 'cd Iterations/$RELEASE/;prompt OFF;mget $CALPONTPACKAGE0'\n"
expect {
-re "NT_STATUS_NO_SUCH_FILE" {
send "smbclient $SHARED -Wcalpont -Uoamuser%Calpont1 -c 'cd Iterations/$RELEASE/;prompt OFF;mget $CALPONTPACKAGE1'\n"
expect {
-re "NT_STATUS_NO_SUCH_FILE" { send_user "FAILED: $CALPONTPACKAGE not found\n" ; exit -1 }
-re "getting" { send_user "DONE" } abort
}
}
-re "getting" { send_user "DONE" } abort
}
send_user "\n"
#
# send the calpont package
#
send_user "Copy Calpont Package "
send "scp $CALPONTPACKAGE $USERNAME@$SERVER:/root/.\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
expect {
-re "100%" { send_user "DONE" } abort
-re "scp" { send_user "FAILED\n" ;
send_user "\n*** Installation Failed\n" ;
exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
-re "No such file or directory" { send_user "FAILED: Invalid package\n" ; exit -1 }
}
send_user "\n"
send "rm -f $PACKAGE\n"
#
# install calpont package
#
expect -re "# "
set timeout 120
send_user "Install New Calpont Package "
send "ssh $USERNAME@$SERVER ' rpm -ivh /root/$CALPONTPACKAGE'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "completed" { send_user "DONE" } abort
-re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ;
send_user "\n*** Installation Failed\n" ;
exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
}
send_user "\n"
set timeout 40
expect -re "# "
send "rm -f $PACKAGE\n"
#
if { $CONFIGFILE != "NULL"} {
#
# copy over Columnstore.xml file
#
send_user "Copy Calpont Configuration File "
send "scp $CONFIGFILE $USERNAME@$SERVER:/usr/local/mariadb/columnstore/etc/Columnstore.xml\n"
expect -re "word: "
# send the password
send "$PASSWORD\n"
expect {
-re "100%" { send_user "DONE" } abort
-re "scp" { send_user "FAILED\n" ;
send_user "\n*** Installation Failed\n" ;
exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
-re "No such file or directory" { send_user "FAILED: Invalid package\n" ; exit -1 }
}
} else {
#
# rename previous installed config file
#
send_user "Copy RPM-saved Calpont Configuration File "
send "ssh $USERNAME@$SERVER 'cd /usr/local/mariadb/columnstore/etc/;mv -f Columnstore.xml Columnstore.xml.install;cp -v Columnstore.xml.rpmsave Columnstore.xml'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "Columnstore.xml" { send_user "DONE" } abort
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
}
}
send_user "\n"
#
# get the calpont-oracle package
#
set timeout 40
expect -re "# "
send_user "Get Calpont-Oracle Connector Package "
send "smbclient $SHARED -Wcalpont -Uoamuser%Calpont1 -c 'cd Iterations/$RELEASE/;prompt OFF;mget $ORACLEPACKAGE'\n"
expect {
-re "NT_STATUS_NO_SUCH_FILE" { send_user "WARNING: $ORACLEPACKAGE not found, skipping\n" } abort
-re "getting" { send_user "DONE\n"
#
# send the calpont-oracle package
#
expect -re "# "
send_user "Copy Calpont-Oracle Connector Package "
send "scp $ORACLEPACKAGE $USERNAME@$SERVER:/root/.\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "100%" { send_user "DONE" } abort
-re "scp" { send_user "FAILED\n" ;
send_user "\n*** Installation Failed\n" ;
exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
-re "No such file or directory" { send_user "FAILED: Invalid package\n" ; exit -1 }
}
#
# install calpont-oracle package
#
send_user "\n"
expect -re "# "
set timeout 120
send_user "Install Calpont-Oracle Connector Package "
send "ssh $USERNAME@$SERVER ' rpm -ivh /root/$ORACLEPACKAGE'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "completed" { send_user "DONE" } abort
-re "Failed dependencies" { send_user "FAILED: Failed dependencies" ; exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
}
send_user "\n"
}
}
set timeout 40
expect -re "# "
#
# get the calpont-mysql package
#
send_user "Get Calpont-Mysql Connector Package "
send "smbclient $SHARED -Wcalpont -Uoamuser%Calpont1 -c 'cd Iterations/$RELEASE/;prompt OFF;mget $MYSQLPACKAGE'\n"
expect {
-re "NT_STATUS_NO_SUCH_FILE" { send_user "WARNING: $MYSQLPACKAGE not found, skipping\n" } abort
-re "getting" { send_user "DONE\n"
#
# send the calpont-mysql package
#
expect -re "# "
send_user "Copy Calpont-Mysql Connector Package "
send "scp $MYSQLPACKAGE $USERNAME@$SERVER:/root/.\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "100%" { send_user "DONE" } abort
-re "scp" { send_user "FAILED\n" ;
send_user "\n*** Installation Failed\n" ;
exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
-re "No such file or directory" { send_user "FAILED: Invalid package\n" ; exit -1 }
}
#
# install calpont-mysql package
#
send_user "\n"
expect -re "# "
set timeout 120
send_user "Install Calpont-Mysql Connector Package "
send "ssh $USERNAME@$SERVER ' rpm -ivh $MYSQLPACKAGE'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "completed" { send_user "DONE" } abort
-re "Failed dependencies" { send_user "FAILED: Failed dependencies" ; exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
}
send_user "\n"
}
}
expect -re "# "
#
# get the infinidb-mysql package
#
send_user "Get Calpont-Mysqld Package "
send "smbclient $SHARED -Wcalpont -Uoamuser%Calpont1 -c 'cd Iterations/$RELEASE/;prompt OFF;mget $MYSQLDPACKAGE'\n"
expect {
-re "NT_STATUS_NO_SUCH_FILE" { send_user "WARNING: $MYSQLDPACKAGE not found, skipping\n" } abort
-re "getting" { send_user "DONE\n"
#
# send the infinidb-mysql package
#
expect -re "# "
send_user "Copy Calpont-Mysqld Package "
send "scp $MYSQLDPACKAGE $USERNAME@$SERVER:.\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "100%" { send_user "DONE" } abort
-re "scp" { send_user "FAILED\n" ;
send_user "\n*** Installation Failed\n" ;
exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
-re "No such file or directory" { send_user "FAILED: Invalid package\n" ; exit -1 }
}
#
# install infinidb-mysql-mysqld package
#
send_user "\n"
expect -re "# "
set timeout 120
send_user "Install Calpont-Mysqld Package "
send "ssh $USERNAME@$SERVER ' rpm -ivh $MYSQLDPACKAGE'\n"
expect -re "word: "
# password for ssh
send "$PASSWORD\n"
# check return
expect {
-re "completed" { send_user "DONE" } abort
-re "Failed dependencies" { send_user "FAILED: Failed dependencies" ; exit -1 }
-re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 }
}
send_user "\n"
}
}
#
exit

View File

@ -28,6 +28,7 @@ using namespace std;
#include <boost/algorithm/string.hpp>
#include "config.h"
#include "dbbuilder.h"
#include "systemcatalog.h"
#include "liboamcpp.h"
@ -227,7 +228,7 @@ int main(int argc, char* argv[])
//@bug5554, make sure IDBPolicy matches the Columnstore.xml config
try
{
string calpontConfigFile(startup::StartUp::installDir() + "/etc/Columnstore.xml");
string calpontConfigFile(std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml");
config::Config* sysConfig = config::Config::makeConfig(calpontConfigFile.c_str());
string tmp = sysConfig->getConfig("Installation", "DBRootStorageType");

View File

@ -1,47 +0,0 @@
include_directories(${KDE4_INCLUDES} ${KDE4_INCLUDE_DIR} ${QT_INCLUDES} )
########### next target ###############
set(evalidx_SRCS evalidx.cpp)
kde4_add_executable(evalidx ${evalidx_SRCS})
target_link_libraries(evalidx ${KDE4_KDECORE_LIBS} dmlpackageproc execplan joblist rowgroup writeengine brm dataconvert cacheutils dmlpackage messageqcpp loggingcpp configcpp rwlock @boost_thread_lib@ xml2 joiner oamcpp snmpmanager @boost_filesystem_lib@ @boost_date_time_lib@ multicast funcexp)
install(TARGETS evalidx ${INSTALL_TARGETS_DEFAULT_ARGS})
########### install files ###############
#original Makefile.am contents follow:
## $Id: Makefile.am 333 2009-04-03 20:35:04Z rdempsey $
### Process this file with automake to produce Makefile.in
#
#AM_CPPFLAGS = $(idb_cppflags)
#AM_CFLAGS = $(idb_cflags)
#AM_CXXFLAGS = $(idb_cxxflags)
#AM_LDFLAGS = $(idb_ldflags)
#bin_PROGRAMS = evalidx
#evalidx_SOURCES = evalidx.cpp
#evalidx_CPPFLAGS = @idb_common_includes@ $(AM_CPPFLAGS)
#evalidx_LDFLAGS = @idb_common_ldflags@ -ldmlpackageproc -lexecplan -ljoblist -lrowgroup -lwriteengine -lbrm \
#-ldataconvert -lcacheutils -ldmlpackage -lmessageqcpp -lloggingcpp -lconfigcpp -lrwlock -l@boost_thread_lib@ -lxml2 \
#-ljoiner -loamcpp -lsnmpmanager -l@boost_filesystem_lib@ -l@boost_date_time_lib@ @netsnmp_libs@ -lmulticast -lfuncexp \
#$(AM_LDFLAGS)
#
#test:
#
#coverage:
#
#leakcheck:
#
#docs:
#
#bootstrap: install-data-am
#

View File

@ -1,98 +0,0 @@
#!/usr/bin/python
import os, sys, glob, shutil, xml.dom.minidom
def find_paths():
"""Find DBRoot and BulkRoot."""
try:
config_file = os.environ['COLUMNSTORE_CONFIG_FILE']
except KeyError:
try:
config_file = '/usr/local/mariadb/columnstore/etc/Columnstore.xml'
os.lstat(config_file)
except:
sys.exit('No config file available')
xmldoc = xml.dom.minidom.parse(config_file)
bulk_node = xmldoc.getElementsByTagName('BulkRoot')[0]
db_node = xmldoc.getElementsByTagName('DBRoot')[0]
bulk_dir = bulk_node.childNodes[0].nodeValue
data_dir = db_node.childNodes[0].nodeValue
return (bulk_dir, data_dir)
def validate_indexes(job_file):
index_files = []
xmldoc = xml.dom.minidom.parse(job_file)
for index_node in xmldoc.getElementsByTagName('Index'):
curTreeOid = index_node.getAttribute('iTreeOid')
curListOid = index_node.getAttribute('iListOid')
curMapOid = index_node.getAttribute('mapOid')
curIdxCmdArg = ' -t ' + curTreeOid + ' -l ' + curListOid + ' -v -c ' + curMapOid + ' -b 4' + ' > idxCol_' + curMapOid+'.out'
# print curIdxCmd
# exec_cmd( genii + '/tools/evalidx/evalidx', curIdxCmd )
index_files.append( curIdxCmdArg )
return index_files
def exec_cmd(cmd, args):
"""Execute command using subprocess module or if that fails,
use os.system
"""
try:
import subprocess
try:
retcode = subprocess.call(cmd + " " + args, shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
sys.exit(-1)
else:
print >>sys.stderr, "Child returned", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e
sys.exit(-1)
except:
res = os.system(cmd+' '+args)
if res:
sys.exit( res )
def main():
"""
Validate indexes.
"""
if not os.access('.', os.W_OK):
os.chdir('/tmp')
print 'Changing to /tmp to have permission to write files'
if len(os.getenv('LD_LIBRARY_PATH', '')) < 5:
print 'Suspicious LD_LIBRARY_PATH: %s' % os.getenv('LD_LIBRARY_PATH')
home = os.getenv('HOME')
genii = home+'/genii'
(bulkroot, dbroot) = find_paths()
if len(glob.glob(bulkroot+'/job/Job_300.xml')) == 0:
sys.exit("No Job_300.xml exist ")
indexes = validate_indexes(bulkroot+'/job/Job_300.xml')
for idxCmdArg in indexes:
print idxCmdArg
exec_cmd( genii + '/tools/evalidx/evalidx', idxCmdArg )
## the following line allows either interactive use or module import
if __name__=="__main__": main()

View File

@ -1,388 +0,0 @@
/****************************************************************
* $Id$
*
***************************************************************/
/** @file
* Validation tool for index validation
*
* This tool is to validate the index tree and list structure. It starts
* from the index tree file, walk through the tree structure until it hits
* a leaf node, then locates the index list block based on the leaf pointer.
* It continues to get all the RIDs for that index key, and also goes to
* the column OID file to validate the column value with the index key.
*/
#include <iostream>
#include <fstream>
#include <cassert>
#include <vector>
#include <algorithm>
#include <iterator>
using namespace std;
#include <unistd.h>
#include "bytestream.h"
using namespace messageqcpp;
#include "dmlpackageprocessor.h"
using namespace dmlpackageprocessor;
#include "writeengine.h"
#include "we_indextree.h"
using namespace WriteEngine;
#include "configcpp.h"
using namespace config;
#include "dm.h"
/** Debug macro */
#define _DEBUG 0
#if _DEBUG
#define DEBUG cout
#else
#define DEBUG if (0) cout
#endif
namespace
{
const streamsize entrysize = sizeof(IdxBitTestEntry);
const streamsize subblocksize = SUBBLOCK_TOTAL_BYTES;
const streamsize listHdrSize = sizeof(IdxRidListHdr);
uint32_t treeOID, listOID;
uint32_t colOID = 0;
uint32_t columnSize = 0;
ifstream indexTreeFile, indexListFile, columnFile;
bool vFlag = false;
bool nFlag = false;
int64_t keyNumber = 0;
FILE* pFile;
IndexList indexList;
u_int64_t keyvalue;
int totalRids = 0;
void usage()
{
cout << "evalidx [-h] -t OID -l OID [-v -c OID -b colSize -k keyvalue -n ]" << endl;
cout << "\t-h display this help" << endl;
cout << "\t-t OID index tree" << endl;
cout << "\t-l OID index list" << endl;
cout << "\t-v validate index value (need to go with -c and -b)" << endl;
cout << "\t-c OID column" << endl;
cout << "\t-b column size in number of byte (default = 4)" << endl;
cout << "\t-k keyvalue to return index list header for this key" << endl;
cout << "\t-n read RID from tree design" << endl;
}
int oid2file(uint32_t oid, string& filename)
{
//ITER17_Obsolete
// This code and this program is obsolete at this point since we are not
// currently supporting indexes. This function and its use of getFileName
// needs to be changed, if we ever resurrect this program, since getFileName
// now normally requires the DBRoot, partition, and segment number in
// addition to the OID.
#if 0
FileOp fileOp;
char file_name[WriteEngine::FILE_NAME_SIZE];
if (fileOp.getFileName(oid, file_name) == WriteEngine::NO_ERROR)
{
filename = file_name;
return 0;
}
else
{
cerr << "WriteEngine::FileOp::getFileName() error!" << endl;
return -1;
}
#endif
return 0;
}
int validateValue(WriteEngine::RID rid, int64_t key)
{
int64_t byteoffset = rid * columnSize;
ByteStream::byte inbuf[columnSize];
int64_t colVal = 0;
columnFile.seekg(byteoffset, ios::beg);
columnFile.read(reinterpret_cast<char*>(inbuf), columnSize);
memcpy(&colVal, inbuf, columnSize);
if (key != colVal)
{
cerr << "rowid: " << rid << endl
<< "index: " << key << endl
<< "column: " << colVal << endl;
return 1;
}
return 0;
}
void walkBlock (streamsize byteoffset)
{
int64_t newByteoffset = 0;
int fbo;
int groupNo;
ByteStream::byte inbuf[entrysize];
ByteStream::byte listHdr[listHdrSize];
IdxBitTestEntry* entry;
IdxRidListHdr* hdrEntry;
IdxRidListHdrSize* hdrSize;
// get group number
indexTreeFile.seekg(byteoffset, ios::beg);
indexTreeFile.read(reinterpret_cast<char*>(inbuf), entrysize);
if (indexTreeFile.eof()) return;
entry = (IdxBitTestEntry*) inbuf;
groupNo = entry->group;
// continue to walk next stage if not leaf node for each entry in the group
for (int i = 0; i < 1 << groupNo; i++)
{
indexTreeFile.seekg(byteoffset, ios::beg);
indexTreeFile.read(reinterpret_cast<char*>(inbuf), entrysize);
if (indexTreeFile.eof()) return;
entry = (IdxBitTestEntry*) inbuf;
byteoffset += entrysize;
DEBUG << ": fbo=" << (int)entry->fbo <<
" sbid=" << entry->sbid << " sbentry=" << entry->entry <<
" group=" << entry->group << " bittest=" << entry->bitTest <<
" type=" << entry->type << endl;
if (entry->type == WriteEngine::EMPTY_ENTRY ||
entry->type == WriteEngine::EMPTY_LIST ||
entry->type == WriteEngine::EMPTY_PTR)
continue;
// convert lbid to real fob number
uint16_t dbRoot;
uint32_t partition;
uint16_t segment;
BRMWrapper::getInstance()->getFboOffset(entry->fbo, dbRoot, partition, segment, fbo);
newByteoffset = ((int64_t)fbo) * BLOCK_SIZE + entry->sbid * subblocksize + entry->entry * entrysize;
if (entry->type > 6)
{
cerr << "invalid type= " << entry->type << endl;
cerr << "fbo= " << fbo << " sbid= " << entry->sbid << " entry= " << entry->entry << endl;
throw runtime_error("invalid type of tree block");
}
// stop walking index tree if leaf node. go walk index list then
if (entry->type == LEAF_LIST)
{
keyNumber++;
IdxEmptyListEntry listPtr;
int size, rc;
CommBlock cbList;
listPtr.fbo = entry->fbo;
listPtr.sbid = entry->sbid;
listPtr.entry = entry->entry;
indexListFile.seekg(newByteoffset, ios::beg);
indexListFile.read(reinterpret_cast<char*>(listHdr), listHdrSize);
hdrEntry = reinterpret_cast<IdxRidListHdr*>(listHdr);
hdrSize = reinterpret_cast<IdxRidListHdrSize*>(listHdr);
DEBUG << "\nkey= " << hdrEntry->key
<< " rowsize= " << hdrSize->size;
// add feather for Jean. print out list header for a given key value
if (keyvalue == hdrEntry->key)
{
cerr << "fbo= " << listPtr.fbo
<< " sbid= " << listPtr.sbid
<< " entry= " << listPtr.entry
<< " key : " << keyvalue << endl;
}
cbList.file.oid = listOID;
cbList.file.pFile = pFile;
//WriteEngine::RID ridArray[MAX_BLOCK_ENTRY*10];
int rSize = 0;
rSize = hdrSize->size;
WriteEngine::RID* ridArray = new WriteEngine::RID[rSize];
size = 0;
if (!nFlag)
rc = indexList.getRIDArrayFromListHdr(cbList, hdrEntry->key, &listPtr, ridArray, size);
else
rc = indexList.getRIDArrayFromListHdrNarray(cbList, hdrEntry->key, &listPtr, ridArray, size, true);
totalRids = totalRids + size;
if (rc)
{
cerr << "Get RID array failed for index block: " << rc << endl;
cerr << "new byte offset= " << newByteoffset << endl;
cerr << "file good? " << indexListFile.good() << endl;
cerr << "fbo= " << listPtr.fbo
<< " sbid= " << listPtr.sbid
<< " entry= " << listPtr.entry << endl;
for (int64_t j = 0; j < size; j++)
cerr << " " << ridArray[j] << endl;
throw runtime_error("Get RID array failed");
}
if (hdrSize->size != static_cast<unsigned int>(size))
{
cerr << "row size not match with list header" << endl;
cerr << "fbo= " << listPtr.fbo
<< " sbid= " << listPtr.sbid
<< " entry= " << listPtr.entry << endl;
for (int64_t j = 0; j < size; j++)
cerr << " " << ridArray[j] << endl;
throw runtime_error("row size not match with list header");
}
for (int64_t j = 0; j < size; j++)
{
DEBUG << " " << ridArray[j] << endl;
// validate column value with the index value
if (vFlag)
idbassert(validateValue(ridArray[j], hdrEntry->key) == 0);
}
delete [] ridArray;
}
else
walkBlock(newByteoffset);
}
}
}
int main(int argc, char* argv[])
{
int c;
int i;
string filename;
while ((c = getopt(argc, argv, "ntlhbcvk")) != EOF)
switch (c)
{
case 't':
treeOID = atoi(argv[optind]);
if (oid2file(treeOID, filename)) return 1;
DEBUG << "tree: " << filename << endl;
indexTreeFile.open(filename.c_str());
break;
case 'l':
listOID = atoi(argv[optind]);
if (oid2file(listOID, filename)) return 1;
DEBUG << "list: " << filename << endl;
indexListFile.open(filename.c_str());
pFile = fopen(filename.c_str(), "rb");
if (!pFile)
{
cerr << "Invalid OID " << listOID << " for index list" << endl;
exit(1);
}
break;
case 'v':
vFlag = true;
break;
case 'c':
colOID = atoi(argv[optind]);
if (oid2file(colOID, filename)) return 1;
DEBUG << "column: " << filename << endl;
columnFile.open(filename.c_str());
break;
case 'b':
columnSize = atoi(argv[optind]);
break;
case 'k':
keyvalue = atoi(argv[optind]);
break;
case 'h':
usage();
return 0;
break;
case 'n':
nFlag = true;
break;
default:
usage();
return 1;
break;
}
if ((argc - optind) < 1)
{
usage();
return 1;
}
if (argc < 5)
{
usage();
return 1;
}
if (vFlag && (colOID == 0 || columnSize == 0))
{
cerr << "Please provide both -c and -b option if -v is indicated." << endl;
usage();
return 1;
}
if (vFlag && !columnFile.good())
{
cerr << "Bad column OID" << endl;
return 1;
}
if (!indexTreeFile.good() || !indexListFile.good())
{
cerr << "Bad index OIDs" << endl;
return 1;
}
// walk through the index tree file
for (i = 0; i < 32; i++)
walkBlock (0 * BLOCK_SIZE + 1 * subblocksize + i * entrysize);
cout << "\n" << keyNumber << " index value validated!" << endl;
cout << "Total RIDs for this column=" << totalRids << endl;
indexListFile.close();
indexTreeFile.close();
fclose(pFile);
return 0;
}
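For reference, a hypothetical invocation matching the usage() text above (the OIDs are made up for illustration): validate index tree OID 3001 against list OID 3002, cross-checking 4-byte values in column OID 3003:

evalidx -t 3001 -l 3002 -v -c 3003 -b 4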

View File

@ -1,6 +1,7 @@
include_directories( ${ENGINE_COMMON_INCLUDES} )
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/configxml.sh.in" "${CMAKE_CURRENT_SOURCE_DIR}/configxml.sh" @ONLY)
########### next target ###############

View File

@ -38,7 +38,7 @@ case "$1" in
echo "Old value of $2 / $3 is $oldvalue"
calxml=$InstallDir/etc/Columnstore.xml
calxml=@ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml
seconds=$(date +%s)
cp $calxml $calxml.$seconds
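A hypothetical invocation under the new layout, following the positional $1/$2/$3 handling visible above (the sub-command name is an assumption; check the script's usage text):

./configxml.sh setconfig SystemConfig DBRMRoot /mnt/OAM/dbrm/BRM_saves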

View File

@ -1,108 +0,0 @@
// A good set of defaults for the dev compile
#ifndef CONFIGCPP_CONFIG_H__
#define CONFIGCPP_CONFIG_H__
#ifndef HAVE_CONFIG_H
#ifndef _MSC_VER
#define HAVE_ALARM 1
#define HAVE_ALLOCA 1
#define HAVE_ALLOCA_H 1
#define HAVE_ARPA_INET_H 1
#define HAVE_DECL_STRERROR_R 1
#define HAVE_DLFCN_H 1
#define HAVE_DUP2 1
#define HAVE_FCNTL_H 1
#define HAVE_FLOOR 1
#define HAVE_FORK 1
#define HAVE_FTIME 1
#define HAVE_FTRUNCATE 1
#define HAVE_GETHOSTBYNAME 1
#define HAVE_GETPAGESIZE 1
#define HAVE_GETTIMEOFDAY 1
#define HAVE_INET_NTOA 1
#define HAVE_INTTYPES_H 1
#define HAVE_ISASCII 1
#define HAVE_LIMITS_H 1
#define HAVE_LOCALTIME_R 1
#define HAVE_MALLOC 1
#define HAVE_MALLOC_H 1
#define HAVE_MBSTATE_T 1
#define HAVE_MEMCHR 1
#define HAVE_MEMMOVE 1
#define HAVE_MEMORY_H 1
#define HAVE_MEMSET 1
#define HAVE_MKDIR 1
#define HAVE_NETDB_H 1
#define HAVE_NETINET_IN_H 1
#define HAVE_POW 1
#define HAVE_PTRDIFF_T 1
#define HAVE_REGCOMP 1
#define HAVE_RMDIR 1
#define HAVE_SELECT 1
#define HAVE_SETENV 1
#define HAVE_SETLOCALE 1
#define HAVE_SOCKET 1
#define HAVE_STDBOOL_H 1
#define HAVE_STDDEF_H 1
#define HAVE_STDINT_H 1
#define HAVE_STDLIB_H 1
#define HAVE_STRCASECMP 1
#define HAVE_STRCHR 1
#define HAVE_STRCSPN 1
#define HAVE_STRDUP 1
#define HAVE_STRERROR 1
#define HAVE_STRERROR_R 1
#define HAVE_STRFTIME 1
#define HAVE_STRINGS_H 1
#define HAVE_STRING_H 1
#define HAVE_STRRCHR 1
#define HAVE_STRSPN 1
#define HAVE_STRSTR 1
#define HAVE_STRTOL 1
#define HAVE_STRTOUL 1
#define HAVE_STRTOULL 1
#define HAVE_SYSLOG_H 1
#define HAVE_SYS_FILE_H 1
#define HAVE_SYS_MOUNT_H 1
#define HAVE_SYS_SELECT_H 1
#define HAVE_SYS_SOCKET_H 1
#define HAVE_SYS_STATFS_H 1
#define HAVE_SYS_STAT_H 1
#define HAVE_SYS_TIMEB_H 1
#define HAVE_SYS_TIME_H 1
#define HAVE_SYS_TYPES_H 1
#define HAVE_SYS_WAIT_H 1
#define HAVE_UNISTD_H 1
#define HAVE_UTIME 1
#define HAVE_UTIME_H 1
#define HAVE_VALUES_H 1
#define HAVE_VFORK 1
#define HAVE_WORKING_VFORK 1
#define LSTAT_FOLLOWS_SLASHED_SYMLINK 1
//#define PACKAGE "calpont"
//#define PACKAGE_BUGREPORT "support@calpont.com"
//#define PACKAGE_NAME "Calpont"
//#define PACKAGE_STRING "Calpont 1.0.0"
//#define PACKAGE_TARNAME "calpont"
//#define PACKAGE_VERSION "1.0.0"
#define PROTOTYPES 1
#define RETSIGTYPE void
#define SELECT_TYPE_ARG1 int
#define SELECT_TYPE_ARG234 (fd_set *)
#define SELECT_TYPE_ARG5 (struct timeval *)
#define STDC_HEADERS 1
#define STRERROR_R_CHAR_P 1
#define TIME_WITH_SYS_TIME 1
#define VERSION "1.0.0"
#define __PROTOTYPES 1
#define restrict __restrict
#else // _MSC_VER
#endif
#endif //!HAVE_CONFIG_H
#endif //!CONFIGCPP_CONFIG_H__

View File

@ -107,7 +107,7 @@ Config* Config::makeConfig(const char* cf)
if (defaultFilePath.empty())
{
fs::path configFilePath;
configFilePath = fs::path(installDir) / fs::path("etc") / defaultCalpontConfigFile;
configFilePath = fs::path(MCSSYSCONFDIR) / fs::path("columnstore") / defaultCalpontConfigFile;
defaultFilePath = configFilePath.string();
}
@ -383,7 +383,7 @@ void Config::writeConfig(const string& configFile) const
const fs::path saveCalpontConfigFileTemp("Columnstore.xml.columnstoreSave");
const fs::path tmpCalpontConfigFileTemp("Columnstore.xml.temp1");
fs::path etcdir = fs::path(fInstallDir) / fs::path("etc");
fs::path etcdir = fs::path(MCSSYSCONFDIR) / fs::path("columnstore");
fs::path dcf = etcdir / fs::path(defaultCalpontConfigFile);
fs::path dcft = etcdir / fs::path(defaultCalpontConfigFileTemp);
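With this change callers no longer need an install-prefix path at all; a minimal sketch, assuming configcpp.h is available and that makeConfig() defaults its argument to 0 as the defaultFilePath handling above suggests:

#include "configcpp.h"
using namespace config;

// resolves to MCSSYSCONFDIR/columnstore/Columnstore.xml by default
Config* cfg = Config::makeConfig();
std::string storageType = cfg->getConfig("Installation", "DBRootStorageType");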

View File

@ -1,232 +0,0 @@
/* Copyright (C) 2014 InfiniDB, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
// $Id: writeonce.cpp 3495 2013-01-21 14:09:51Z rdempsey $
#include "writeonce.h"
#include <string>
#include <stdexcept>
#include <sstream>
#include <fstream>
#include <errno.h>
#include <unistd.h>
//#define NDEBUG
#include <cassert>
#include <cstring>
using namespace std;
#include <boost/any.hpp>
using namespace boost;
#include "bytestream.h"
using namespace messageqcpp;
#include "installdir.h"
namespace
{
const string DefaultWriteOnceConfigFilename("woparms.dat");
}
namespace config
{
//If you add parm, you need to update all the methods below until the next comment
void WriteOnceConfig::initializeDefaults()
{
string tmpDir = startup::StartUp::tmpDir();
fLBID_Shift = make_pair("13", false);
fDBRootCount = make_pair("1", false);
fDBRMRoot = make_pair("/mnt/OAM/dbrm/BRM_saves", false);
string file = tmpDir + "/ColumnstoreShm";
fSharedMemoryTmpFile1 = make_pair(file, false);
fTxnIDFile = make_pair("/mnt/OAM/dbrm/SMTxnID", false);
file = tmpDir + "/CalpontSessionMonitorShm";
fSharedMemoryTmpFile2 = make_pair(file, false);
}
void WriteOnceConfig::setup()
{
typedef EntryMap_t::value_type VT;
fEntryMap.insert(VT("PrimitiveServers.LBID_Shift", &fLBID_Shift));
fEntryMap.insert(VT("SystemConfig.DBRootCount", &fDBRootCount));
fEntryMap.insert(VT("SystemConfig.DBRMRoot", &fDBRMRoot));
fEntryMap.insert(VT("SessionManager.SharedMemoryTmpFile", &fSharedMemoryTmpFile1));
fEntryMap.insert(VT("SessionManager.TxnIDFile", &fTxnIDFile));
fEntryMap.insert(VT("SessionMonitor.SharedMemoryTmpFile", &fSharedMemoryTmpFile2));
ByteStream ibs = load();
if (ibs.length() > 0)
unserialize(ibs);
else
initializeDefaults();
}
void WriteOnceConfig::serialize(ByteStream& obs) const
{
obs << WriteOnceConfigVersion;
obs << fLBID_Shift.first;
obs << fDBRootCount.first;
obs << fDBRMRoot.first;
obs << fSharedMemoryTmpFile1.first;
obs << fTxnIDFile.first;
obs << fSharedMemoryTmpFile2.first;
}
void WriteOnceConfig::unserialize(ByteStream& ibs)
{
uint32_t version;
ibs >> version;
if (version < WriteOnceConfigVersion)
{
ostringstream oss;
oss << "Invalid version found in WriteOnceConfig file: " << version;
throw runtime_error(oss.str().c_str());
}
else if (version > WriteOnceConfigVersion)
{
ostringstream oss;
oss << "Invalid version found in WriteOnceConfig file: " << version;
throw runtime_error(oss.str().c_str());
}
ibs >> fLBID_Shift.first;
fLBID_Shift.second = true;
ibs >> fDBRootCount.first;
fDBRootCount.second = true;
ibs >> fDBRMRoot.first;
fDBRMRoot.second = true;
ibs >> fSharedMemoryTmpFile1.first;
fSharedMemoryTmpFile1.second = true;
ibs >> fTxnIDFile.first;
fTxnIDFile.second = true;
ibs >> fSharedMemoryTmpFile2.first;
fSharedMemoryTmpFile2.second = true;
}
//End of methods that need to be changed when adding parms
ByteStream WriteOnceConfig::load()
{
ByteStream bs;
if (access(fConfigFileName.c_str(), F_OK) != 0)
{
initializeDefaults();
return bs;
}
idbassert(access(fConfigFileName.c_str(), F_OK) == 0);
ifstream ifs(fConfigFileName.c_str());
int e = errno;
if (!ifs.good())
{
ostringstream oss;
oss << "Error opening WriteOnceConfig file " << fConfigFileName << ": " << strerror(e);
throw runtime_error(oss.str().c_str());
}
ifs >> bs;
return bs;
}
void WriteOnceConfig::save(ByteStream& ibs) const
{
ofstream ofs(fConfigFileName.c_str());
int e = errno;
if (!ofs.good())
{
ostringstream oss;
oss << "Error opening WriteOnceConfig file " << fConfigFileName << ": " << strerror(e);
throw runtime_error(oss.str().c_str());
}
ofs << ibs;
}
WriteOnceConfig::WriteOnceConfig(const char* cf)
{
string cfs;
if (cf != 0)
cfs = cf;
else
cfs = startup::StartUp::installDir() + "/etc/" + DefaultWriteOnceConfigFilename;
fConfigFileName = cfs;
setup();
}
void WriteOnceConfig::setConfig(const string& section, const string& name, const string& value, bool force)
{
EntryMap_t::iterator iter;
iter = fEntryMap.find(string(section + "." + name));
if (iter == fEntryMap.end())
{
ostringstream oss;
oss << "Invalid request for " << section << '.' << name;
throw runtime_error(oss.str().c_str());
}
if ((*iter->second).second && !force)
{
ostringstream oss;
oss << "Invalid attempt to write read-only " << section << '.' << name;
throw runtime_error(oss.str().c_str());
}
(*iter->second).first = value;
(*iter->second).second = true;
ByteStream obs;
serialize(obs);
save(obs);
}
const string WriteOnceConfig::getConfig(const string& section, const string& name) const
{
string val;
EntryMap_t::const_iterator iter;
iter = fEntryMap.find(string(section + "." + name));
if (iter == fEntryMap.end())
{
ostringstream oss;
oss << "Invalid request for " << section << '.' << name;
throw runtime_error(oss.str().c_str());
}
val = (*iter->second).first;
return val;
}
}
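Usage is straightforward; a minimal sketch, assuming writeonce.h is on the include path:

#include "writeonce.h"
using namespace config;

WriteOnceConfig woc(0);  // 0 selects the default woparms.dat location
woc.setConfig("SystemConfig", "DBRootCount", "2", false);  // first write is allowed
std::string count = woc.getConfig("SystemConfig", "DBRootCount");
// a second setConfig() on the same key throws unless force is true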

View File

@ -1,7 +0,0 @@
#!/bin/sh
export LD_LIBRARY_PATH=/usr/local/mariadb/columnstore/lib:$LD_LIBRARY_PATH
export CALPONT_CONFIG_FILE=/usr/local/mariadb/columnstore/etc/Columnstore.xml
export PATH=$PATH:/usr/local/hadoop-0.20.2/bin:/usr/local/mariadb/columnstore/bin
export CALPONT_HOME=/usr/local/mariadb/columnstore/etc
hadoop dfs -cat $1 | cpimport $2 $3
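This looks like the helper that IDBFileInputFormat (below) spawns as infinidoop_load.sh: $1 is an HDFS path piped through hadoop dfs -cat, while $2 and $3 are the schema and table handed to cpimport. A hypothetical call, with the path and names made up for illustration:

./infinidoop_load.sh /user/hdfs/orders.tbl tpch orders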

View File

@ -1,184 +0,0 @@
/*
* Copyright (c) 2014 InfiniDB, Inc.
*
* InfiniDB, Inc. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package infinidb.hadoop.db;
import infinidb.hadoop.db.InfiniDBConfiguration;
import java.io.*;
import java.sql.*;
import java.util.Date;
import java.util.Formatter;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.LineRecordReader;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.lib.db.*;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.*;
import org.apache.hadoop.mapreduce.lib.output.*;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.util.*;
import org.apache.hadoop.*;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class IDBFileInputFormat extends org.apache.hadoop.mapred.FileInputFormat<NullWritable, NullWritable> {
private static final Logger LOG = LoggerFactory.getLogger(IDBFileInputFormat.class);
@Override
public RecordReader<NullWritable, NullWritable> getRecordReader(InputSplit arg0, JobConf arg1,
Reporter arg2) throws IOException
{
final String filename = ((FileSplit)arg0).getPath().toString();
final JobConf job = arg1;
return new RecordReader<NullWritable, NullWritable>()
{
private boolean unread = true;
@Override
public void close() throws IOException
{}
@Override
public NullWritable createKey()
{
return NullWritable.get();
}
@Override
public NullWritable createValue()
{
return NullWritable.get();
}
@Override
public long getPos() throws IOException
{
return 0;
}
@Override
public float getProgress() throws IOException
{
return unread ? 0 : 1;
}
@Override
/* spawn a cpimport process for each input file */
public boolean next(NullWritable arg0, NullWritable arg1) throws IOException
{
InfiniDBConfiguration dbConf = new InfiniDBConfiguration(job);
String schemaName = dbConf.getOutputSchemaName();
String tableName = (filename.substring(filename.lastIndexOf('/')+1, filename.length()));
tableName = tableName.substring(0, tableName.lastIndexOf('.'));
String output = job.get("mapred.output.dir");
if (unread)
{
try
{
StringBuilder loadCmdStr = new StringBuilder();
loadCmdStr.append(dbConf.getInfiniDBHome());
loadCmdStr.append("/bin/");
loadCmdStr.append("infinidoop_load.sh ");
loadCmdStr.append(filename);
loadCmdStr.append(" ");
loadCmdStr.append(schemaName);
loadCmdStr.append(" ");
loadCmdStr.append(tableName);
Process lChldProc = Runtime.getRuntime().exec(loadCmdStr.toString());
// Wait for the child to exit
lChldProc.waitFor();
BufferedReader lChldProcOutStream = new BufferedReader(new InputStreamReader(lChldProc.getInputStream()));
BufferedReader stdError = new BufferedReader(new InputStreamReader(lChldProc.getErrorStream()));
String lChldProcOutPutStr = null;
StringBuffer outpath = new StringBuffer();
outpath.append(job.getWorkingDirectory());
outpath.append("/");
outpath.append(output);
outpath.append("/");
outpath.append(tableName);
outpath.append(".log");
Path pt=new Path(outpath.toString());
FileSystem fs = FileSystem.get(new Configuration());
BufferedWriter br = new BufferedWriter(new OutputStreamWriter(fs.create(pt, false)));
// catch output
while ((lChldProcOutPutStr = lChldProcOutStream.readLine()) != null)
{
br.write(lChldProcOutPutStr);
br.newLine();
}
// catch error
while ((lChldProcOutPutStr = stdError.readLine()) != null)
{
br.write(lChldProcOutPutStr);
br.newLine();
}
//br.write(outpath.toString());
//br.newLine();
//br.write(loadCmdStr.toString());
//br.newLine();
//br.write(filename);
br.close();
lChldProcOutStream.close();
}
catch(Exception e)
{
e.printStackTrace();
}
unread = false;
return true;
}
else
{
return false;
}
}
};
}
@Override
protected boolean isSplitable(FileSystem fs, Path filename)
{
return false;
}
}

View File

@ -1,356 +0,0 @@
/*
* Copyright (c) 2014 InfiniDB, Inc.
*
* InfiniDB, Inc. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package infinidb.hadoop.db;
import java.io.IOException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.db.DBInputFormat.NullDBWritable;
import org.apache.hadoop.mapred.lib.db.*;
/**
* A container for configuration property names for jobs with DB input/output.
* <br>
* The job can be configured using the static methods in this class,
* {@link DBInputFormat}, and {@link DBOutputFormat}.
* <p>
* Alternatively, the properties can be set in the configuration with proper
* values.
*
* @see DBConfiguration#configureDB(JobConf, String, String, String, String)
* @see DBInputFormat#setInput(JobConf, Class, String, String)
* @see DBInputFormat#setInput(JobConf, Class, String, String, String, String...)
* @see DBOutputFormat#setOutput(JobConf, String, String...)
*/
public class InfiniDBConfiguration{
/** Input schema name */
public static final String INPUT_SCHEMA_NAME_PROPERTY = "idb_hadoop.input.schema.name";
/** Output schema name */
public static final String OUTPUT_SCHEMA_NAME_PROPERTY = "idb_hadoop.output.schema.name";
/** Output table name */
public static final String OUTPUT_TABLE_NAMES_PROPERTY = "idb_hadoop.output.table.name";
/** @InfiniDB Split key for split the query task */
public static final String INPUT_SPLITKEY_NAME_PROPERTY = "idb_hadoop.splitkey.name";
/** @InfiniDB Split key min value */
public static final String INPUT_SPLITKEY_MIN_VAL = "idb_hadoop.splitkey.min.value";
/** @InfiniDB Split key max value */
public static final String INPUT_SPLITKEY_MAX_VAL = "idb_hadoop.splitkey.max.value";
/** @InfiniDB HOME path */
public static final String INFINIDB_HOME = "idb_hadoop.infinidb.home.path";
/** Input dir */
public static final String INPUT_PATH = "mapred.input.dir";
/** Output dir */
public static final String OUTPUT_PATH = "mapred.output.dir";
/**
* Sets the DB access related fields in the JobConf.
* @param job the job
* @param driverClass JDBC Driver class name
* @param dbUrl JDBC DB access URL.
* @param userName DB access username
* @param passwd DB access passwd
*/
public static void configureDB(JobConf job, String driverClass, String dbUrl
, String userName, String passwd)
{
job.set(DBConfiguration.DRIVER_CLASS_PROPERTY, driverClass);
job.set(DBConfiguration.URL_PROPERTY, dbUrl);
if(userName != null)
job.set(DBConfiguration.USERNAME_PROPERTY, userName);
if(passwd != null)
job.set(DBConfiguration.PASSWORD_PROPERTY, passwd);
}
/**
* Sets the DB access related fields in the JobConf.
* @param job the job
* @param driverClass JDBC Driver class name
* @param dbUrl JDBC DB access URL.
*/
public static void configureDB(JobConf job, String driverClass, String dbUrl)
{
configureDB(job, driverClass, dbUrl, null, null);
}
private JobConf job;
public InfiniDBConfiguration(JobConf job)
{
this.job = job;
}
/** Returns a connection object o the DB
* @throws ClassNotFoundException
* @throws SQLException
*/
Connection getConnection() throws IOException
{
try
{
Class.forName(job.get(DBConfiguration.DRIVER_CLASS_PROPERTY));
}catch (ClassNotFoundException exception)
{
throw new IOException("Conection driver can not be loaded", exception);
}
try
{
if(job.get(DBConfiguration.USERNAME_PROPERTY) == null)
{
return DriverManager.getConnection(job.get(DBConfiguration.URL_PROPERTY));
}
else
{
return DriverManager.getConnection(
job.get(DBConfiguration.URL_PROPERTY),
job.get(DBConfiguration.USERNAME_PROPERTY),
job.get(DBConfiguration.PASSWORD_PROPERTY));
}
}catch (SQLException exception)
{
throw new IOException("Conection can not be established", exception);
}
}
String getInputSchemaName()
{
return job.get(InfiniDBConfiguration.INPUT_SCHEMA_NAME_PROPERTY);
}
void setInputSchemaName(String schemaName)
{
job.set(InfiniDBConfiguration.INPUT_SCHEMA_NAME_PROPERTY, schemaName);
}
String getInputTableName()
{
return job.get(DBConfiguration.INPUT_TABLE_NAME_PROPERTY);
}
void setInputTableName(String tableName)
{
job.set(DBConfiguration.INPUT_TABLE_NAME_PROPERTY, tableName);
}
String[] getInputFieldNames()
{
return job.getStrings(DBConfiguration.INPUT_FIELD_NAMES_PROPERTY);
}
void setInputFieldNames(String... fieldNames)
{
job.setStrings(DBConfiguration.INPUT_FIELD_NAMES_PROPERTY, fieldNames);
}
String getInputConditions()
{
return job.get(DBConfiguration.INPUT_CONDITIONS_PROPERTY);
}
void setInputConditions(String conditions)
{
if (conditions != null && conditions.length() > 0)
job.set(DBConfiguration.INPUT_CONDITIONS_PROPERTY, conditions);
}
String getInputOrderBy()
{
return job.get(DBConfiguration.INPUT_ORDER_BY_PROPERTY);
}
/** @InfiniDB */
void setSplitKey(String key)
{
job.setStrings(InfiniDBConfiguration.INPUT_SPLITKEY_NAME_PROPERTY, key);
}
/** @InfiniDB */
String getSplitKey()
{
return job.get(InfiniDBConfiguration.INPUT_SPLITKEY_NAME_PROPERTY);
}
/** @InfiniDB */
public void setMinVal(long value)
{
job.setLong(INPUT_SPLITKEY_MIN_VAL, value);
}
/** @InfiniDB */
public Long getMinVal()
{
if(job.get(INPUT_SPLITKEY_MIN_VAL)==null)
return null;
return job.getLong(INPUT_SPLITKEY_MIN_VAL, -1);
}
/** @InfiniDB */
public void setMaxVal(long value)
{
job.setFloat(INPUT_SPLITKEY_MAX_VAL, value);
}
/** @InfiniDB */
public Long getMaxVal()
{
if(job.get(INPUT_SPLITKEY_MAX_VAL)==null)
return null;
return job.getLong(INPUT_SPLITKEY_MAX_VAL, -1);
}
void setInputOrderBy(String orderby)
{
if(orderby != null && orderby.length() >0)
{
job.set(DBConfiguration.INPUT_ORDER_BY_PROPERTY, orderby);
}
}
String getInputQuery()
{
return job.get(DBConfiguration.INPUT_QUERY);
}
void setInputQuery(String query)
{
if(query != null && query.length() >0)
{
job.set(DBConfiguration.INPUT_QUERY, query);
}
}
String getInputCountQuery()
{
return job.get(DBConfiguration.INPUT_COUNT_QUERY);
}
void setInputCountQuery(String query)
{
if(query != null && query.length() >0)
{
job.set(DBConfiguration.INPUT_COUNT_QUERY, query);
}
}
Class<?> getInputClass()
{
return job.getClass(DBConfiguration.INPUT_CLASS_PROPERTY, NullDBWritable.class);
}
void setInputClass(Class<? extends DBWritable> inputClass)
{
job.setClass(DBConfiguration.INPUT_CLASS_PROPERTY, inputClass, DBWritable.class);
}
String getOutputSchemaName()
{
return job.get(InfiniDBConfiguration.OUTPUT_SCHEMA_NAME_PROPERTY);
}
void setOutputSchemaName(String schemaName)
{
job.set(InfiniDBConfiguration.OUTPUT_SCHEMA_NAME_PROPERTY, schemaName);
}
String[] getOutputTableNames()
{
return job.getStrings(InfiniDBConfiguration.OUTPUT_TABLE_NAMES_PROPERTY);
}
void setOutputTableNames(String... tableNames)
{
job.setStrings(InfiniDBConfiguration.OUTPUT_TABLE_NAMES_PROPERTY, tableNames);
}
String[] getOutputFieldNames()
{
return job.getStrings(DBConfiguration.OUTPUT_FIELD_NAMES_PROPERTY);
}
void setOutputFieldNames(String... fieldNames)
{
job.setStrings(DBConfiguration.OUTPUT_FIELD_NAMES_PROPERTY, fieldNames);
}
public String getInfiniDBHome()
{
return job.get(InfiniDBConfiguration.INFINIDB_HOME);
}
public void setInfiniDBHome(String path)
{
job.set(InfiniDBConfiguration.INFINIDB_HOME, path);
}
public String getInputPath()
{
return job.get(InfiniDBConfiguration.INPUT_PATH);
}
public void setInputPath(String path)
{
job.set(InfiniDBConfiguration.INPUT_PATH, path);
}
public String getOutputPath()
{
return job.get(InfiniDBConfiguration.OUTPUT_PATH);
}
public void setOutputPath(String path)
{
job.set(InfiniDBConfiguration.OUTPUT_PATH, path);
}
}
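A minimal job-setup sketch using the public accessors above; the driver class, URL, credentials, and paths are illustrative assumptions:

import org.apache.hadoop.mapred.JobConf;
import infinidb.hadoop.db.InfiniDBConfiguration;

JobConf job = new JobConf();
InfiniDBConfiguration.configureDB(job, "com.mysql.jdbc.Driver",
    "jdbc:mysql://pm1:3306/tpch", "user", "pass");

InfiniDBConfiguration dbConf = new InfiniDBConfiguration(job);
dbConf.setInfiniDBHome("/usr/local/mariadb/columnstore"); // pre-patch install path
dbConf.setMinVal(1L);       // optional split-key bounds; skips the MIN()/MAX() probe
dbConf.setMaxVal(1000000L);
dbConf.setInputPath("/user/hdfs/in");
dbConf.setOutputPath("/user/hdfs/out");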

View File

@ -1,442 +0,0 @@
/*
* Copyright (c) 2014 InfiniDB, Inc.
*
* InfiniDB, Inc. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package infinidb.hadoop.db;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConfigurable;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.mapred.lib.db.*;
/**
* A InputFormat that reads input data from an SQL table.
* <p>
* DBInputFormat emits LongWritables containing the record number as
* key and DBWritables as value.
*
* The SQL query and input class can be set using one of the two
* setInput methods.
*/
public class InfiniDBInputFormat<T extends DBWritable>
implements InputFormat<LongWritable, T>, JobConfigurable
{
/**
* A RecordReader that reads records from a SQL table.
* Emits LongWritables containing the record number as
* key and DBWritables as value.
*/
protected class DBRecordReader implements RecordReader<LongWritable, T>
{
private ResultSet results;
private Statement statement;
private Class<T> inputClass;
private JobConf job;
private InfiniDBInputSplit split;
private long pos = 0;
/**
* @param split The InputSplit to read data for
* @throws SQLException
*/
protected DBRecordReader(InfiniDBInputSplit split, Class<T> inputClass, JobConf job) throws SQLException
{
this.inputClass = inputClass;
this.split = split;
this.job = job;
statement = connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
//statement.setFetchSize(Integer.MIN_VALUE);
results = statement.executeQuery(getSelectQuery());
}
/** @InfiniDB */
public <S> String concat(S[] arr, String sep)
{
String ret = "";
for(int i=0; i < arr.length; i++)
{
ret = ret + arr[i];
if(i < arr.length-1)
{
ret = ret + sep;
}
}
return ret;
}
/** @InfiniDB Returns the query for selecting the records,
* subclasses can override this for custom behaviour.*/
protected String getSelectQuery()
{
InfiniDBConfiguration conf = new InfiniDBConfiguration(job);
StringBuilder query = new StringBuilder();
query.append("SELECT ");
query.append(concat(conf.getInputFieldNames(), ","));
query.append(" FROM ");
query.append(conf.getInputTableName());
query.append(" WHERE ");
query.append(split.splitKey + ">=" + split.getStart());
query.append(" AND ");
query.append(split.splitKey + "<" + split.getEnd());
if (conditions != null && conditions.length() > 0)
query.append(" AND (").append(conditions).append(")");
return query.toString();
}
/** {@inheritDoc} */
public void close() throws IOException
{
try
{
connection.commit();
results.close();
statement.close();
} catch (SQLException e)
{
throw new IOException(e.getMessage());
}
}
/** {@inheritDoc} */
public LongWritable createKey()
{
return new LongWritable();
}
/** {@inheritDoc} */
public T createValue()
{
return ReflectionUtils.newInstance(inputClass, job);
}
/** {@inheritDoc} */
public long getPos() throws IOException
{
return pos;
}
/** {@inheritDoc} */
public float getProgress() throws IOException
{
return pos / (float)split.getLength();
}
/** {@inheritDoc} */
public boolean next(LongWritable key, T value) throws IOException
{
try
{
if (!results.next())
return false;
// Set the key field value as the output key value
key.set(pos + split.getStart());
value.readFields(results);
pos ++;
} catch (SQLException e)
{
throw new IOException(e.getMessage());
}
return true;
}
}
/**
* A Class that does nothing, implementing DBWritable
*/
public static class NullDBWritable implements DBWritable, Writable {
@Override
public void readFields(DataInput in) throws IOException { }
@Override
public void readFields(ResultSet arg0) throws SQLException { }
@Override
public void write(DataOutput out) throws IOException { }
@Override
public void write(PreparedStatement arg0) throws SQLException { }
}
/**
* A InputSplit that spans a set of rows
*/
protected static class InfiniDBInputSplit implements InputSplit {
private long end = 0;
private long start = 0;
private String splitKey;
/**
* Default Constructor
*/
public InfiniDBInputSplit() {
}
/**
* @InfiniDB
* Convenience Constructor
* @param start the index of the first row to select
* @param end the index of the last row to select
*/
public InfiniDBInputSplit(long start, long end, String key) {
this.start = start;
this.end = end;
this.splitKey = key;
}
/** {@inheritDoc} */
public String[] getLocations() throws IOException {
return new String[] {};
}
/**
* @return The index of the first row to select
*/
public long getStart() {
return start;
}
/**
* @return The index of the last row to select
*/
public long getEnd() {
return end;
}
/**
* @return The total row count in this split
*/
public long getLength() throws IOException {
return end - start;
}
/** {@inheritDoc} */
public void readFields(DataInput input) throws IOException {
start = input.readLong();
end = input.readLong();
splitKey = WritableUtils.readString(input);
}
/** {@inheritDoc} */
public void write(DataOutput output) throws IOException {
output.writeLong(start);
output.writeLong(end);
WritableUtils.writeString(output, splitKey);
}
}
private String conditions;
private Connection connection;
private String tableName;
private String[] fieldNames;
private InfiniDBConfiguration dbConf;
/** {@inheritDoc} */
public void configure(JobConf job) {
dbConf = new InfiniDBConfiguration(job);
try {
this.connection = dbConf.getConnection();
this.connection.setAutoCommit(false);
connection.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
}
catch (Exception ex) {
throw new RuntimeException(ex);
}
tableName = dbConf.getInputTableName();
fieldNames = dbConf.getInputFieldNames();
conditions = dbConf.getInputConditions();
}
/** {@inheritDoc} */
@SuppressWarnings("unchecked")
public RecordReader<LongWritable, T> getRecordReader(InputSplit split,
JobConf job, Reporter reporter) throws IOException {
Class inputClass = dbConf.getInputClass();
try {
return new DBRecordReader((InfiniDBInputSplit) split, inputClass, job);
}
catch (SQLException ex) {
throw new IOException(ex.getMessage());
}
}
/** @InfiniDB */
private long getMaxVal(InfiniDBConfiguration conf, Connection conn, String tableName, String col) {
if(conf.getMaxVal()!=null) {
return conf.getMaxVal();
}
try {
PreparedStatement s = conn.prepareStatement("SELECT MAX(" + col + ") FROM " + tableName);
ResultSet rs = s.executeQuery();
rs.next();
long ret = rs.getLong(1);
rs.close();
s.close();
return ret;
} catch(SQLException e) {
throw new RuntimeException(e);
}
}
/** @InfiniDB */
private long getMinVal(InfiniDBConfiguration conf, Connection conn, String tableName, String col ) {
if(conf.getMinVal()!=null) {
return conf.getMinVal();
}
try {
PreparedStatement s = conn.prepareStatement("SELECT MIN(" + col + ") FROM " + tableName);
ResultSet rs = s.executeQuery();
rs.next();
long ret = rs.getLong(1);
rs.close();
s.close();
return ret;
} catch(SQLException e) {
throw new RuntimeException(e);
}
}
/** {@inheritDoc}
* @InfiniDB
*/
public InputSplit[] getSplits(JobConf job, int chunks) throws IOException {
try {
InfiniDBConfiguration conf = new InfiniDBConfiguration(job);
Connection conn = conf.getConnection();
String splitKey = conf.getSplitKey();
long maxVal = getMaxVal(conf, conn, conf.getInputTableName(), conf.getSplitKey());
long minVal = getMinVal(conf, conn, conf.getInputTableName(), conf.getSplitKey());
System.out.println("max=" + maxVal);
System.out.println("min=" + minVal);
InputSplit[] ret = new InputSplit[chunks];
long chunkSize = (maxVal - minVal + 1) / chunks + 1;
long start = minVal;
for (int i = 0; i < chunks; i++){
ret[i] = new InfiniDBInputSplit(start, start+chunkSize, splitKey);
start += chunkSize;
}
conn.close();
return ret;
} catch(SQLException e) {
throw new RuntimeException(e);
}
}
/** Returns the query for getting the total number of rows,
* subclasses can override this for custom behaviour.*/
protected String getCountQuery() {
if(dbConf.getInputCountQuery() != null) {
return dbConf.getInputCountQuery();
}
StringBuilder query = new StringBuilder();
query.append("SELECT COUNT(*) FROM " + tableName);
if (conditions != null && conditions.length() > 0)
query.append(" WHERE " + conditions);
return query.toString();
}
/**
* @InfiniDB
* Initializes the map-part of the job with the appropriate input settings.
*
* @param job The job
* @param inputClass the class object implementing DBWritable, which is the
* Java object holding tuple fields.
* @param tableName The table to read data from
* @param conditions The condition used to select data, e.g. '(updated >
* 20070101 AND length > 0)'
* @param key the field name used as the split key.
* @param fieldNames The field names in the table
* @see #setInput(JobConf, Class, String, String)
*/
public static void setInput(JobConf job, Class<? extends DBWritable> inputClass,
String tableName,String conditions, String key, String... fieldNames) {
job.setInputFormat(InfiniDBInputFormat.class);
InfiniDBConfiguration dbConf = new InfiniDBConfiguration(job);
dbConf.setInputClass(inputClass);
dbConf.setInputTableName(tableName);
dbConf.setInputFieldNames(fieldNames);
dbConf.setInputConditions(conditions);
dbConf.setSplitKey(key);
}
/**
* @InfiniDB
* Initializes the map-part of the job with the appropriate input settings.
*
* @param job The job
* @param inputClass the class object implementing DBWritable, which is the
* Java object holding tuple fields.
* @param inputQuery the input query to select fields. Example :
* "SELECT f1, f2, f3 FROM Mytable ORDER BY f1"
* @param inputCountQuery the input query that returns the number of records in
* the table.
* Example : "SELECT COUNT(f1) FROM Mytable"
* @see #setInput(JobConf, Class, String, String, String, String...)
*/
public static void setInput(JobConf job, Class<? extends DBWritable> inputClass,
String inputQuery, String inputCountQuery) {
job.setInputFormat(InfiniDBInputFormat.class);
InfiniDBConfiguration dbConf = new InfiniDBConfiguration(job);
dbConf.setInputClass(inputClass);
dbConf.setInputQuery(inputQuery);
dbConf.setInputCountQuery(inputCountQuery);
}
}
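For orientation, here is a minimal sketch of wiring this input format into a job. It is not part of this commit: the driver class name is an assumption, and the table and field names simply mirror the InfiniDoopDriver example further down.
JobConf job = new JobConf(new Configuration(), MyDriver.class); // MyDriver is hypothetical
// Split on n_nationkey: getSplits() queries MIN/MAX of the key and hands
// each map task a [start, end) range of key values.
InfiniDBInputFormat.setInput(job, InfiniDoopRecord.class, "nation",
null, "n_nationkey", "n_nationkey", "n_name");
job.setOutputKeyClass(LongWritable.class);
job.setOutputValueClass(Text.class);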

View File

@ -1,139 +0,0 @@
/*
* Copyright (c) 2014 InfiniDB, Inc.
*
* InfiniDB, Inc. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package infinidb.hadoop.db;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputFormat;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.mapred.lib.db.*;
/**
* A OutputFormat that sends the reduce output to a SQL table.
* <p>
* {@link DBOutputFormat} accepts &lt;key,value&gt; pairs, where
* key has a type extending DBWritable. Returned {@link RecordWriter}
* writes <b>only the key</b> to the database with a batch SQL query.
*
*/
public class InfiniDBOutputFormat<K /*extends DBWritable*/, V>
implements OutputFormat<K,V> {
private static final Log LOG = LogFactory.getLog(InfiniDBOutputFormat.class);
/**
* A RecordWriter that writes the reduce output to a SQL table
*/
protected class DBRecordWriter
implements RecordWriter<K, V> {
private Connection connection;
private PreparedStatement statement;
protected DBRecordWriter() throws SQLException
{}
/** {@inheritDoc} */
public void close(Reporter reporter) throws IOException
{}
/** {@inheritDoc} */
public void write(K key, V value) throws IOException
{}
}
/** {@inheritDoc} */
public void checkOutputSpecs(FileSystem filesystem, JobConf job)
throws IOException
{}
/** {@inheritDoc} */
public RecordWriter<K, V> getRecordWriter(FileSystem filesystem,
JobConf job, String name, Progressable progress) throws IOException
{
try {
return new DBRecordWriter();
}
catch (Exception ex) {
throw new IOException(ex.getMessage());
}
}
/**
* Initializes the reduce-part of the job with the appropriate output settings
*
* @param job
* The job
* @param schemaName
* The schema containing the tables to insert data into
* @param tableNames
* The tables to insert data into
*/
public static void setOutput(JobConf job, String schemaName, String ... tableNames)
{
job.setOutputFormat(InfiniDBOutputFormat.class);
job.setReduceSpeculativeExecution(false);
InfiniDBConfiguration dbConf = new InfiniDBConfiguration(job);
dbConf.setOutputSchemaName(schemaName);
dbConf.setOutputTableNames(tableNames);
}
/**
* Initializes the reduce-part of the job with the appropriate output settings
*
* @param job
* The job
* @param schemaName
* The schema to insert data into
*/
public static void setOutput(JobConf job, String schemaName)
{
job.setOutputFormat(InfiniDBOutputFormat.class);
job.setReduceSpeculativeExecution(false);
InfiniDBConfiguration dbConf = new InfiniDBConfiguration(job);
dbConf.setOutputSchemaName(schemaName);
}
}
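Likewise, a hedged sketch of the reduce-side setup; the schema and table names here are assumptions rather than values mandated by the code.
JobConf job = new JobConf(new Configuration(), MyDriver.class); // MyDriver is hypothetical
// setOutput() selects InfiniDBOutputFormat and disables speculative
// execution, so each output table is written by a single reduce pass.
InfiniDBOutputFormat.setOutput(job, "tpch1", "nation", "region");
job.setNumReduceTasks(2); // matched to the number of output tables, as in the drivers below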

View File

@ -1,88 +0,0 @@
/*
* Copyright (c) 2014 InfiniDB, Inc.
*
* InfiniDB, Inc. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package infinidb.hadoop.example;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.sql.*;
import java.util.Date;
import java.util.Formatter;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.lib.db.*;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.*;
import org.apache.hadoop.mapreduce.lib.output.*;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.util.*;
import org.apache.hadoop.*;
import infinidb.hadoop.db.*;
public class InfiniDBOutputDriver extends Configured implements Tool
{
public int run (String[] args) throws Exception
{
Configuration conf = new Configuration();
JobConf jobconf = new JobConf(conf, InfiniDBOutputDriver.class);
DBConfiguration.configureDB(jobconf,
"com.mysql.jdbc.Driver",
"jdbc:mysql://srvswint4/tpch1","root", "");
String [] fields = { "n_nationkey", "n_name" };
String [] outFields = {"id", "name"};
jobconf.setInputFormat(IDBFileInputFormat.class);
jobconf.setOutputFormat(InfiniDBOutputFormat.class);
jobconf.setOutputKeyClass(NullWritable.class);
jobconf.setOutputValueClass(Text.class);
InfiniDBOutputFormat.setOutput(jobconf, "db", outFields);
InfiniDBConfiguration idbconf = new InfiniDBConfiguration(jobconf);
idbconf.setInputPath("input");
idbconf.setOutputPath("output");
idbconf.setInfiniDBHome("/usr/local/mariadb/columnstore");
jobconf.setMapperClass(InfiniDoopMapper.class);
jobconf.setNumMapTasks(1);
jobconf.setNumReduceTasks(2);
JobClient client = new JobClient();
client.setConf(jobconf);
try {
JobClient.runJob(jobconf);
} catch (Exception e) {
e.printStackTrace();
}
return 0;
}
public static void main(String [] args) throws Exception
{
int ret = ToolRunner.run(new InfiniDBOutputDriver(), args);
System.exit(ret);
}
}

View File

@ -1,90 +0,0 @@
/*
* Copyright (c) 2014 InfiniDB, Inc.
*
* InfiniDB, Inc. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package infinidb.hadoop.example;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.sql.*;
import java.util.Date;
import java.util.Formatter;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.lib.db.*;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.*;
import org.apache.hadoop.mapreduce.lib.output.*;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.util.*;
import org.apache.hadoop.*;
import infinidb.hadoop.db.*;
import infinidb.hadoop.db.InfiniDBConfiguration;
public class InfiniDoopDriver extends Configured implements Tool
{
public int run (String[] args) throws Exception
{
Configuration conf = new Configuration();
JobConf jobconf = new JobConf(conf, InfiniDoopDriver.class);
DBConfiguration.configureDB(jobconf,
"com.mysql.jdbc.Driver",
"jdbc:mysql://srvswint4/tpch1","root", "");
String [] fields = { "n_nationkey", "n_name" };
jobconf.setInputFormat(InfiniDBInputFormat.class);
jobconf.setOutputKeyClass(LongWritable.class);
jobconf.setOutputValueClass(Text.class);
InfiniDBInputFormat.setInput(jobconf, InfiniDoopRecord.class, "nation",
null, "n_nationkey", fields);
InfiniDBConfiguration idbconf = new InfiniDBConfiguration(jobconf);
idbconf.setOutputPath("output2");
jobconf.setMapperClass(InfiniDoopInputMapper.class);
jobconf.setNumMapTasks(4);
jobconf.setNumReduceTasks(1);
jobconf.set("mapred.textoutputformat.separator", "|");
JobClient client = new JobClient();
client.setConf(jobconf);
try {
JobClient.runJob(jobconf);
} catch (Exception e) {
e.printStackTrace();
}
return 0;
}
public static void main(String [] args) throws Exception
{
int ret = ToolRunner.run(new InfiniDoopDriver(), args);
System.exit(ret);
}
}

View File

@ -1,43 +0,0 @@
/*
* Copyright (c) 2014 InfiniDB, Inc.
*
* InfiniDB, Inc. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package infinidb.hadoop.example;
import java.io.IOException;
import java.io.*;
import java.sql.*;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.lib.db.*;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
public class InfiniDoopInputMapper extends MapReduceBase implements
Mapper<LongWritable, InfiniDoopRecord, LongWritable, Text> {
public void map(LongWritable key, InfiniDoopRecord val,
OutputCollector<LongWritable, Text> output, Reporter reporter) throws IOException {
output.collect(new LongWritable(val.id), new Text(val.name));
}
}

View File

@ -1,45 +0,0 @@
/*
* Copyright (c) 2014 InfiniDB, Inc.
*
* InfiniDB, Inc. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package infinidb.hadoop.example;
import java.io.IOException;
import java.io.*;
import java.sql.*;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.lib.db.*;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.*;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
/** Dummy mapper that does nothing; the real work is done by the input format. */
public class InfiniDoopMapper extends MapReduceBase implements
Mapper<NullWritable, NullWritable, NullWritable, NullWritable> {
public void map(NullWritable key, NullWritable val,
OutputCollector<NullWritable, NullWritable> output, Reporter reporter) throws IOException {
NullWritable n = NullWritable.get();
output.collect(n, n);
}
}

View File

@ -1,63 +0,0 @@
/*
* Copyright (c) 2014 InfiniDB, Inc.
*
* InfiniDB, Inc. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package infinidb.hadoop.example;
import java.io.IOException;
import java.io.*;
import java.sql.*;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.lib.db.*;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
public class InfiniDoopRecord implements Writable, DBWritable, WritableComparable<InfiniDoopRecord> {
long id;
String name;
public void readFields(DataInput in) throws IOException {
this.id = in.readLong();
this.name = Text.readString(in);
}
public void readFields(ResultSet resultSet)
throws SQLException {
this.id = resultSet.getLong(1);
this.name = resultSet.getString(2);
}
public void write(DataOutput out) throws IOException {
out.writeLong(this.id);
Text.writeString(out, this.name);
}
public void write(PreparedStatement stmt) throws SQLException {
stmt.setLong(1, this.id);
stmt.setString(2, this.name);
}
public int compareTo(InfiniDoopRecord w) {
return (this.id < w.id ? -1 :(this.id == w.id ? 0 : 1));
}
}

View File

@ -30,5 +30,5 @@ set_target_properties(loggingcpp PROPERTIES VERSION 1.0.0 SOVERSION 1)
install(TARGETS loggingcpp DESTINATION ${ENGINE_LIBDIR} COMPONENT libs)
install(FILES MessageFile.txt ErrorMessage.txt DESTINATION ${ENGINE_ETCDIR} COMPONENT platform)
install(FILES MessageFile.txt ErrorMessage.txt DESTINATION ${ENGINE_SYSCONFDIR}/columnstore COMPONENT platform)

View File

@ -34,6 +34,7 @@ using namespace std;
#include <boost/thread.hpp>
using namespace boost;
#include "config.h"
#include "configcpp.h"
using namespace config;
#include "loggingid.h"
@ -64,7 +65,7 @@ IDBErrorInfo::IDBErrorInfo()
string configFile(cf->getConfig("SystemConfig", "ErrorMessageFile"));
if (configFile.length() == 0)
configFile = startup::StartUp::installDir() + "/etc/ErrorMessage.txt";
configFile = std::string(MCSSYSCONFDIR) + "/columnstore/ErrorMessage.txt";
ifstream msgFile(configFile.c_str());

View File

@ -34,6 +34,7 @@ using namespace std;
#include <boost/thread.hpp>
using namespace boost;
#include "config.h"
#include "configcpp.h"
using namespace config;
#include "messageobj.h"
@ -56,7 +57,7 @@ void loadCatalog()
string configFile(cf->getConfig("MessageLog", "MessageLogFile"));
if (configFile.length() == 0)
configFile = startup::StartUp::installDir() + "/etc/MessageFile.txt";
configFile = std::string(MCSSYSCONFDIR) + "/columnstore/MessageFile.txt";
ifstream msgFile(configFile.c_str());

View File

@ -1,208 +0,0 @@
/* Copyright (C) 2014 InfiniDB, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
/* config.h. Generated from config.h.in by configure. */
/* config.h.in. Generated from configure.in by autoheader. */
/* Define to 1 if you have the <arpa/inet.h> header file. */
#define HAVE_ARPA_INET_H 1
/* Define to 1 if you have the `atexit' function. */
#define HAVE_ATEXIT 1
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
/* Define to 1 if you have the `dlsym' function. */
#define HAVE_DLSYM 1
/* Define to 1 if you have the <fcntl.h> header file. */
#define HAVE_FCNTL_H 1
/* Define to 1 if you have the <getopt.h> header file. */
#define HAVE_GETOPT_H 1
/* Define to 1 if you have the `getopt_long' function. */
#define HAVE_GETOPT_LONG 1
/* Define to 1 if you have the `htons' function. */
#define HAVE_HTONS 1
/* Define to 1 if you have the `inet_aton' function. */
#define HAVE_INET_ATON 1
/* Define to 1 if you have the `inet_pton' function. */
#define HAVE_INET_PTON 1
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* Define to 1 if the system has the type `in_addr_t'. */
#define HAVE_IN_ADDR_T 1
/* Define to 1 if you have the `dl' library (-ldl). */
#define HAVE_LIBDL 1
/* Define to 1 if you have the `pthread' library (-lpthread). */
#define HAVE_LIBPTHREAD 1
/* Define to 1 if you have the <limits.h> header file. */
#define HAVE_LIMITS_H 1
/* Define when you have an LLSEEK prototype */
/* #undef HAVE_LLSEEK_PROTOTYPE */
/* Define when the compiler supports LOFF_T type */
#define HAVE_LOFF_T 1
/* Define when the compiler supports LONG_LONG type */
#define HAVE_LONG_LONG 1
/* Define to 1 if you have the `lseek64' function. */
#define HAVE_LSEEK64 1
/* Define when you have an LSEEK64 prototype */
#define HAVE_LSEEK64_PROTOTYPE 1
/* Define to 1 if you have the <malloc.h> header file. */
#define HAVE_MALLOC_H 1
/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1
/* Define to 1 if you have the <netdb.h> header file. */
#define HAVE_NETDB_H 1
/* Define to 1 if you have the <netinet/in.h> header file. */
#define HAVE_NETINET_IN_H 1
/* Define to 1 if you have the <net/if.h> header file. */
#define HAVE_NET_IF_H 1
/* Define when the compiler supports OFFSET_T type */
/* #undef HAVE_OFFSET_T */
/* Define when the system has a 64 bit off_t type */
#define HAVE_OFF_T_64 1
/* Define to 1 if you have the `on_exit' function. */
#define HAVE_ON_EXIT 1
/* Define to 1 if you have the <signal.h> header file. */
#define HAVE_SIGNAL_H 1
/* Define to 1 if you have the `snprintf' function. */
#define HAVE_SNPRINTF 1
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if `imr_ifindex' is member of `struct ip_mreqn'. */
#define HAVE_STRUCT_IP_MREQN_IMR_IFINDEX 1
/* Define to 1 if you have the <sys/ioctl.h> header file. */
#define HAVE_SYS_IOCTL_H 1
/* Define to 1 if you have the <sys/param.h> header file. */
#define HAVE_SYS_PARAM_H 1
/* Define to 1 if you have the <sys/select.h> header file. */
#define HAVE_SYS_SELECT_H 1
/* Define to 1 if you have the <sys/socket.h> header file. */
#define HAVE_SYS_SOCKET_H 1
/* Define to 1 if you have the <sys/sockio.h> header file. */
/* #undef HAVE_SYS_SOCKIO_H */
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/termios.h> header file. */
/* #undef HAVE_SYS_TERMIOS_H */
/* Define to 1 if you have the <sys/time.h> header file. */
#define HAVE_SYS_TIME_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* Define to 1 if you have the <sys/uio.h> header file. */
#define HAVE_SYS_UIO_H 1
/* Define to 1 if you have <sys/wait.h> that is POSIX.1 compatible. */
#define HAVE_SYS_WAIT_H 1
/* Define to 1 if you have the `tcsetattr' function. */
#define HAVE_TCSETATTR 1
/* Define to 1 if you have the <termios.h> header file. */
#define HAVE_TERMIOS_H 1
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* Define to 1 if you have the <winsock2.h> header file. */
/* #undef HAVE_WINSOCK2_H */
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT ""
/* Define to the full name of this package. */
#define PACKAGE_NAME ""
/* Define to the full name and version of this package. */
#define PACKAGE_STRING ""
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME ""
/* Define to the version of this package. */
#define PACKAGE_VERSION ""
/* Define as the return type of signal handlers (`int' or `void'). */
#define RETSIGTYPE void
/* Define to 1 if you have the ANSI C header files. */
#define STDC_HEADERS 1
/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
#define TIME_WITH_SYS_TIME 1
/* Define to 1 if your <sys/time.h> declares `struct tm'. */
/* #undef TM_IN_SYS_TIME */
/* Define to empty if `const' does not conform to ANSI C. */
/* #undef const */
/* Define to `__inline__' or `__inline' if that's what the C compiler
calls it, or to nothing if 'inline' is not supported under any name. */
#ifndef __cplusplus
/* #undef inline */
#endif
/* Define to `unsigned int' if <sys/types.h> does not define. */
/* #undef size_t */

View File

@ -1,427 +0,0 @@
/* lib/cpp/src/thrift/config.h. Generated from config.hin by configure. */
/* config.hin. Generated from configure.ac by autoheader. */
#ifndef CONFIG_H
#define CONFIG_H
/* Define if the AI_ADDRCONFIG symbol is unavailable */
/* #undef AI_ADDRCONFIG */
/* Possible value for SIGNED_RIGHT_SHIFT_IS */
#define ARITHMETIC_RIGHT_SHIFT 1
/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP
systems. This function is required for `alloca.c' support on those systems.
*/
/* #undef CRAY_STACKSEG_END */
/* Define to 1 if using `alloca.c'. */
/* #undef C_ALLOCA */
/* Define to 1 if you have the `alarm' function. */
#define HAVE_ALARM 1
/* Define to 1 if you have `alloca', as a function or macro. */
#define HAVE_ALLOCA 1
/* Define to 1 if you have <alloca.h> and it should be used (not on Ultrix).
*/
#define HAVE_ALLOCA_H 1
/* Define to 1 if you have the <arpa/inet.h> header file. */
#define HAVE_ARPA_INET_H 1
/* define if the Boost library is available */
#define HAVE_BOOST /**/
/* Define to 1 if you have the `bzero' function. */
#define HAVE_BZERO 1
/* Define to 1 if you have the `clock_gettime' function. */
#define HAVE_CLOCK_GETTIME 1
/* Define to 1 if you have the declaration of `strerror_r', and to 0 if you
don't. */
#define HAVE_DECL_STRERROR_R 1
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */
/* #undef HAVE_DOPRNT */
/* Define to 1 if you have the <fcntl.h> header file. */
#define HAVE_FCNTL_H 1
/* Define to 1 if you have the `fork' function. */
#define HAVE_FORK 1
/* Define to 1 if you have the `ftruncate' function. */
#define HAVE_FTRUNCATE 1
/* Define to 1 if you have the `gethostbyname' function. */
#define HAVE_GETHOSTBYNAME 1
/* Define to 1 if you have the `gettimeofday' function. */
#define HAVE_GETTIMEOFDAY 1
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* define if libevent is available */
/* #undef HAVE_LIBEVENT */
/* Define to 1 if you have the <libintl.h> header file. */
#define HAVE_LIBINTL_H 1
/* Define to 1 if you have the `pthread' library (-lpthread). */
#define HAVE_LIBPTHREAD 1
/* Define to 1 if you have the `rt' library (-lrt). */
#define HAVE_LIBRT 1
/* Define to 1 if you have the `socket' library (-lsocket). */
/* #undef HAVE_LIBSOCKET */
/* Define to 1 if you have the <limits.h> header file. */
#define HAVE_LIMITS_H 1
/* Define to 1 if your system has a GNU libc compatible `malloc' function, and
to 0 otherwise. */
#define HAVE_MALLOC 1
/* Define to 1 if you have the <malloc.h> header file. */
#define HAVE_MALLOC_H 1
/* Define to 1 if you have the `memmove' function. */
#define HAVE_MEMMOVE 1
/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1
/* Define to 1 if you have the `memset' function. */
#define HAVE_MEMSET 1
/* Define to 1 if you have the `mkdir' function. */
#define HAVE_MKDIR 1
/* Define to 1 if you have the <netdb.h> header file. */
#define HAVE_NETDB_H 1
/* Define to 1 if you have the <netinet/in.h> header file. */
#define HAVE_NETINET_IN_H 1
/* Define to 1 if you have the <openssl/rand.h> header file. */
#define HAVE_OPENSSL_RAND_H 1
/* Define to 1 if you have the <openssl/ssl.h> header file. */
#define HAVE_OPENSSL_SSL_H 1
/* Define to 1 if you have the <openssl/x509v3.h> header file. */
#define HAVE_OPENSSL_X509V3_H 1
/* Define to 1 if you have the <pthread.h> header file. */
#define HAVE_PTHREAD_H 1
/* Define to 1 if the system has the type `ptrdiff_t'. */
#define HAVE_PTRDIFF_T 1
/* Define to 1 if your system has a GNU libc compatible `realloc' function,
and to 0 otherwise. */
#define HAVE_REALLOC 1
/* Define to 1 if you have the `realpath' function. */
#define HAVE_REALPATH 1
/* Define to 1 if you have the `sched_get_priority_max' function. */
#define HAVE_SCHED_GET_PRIORITY_MAX 1
/* Define to 1 if you have the `sched_get_priority_min' function. */
#define HAVE_SCHED_GET_PRIORITY_MIN 1
/* Define to 1 if you have the <sched.h> header file. */
#define HAVE_SCHED_H 1
/* Define to 1 if you have the `select' function. */
#define HAVE_SELECT 1
/* Define to 1 if you have the `socket' function. */
#define HAVE_SOCKET 1
/* Define to 1 if you have the `sqrt' function. */
#define HAVE_SQRT 1
/* Define to 1 if `stat' has the bug that it succeeds when given the
zero-length file name argument. */
/* #undef HAVE_STAT_EMPTY_STRING_BUG */
/* Define to 1 if stdbool.h conforms to C99. */
#define HAVE_STDBOOL_H 1
/* Define to 1 if you have the <stddef.h> header file. */
#define HAVE_STDDEF_H 1
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the `strchr' function. */
#define HAVE_STRCHR 1
/* Define to 1 if you have the `strdup' function. */
#define HAVE_STRDUP 1
/* Define to 1 if you have the `strerror' function. */
#define HAVE_STRERROR 1
/* Define to 1 if you have the `strerror_r' function. */
#define HAVE_STRERROR_R 1
/* Define to 1 if you have the `strftime' function. */
#define HAVE_STRFTIME 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the `strstr' function. */
#define HAVE_STRSTR 1
/* Define to 1 if you have the `strtol' function. */
#define HAVE_STRTOL 1
/* Define to 1 if you have the `strtoul' function. */
#define HAVE_STRTOUL 1
/* Define to 1 if you have the <sys/param.h> header file. */
#define HAVE_SYS_PARAM_H 1
/* Define to 1 if you have the <sys/poll.h> header file. */
#define HAVE_SYS_POLL_H 1
/* Define to 1 if you have the <sys/resource.h> header file. */
#define HAVE_SYS_RESOURCE_H 1
/* Define to 1 if you have the <sys/select.h> header file. */
#define HAVE_SYS_SELECT_H 1
/* Define to 1 if you have the <sys/socket.h> header file. */
#define HAVE_SYS_SOCKET_H 1
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/time.h> header file. */
#define HAVE_SYS_TIME_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* Define to 1 if you have the <sys/un.h> header file. */
#define HAVE_SYS_UN_H 1
/* Define to 1 if you have <sys/wait.h> that is POSIX.1 compatible. */
#define HAVE_SYS_WAIT_H 1
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* Define to 1 if you have the `vfork' function. */
#define HAVE_VFORK 1
/* Define to 1 if you have the <vfork.h> header file. */
/* #undef HAVE_VFORK_H */
/* Define to 1 if you have the `vprintf' function. */
#define HAVE_VPRINTF 1
/* Define to 1 if you have the <wchar.h> header file. */
#define HAVE_WCHAR_H 1
/* Define to 1 if `fork' works. */
#define HAVE_WORKING_FORK 1
/* Define to 1 if `vfork' works. */
#define HAVE_WORKING_VFORK 1
/* define if zlib is available */
#define HAVE_ZLIB /**/
/* Define to 1 if the system has the type `_Bool'. */
/* #undef HAVE__BOOL */
/* Possible value for SIGNED_RIGHT_SHIFT_IS */
#define LOGICAL_RIGHT_SHIFT 2
/* Define to 1 if `lstat' dereferences a symlink specified with a trailing
slash. */
#define LSTAT_FOLLOWS_SLASHED_SYMLINK 1
/* Define to the sub-directory in which libtool stores uninstalled libraries.
*/
#define LT_OBJDIR ".libs/"
/* Name of package */
#define PACKAGE "thrift"
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT ""
/* Define to the full name of this package. */
#define PACKAGE_NAME "thrift"
/* Define to the full name and version of this package. */
#define PACKAGE_STRING "thrift 0.9.1"
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "thrift"
/* Define to the home page for this package. */
#define PACKAGE_URL ""
/* Define to the version of this package. */
#define PACKAGE_VERSION "0.9.1"
/* Define as the return type of signal handlers (`int' or `void'). */
#define RETSIGTYPE void
/* Define to the type of arg 1 for `select'. */
#define SELECT_TYPE_ARG1 int
/* Define to the type of args 2, 3 and 4 for `select'. */
#define SELECT_TYPE_ARG234 (fd_set *)
/* Define to the type of arg 5 for `select'. */
#define SELECT_TYPE_ARG5 (struct timeval *)
/* Indicates the effect of the right shift operator on negative signed
integers */
#define SIGNED_RIGHT_SHIFT_IS 1
/* If using the C implementation of alloca, define if you know the
direction of stack growth for your system; otherwise it will be
automatically deduced at runtime.
STACK_DIRECTION > 0 => grows toward higher addresses
STACK_DIRECTION < 0 => grows toward lower addresses
STACK_DIRECTION = 0 => direction of growth unknown */
/* #undef STACK_DIRECTION */
/* Define to 1 if you have the ANSI C header files. */
#define STDC_HEADERS 1
/* Define to 1 if strerror_r returns char *. */
#define STRERROR_R_CHAR_P 1
/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
#define TIME_WITH_SYS_TIME 1
/* Define to 1 if your <sys/time.h> declares `struct tm'. */
/* #undef TM_IN_SYS_TIME */
/* Possible value for SIGNED_RIGHT_SHIFT_IS */
#define UNKNOWN_RIGHT_SHIFT 3
/* experimental --enable-boostthreads that replaces POSIX pthread by
boost::thread */
/* #undef USE_BOOST_THREAD */
/* Version number of package */
#define VERSION "0.9.1"
/* Define to 1 if `lex' declares `yytext' as a `char *' by default, not a
`char[]'. */
#define YYTEXT_POINTER 1
/* Define for Solaris 2.5.1 so the uint32_t typedef from <sys/synch.h>,
<pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
#define below would cause a syntax error. */
/* #undef _UINT32_T */
/* Define for Solaris 2.5.1 so the uint64_t typedef from <sys/synch.h>,
<pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
#define below would cause a syntax error. */
/* #undef _UINT64_T */
/* Define for Solaris 2.5.1 so the uint8_t typedef from <sys/synch.h>,
<pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
#define below would cause a syntax error. */
/* #undef _UINT8_T */
/* Define to empty if `const' does not conform to ANSI C. */
/* #undef const */
/* Define to `__inline__' or `__inline' if that's what the C compiler
calls it, or to nothing if 'inline' is not supported under any name. */
#ifndef __cplusplus
/* #undef inline */
#endif
/* Define to the type of a signed integer type of width exactly 16 bits if
such a type exists and the standard includes do not define it. */
/* #undef int16_t */
/* Define to the type of a signed integer type of width exactly 32 bits if
such a type exists and the standard includes do not define it. */
/* #undef int32_t */
/* Define to the type of a signed integer type of width exactly 64 bits if
such a type exists and the standard includes do not define it. */
/* #undef int64_t */
/* Define to the type of a signed integer type of width exactly 8 bits if such
a type exists and the standard includes do not define it. */
/* #undef int8_t */
/* Define to rpl_malloc if the replacement function should be used. */
/* #undef malloc */
/* Define to `int' if <sys/types.h> does not define. */
/* #undef mode_t */
/* Define to `long int' if <sys/types.h> does not define. */
/* #undef off_t */
/* Define to `int' if <sys/types.h> does not define. */
/* #undef pid_t */
/* Define to rpl_realloc if the replacement function should be used. */
/* #undef realloc */
/* Define to `unsigned int' if <sys/types.h> does not define. */
/* #undef size_t */
/* Define to `int' if <sys/types.h> does not define. */
/* #undef ssize_t */
/* Define to the type of an unsigned integer type of width exactly 16 bits if
such a type exists and the standard includes do not define it. */
/* #undef uint16_t */
/* Define to the type of an unsigned integer type of width exactly 32 bits if
such a type exists and the standard includes do not define it. */
/* #undef uint32_t */
/* Define to the type of an unsigned integer type of width exactly 64 bits if
such a type exists and the standard includes do not define it. */
/* #undef uint64_t */
/* Define to the type of an unsigned integer type of width exactly 8 bits if
such a type exists and the standard includes do not define it. */
/* #undef uint8_t */
/* Define as `fork' if `vfork' does not work. */
/* #undef vfork */
/* Define to empty if the keyword `volatile' does not work. Warning: valid
code using `volatile' can become incorrect without. Disable with care. */
/* #undef volatile */
#endif

View File

@ -1,24 +1,427 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/* lib/cpp/src/thrift/config.h. Generated from config.hin by configure. */
/* config.hin. Generated from configure.ac by autoheader. */
#ifdef _WIN32
# include <thrift/windows/config.h>
#else
# include <thrift/config.h>
#ifndef CONFIG_H
#define CONFIG_H
/* Define if the AI_ADDRCONFIG symbol is unavailable */
/* #undef AI_ADDRCONFIG */
/* Possible value for SIGNED_RIGHT_SHIFT_IS */
#define ARITHMETIC_RIGHT_SHIFT 1
/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP
systems. This function is required for `alloca.c' support on those systems.
*/
/* #undef CRAY_STACKSEG_END */
/* Define to 1 if using `alloca.c'. */
/* #undef C_ALLOCA */
/* Define to 1 if you have the `alarm' function. */
#define HAVE_ALARM 1
/* Define to 1 if you have `alloca', as a function or macro. */
#define HAVE_ALLOCA 1
/* Define to 1 if you have <alloca.h> and it should be used (not on Ultrix).
*/
#define HAVE_ALLOCA_H 1
/* Define to 1 if you have the <arpa/inet.h> header file. */
#define HAVE_ARPA_INET_H 1
/* define if the Boost library is available */
#define HAVE_BOOST /**/
/* Define to 1 if you have the `bzero' function. */
#define HAVE_BZERO 1
/* Define to 1 if you have the `clock_gettime' function. */
#define HAVE_CLOCK_GETTIME 1
/* Define to 1 if you have the declaration of `strerror_r', and to 0 if you
don't. */
#define HAVE_DECL_STRERROR_R 1
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */
/* #undef HAVE_DOPRNT */
/* Define to 1 if you have the <fcntl.h> header file. */
#define HAVE_FCNTL_H 1
/* Define to 1 if you have the `fork' function. */
#define HAVE_FORK 1
/* Define to 1 if you have the `ftruncate' function. */
#define HAVE_FTRUNCATE 1
/* Define to 1 if you have the `gethostbyname' function. */
#define HAVE_GETHOSTBYNAME 1
/* Define to 1 if you have the `gettimeofday' function. */
#define HAVE_GETTIMEOFDAY 1
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* define if libevent is available */
/* #undef HAVE_LIBEVENT */
/* Define to 1 if you have the <libintl.h> header file. */
#define HAVE_LIBINTL_H 1
/* Define to 1 if you have the `pthread' library (-lpthread). */
#define HAVE_LIBPTHREAD 1
/* Define to 1 if you have the `rt' library (-lrt). */
#define HAVE_LIBRT 1
/* Define to 1 if you have the `socket' library (-lsocket). */
/* #undef HAVE_LIBSOCKET */
/* Define to 1 if you have the <limits.h> header file. */
#define HAVE_LIMITS_H 1
/* Define to 1 if your system has a GNU libc compatible `malloc' function, and
to 0 otherwise. */
#define HAVE_MALLOC 1
/* Define to 1 if you have the <malloc.h> header file. */
#define HAVE_MALLOC_H 1
/* Define to 1 if you have the `memmove' function. */
#define HAVE_MEMMOVE 1
/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1
/* Define to 1 if you have the `memset' function. */
#define HAVE_MEMSET 1
/* Define to 1 if you have the `mkdir' function. */
#define HAVE_MKDIR 1
/* Define to 1 if you have the <netdb.h> header file. */
#define HAVE_NETDB_H 1
/* Define to 1 if you have the <netinet/in.h> header file. */
#define HAVE_NETINET_IN_H 1
/* Define to 1 if you have the <openssl/rand.h> header file. */
#define HAVE_OPENSSL_RAND_H 1
/* Define to 1 if you have the <openssl/ssl.h> header file. */
#define HAVE_OPENSSL_SSL_H 1
/* Define to 1 if you have the <openssl/x509v3.h> header file. */
#define HAVE_OPENSSL_X509V3_H 1
/* Define to 1 if you have the <pthread.h> header file. */
#define HAVE_PTHREAD_H 1
/* Define to 1 if the system has the type `ptrdiff_t'. */
#define HAVE_PTRDIFF_T 1
/* Define to 1 if your system has a GNU libc compatible `realloc' function,
and to 0 otherwise. */
#define HAVE_REALLOC 1
/* Define to 1 if you have the `realpath' function. */
#define HAVE_REALPATH 1
/* Define to 1 if you have the `sched_get_priority_max' function. */
#define HAVE_SCHED_GET_PRIORITY_MAX 1
/* Define to 1 if you have the `sched_get_priority_min' function. */
#define HAVE_SCHED_GET_PRIORITY_MIN 1
/* Define to 1 if you have the <sched.h> header file. */
#define HAVE_SCHED_H 1
/* Define to 1 if you have the `select' function. */
#define HAVE_SELECT 1
/* Define to 1 if you have the `socket' function. */
#define HAVE_SOCKET 1
/* Define to 1 if you have the `sqrt' function. */
#define HAVE_SQRT 1
/* Define to 1 if `stat' has the bug that it succeeds when given the
zero-length file name argument. */
/* #undef HAVE_STAT_EMPTY_STRING_BUG */
/* Define to 1 if stdbool.h conforms to C99. */
#define HAVE_STDBOOL_H 1
/* Define to 1 if you have the <stddef.h> header file. */
#define HAVE_STDDEF_H 1
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the `strchr' function. */
#define HAVE_STRCHR 1
/* Define to 1 if you have the `strdup' function. */
#define HAVE_STRDUP 1
/* Define to 1 if you have the `strerror' function. */
#define HAVE_STRERROR 1
/* Define to 1 if you have the `strerror_r' function. */
#define HAVE_STRERROR_R 1
/* Define to 1 if you have the `strftime' function. */
#define HAVE_STRFTIME 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the `strstr' function. */
#define HAVE_STRSTR 1
/* Define to 1 if you have the `strtol' function. */
#define HAVE_STRTOL 1
/* Define to 1 if you have the `strtoul' function. */
#define HAVE_STRTOUL 1
/* Define to 1 if you have the <sys/param.h> header file. */
#define HAVE_SYS_PARAM_H 1
/* Define to 1 if you have the <sys/poll.h> header file. */
#define HAVE_SYS_POLL_H 1
/* Define to 1 if you have the <sys/resource.h> header file. */
#define HAVE_SYS_RESOURCE_H 1
/* Define to 1 if you have the <sys/select.h> header file. */
#define HAVE_SYS_SELECT_H 1
/* Define to 1 if you have the <sys/socket.h> header file. */
#define HAVE_SYS_SOCKET_H 1
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/time.h> header file. */
#define HAVE_SYS_TIME_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* Define to 1 if you have the <sys/un.h> header file. */
#define HAVE_SYS_UN_H 1
/* Define to 1 if you have <sys/wait.h> that is POSIX.1 compatible. */
#define HAVE_SYS_WAIT_H 1
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* Define to 1 if you have the `vfork' function. */
#define HAVE_VFORK 1
/* Define to 1 if you have the <vfork.h> header file. */
/* #undef HAVE_VFORK_H */
/* Define to 1 if you have the `vprintf' function. */
#define HAVE_VPRINTF 1
/* Define to 1 if you have the <wchar.h> header file. */
#define HAVE_WCHAR_H 1
/* Define to 1 if `fork' works. */
#define HAVE_WORKING_FORK 1
/* Define to 1 if `vfork' works. */
#define HAVE_WORKING_VFORK 1
/* define if zlib is available */
#define HAVE_ZLIB /**/
/* Define to 1 if the system has the type `_Bool'. */
/* #undef HAVE__BOOL */
/* Possible value for SIGNED_RIGHT_SHIFT_IS */
#define LOGICAL_RIGHT_SHIFT 2
/* Define to 1 if `lstat' dereferences a symlink specified with a trailing
slash. */
#define LSTAT_FOLLOWS_SLASHED_SYMLINK 1
/* Define to the sub-directory in which libtool stores uninstalled libraries.
*/
#define LT_OBJDIR ".libs/"
/* Name of package */
#define PACKAGE "thrift"
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT ""
/* Define to the full name of this package. */
#define PACKAGE_NAME "thrift"
/* Define to the full name and version of this package. */
#define PACKAGE_STRING "thrift 0.9.1"
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "thrift"
/* Define to the home page for this package. */
#define PACKAGE_URL ""
/* Define to the version of this package. */
#define PACKAGE_VERSION "0.9.1"
/* Define as the return type of signal handlers (`int' or `void'). */
#define RETSIGTYPE void
/* Define to the type of arg 1 for `select'. */
#define SELECT_TYPE_ARG1 int
/* Define to the type of args 2, 3 and 4 for `select'. */
#define SELECT_TYPE_ARG234 (fd_set *)
/* Define to the type of arg 5 for `select'. */
#define SELECT_TYPE_ARG5 (struct timeval *)
/* Indicates the effect of the right shift operator on negative signed
integers */
#define SIGNED_RIGHT_SHIFT_IS 1
/* If using the C implementation of alloca, define if you know the
direction of stack growth for your system; otherwise it will be
automatically deduced at runtime.
STACK_DIRECTION > 0 => grows toward higher addresses
STACK_DIRECTION < 0 => grows toward lower addresses
STACK_DIRECTION = 0 => direction of growth unknown */
/* #undef STACK_DIRECTION */
/* Define to 1 if you have the ANSI C header files. */
#define STDC_HEADERS 1
/* Define to 1 if strerror_r returns char *. */
#define STRERROR_R_CHAR_P 1
/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
#define TIME_WITH_SYS_TIME 1
/* Define to 1 if your <sys/time.h> declares `struct tm'. */
/* #undef TM_IN_SYS_TIME */
/* Possible value for SIGNED_RIGHT_SHIFT_IS */
#define UNKNOWN_RIGHT_SHIFT 3
/* experimental --enable-boostthreads that replaces POSIX pthread by
boost::thread */
/* #undef USE_BOOST_THREAD */
/* Version number of package */
#define VERSION "0.9.1"
/* Define to 1 if `lex' declares `yytext' as a `char *' by default, not a
`char[]'. */
#define YYTEXT_POINTER 1
/* Define for Solaris 2.5.1 so the uint32_t typedef from <sys/synch.h>,
<pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
#define below would cause a syntax error. */
/* #undef _UINT32_T */
/* Define for Solaris 2.5.1 so the uint64_t typedef from <sys/synch.h>,
<pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
#define below would cause a syntax error. */
/* #undef _UINT64_T */
/* Define for Solaris 2.5.1 so the uint8_t typedef from <sys/synch.h>,
<pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
#define below would cause a syntax error. */
/* #undef _UINT8_T */
/* Define to empty if `const' does not conform to ANSI C. */
/* #undef const */
/* Define to `__inline__' or `__inline' if that's what the C compiler
calls it, or to nothing if 'inline' is not supported under any name. */
#ifndef __cplusplus
/* #undef inline */
#endif
/* Define to the type of a signed integer type of width exactly 16 bits if
such a type exists and the standard includes do not define it. */
/* #undef int16_t */
/* Define to the type of a signed integer type of width exactly 32 bits if
such a type exists and the standard includes do not define it. */
/* #undef int32_t */
/* Define to the type of a signed integer type of width exactly 64 bits if
such a type exists and the standard includes do not define it. */
/* #undef int64_t */
/* Define to the type of a signed integer type of width exactly 8 bits if such
a type exists and the standard includes do not define it. */
/* #undef int8_t */
/* Define to rpl_malloc if the replacement function should be used. */
/* #undef malloc */
/* Define to `int' if <sys/types.h> does not define. */
/* #undef mode_t */
/* Define to `long int' if <sys/types.h> does not define. */
/* #undef off_t */
/* Define to `int' if <sys/types.h> does not define. */
/* #undef pid_t */
/* Define to rpl_realloc if the replacement function should be used. */
/* #undef realloc */
/* Define to `unsigned int' if <sys/types.h> does not define. */
/* #undef size_t */
/* Define to `int' if <sys/types.h> does not define. */
/* #undef ssize_t */
/* Define to the type of an unsigned integer type of width exactly 16 bits if
such a type exists and the standard includes do not define it. */
/* #undef uint16_t */
/* Define to the type of an unsigned integer type of width exactly 32 bits if
such a type exists and the standard includes do not define it. */
/* #undef uint32_t */
/* Define to the type of an unsigned integer type of width exactly 64 bits if
such a type exists and the standard includes do not define it. */
/* #undef uint64_t */
/* Define to the type of an unsigned integer type of width exactly 8 bits if
such a type exists and the standard includes do not define it. */
/* #undef uint8_t */
/* Define as `fork' if `vfork' does not work. */
/* #undef vfork */
/* Define to empty if the keyword `volatile' does not work. Warning: valid
code using `volatile' can become incorrect without. Disable with care. */
/* #undef volatile */
#endif

View File

@ -1,90 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifndef _THRIFT_WINDOWS_CONFIG_H_
#define _THRIFT_WINDOWS_CONFIG_H_ 1
#if defined(_MSC_VER) && (_MSC_VER > 1200)
#pragma once
#endif // _MSC_VER
#ifndef _WIN32
#error This is a MSVC header only.
#endif
// use std::thread in MSVC11 (2012) or newer
#if _MSC_VER >= 1700
# define USE_STD_THREAD 1
// otherwise use boost threads
#else
# define USE_BOOST_THREAD 1
#endif
#ifndef TARGET_WIN_XP
# define TARGET_WIN_XP 1
#endif
#if TARGET_WIN_XP
# ifndef WINVER
# define WINVER 0x0501
# endif
# ifndef _WIN32_WINNT
# define _WIN32_WINNT 0x0501
# endif
#endif
#ifndef _WIN32_WINNT
# define _WIN32_WINNT 0x0601
#endif
#pragma warning(disable: 4996) // Deprecated posix name.
#define VERSION "1.0.0-dev"
#define HAVE_GETTIMEOFDAY 1
#define HAVE_SYS_STAT_H 1
#ifdef HAVE_STDINT_H
# include <stdint.h>
#else
# include <boost/cstdint.hpp>
typedef boost::int64_t int64_t;
typedef boost::uint64_t uint64_t;
typedef boost::int32_t int32_t;
typedef boost::uint32_t uint32_t;
typedef boost::int16_t int16_t;
typedef boost::uint16_t uint16_t;
typedef boost::int8_t int8_t;
typedef boost::uint8_t uint8_t;
#endif
#include <thrift/transport/PlatformSocket.h>
#include <thrift/windows/GetTimeOfDay.h>
#include <thrift/windows/Operators.h>
#include <thrift/windows/TWinsockSingleton.h>
#include <thrift/windows/WinFcntl.h>
#include <thrift/windows/SocketPair.h>
// windows
#include <Winsock2.h>
#include <ws2tcpip.h>
#pragma comment(lib, "Ws2_32.lib")
#pragma comment(lib, "advapi32.lib") //For security APIs in TPipeServer
#endif // _THRIFT_WINDOWS_CONFIG_H_

View File

@ -1,299 +0,0 @@
#!/usr/bin/python
##
## Bulkloader script by Martin Thomas
##
import os, sys, glob, shutil, xml.dom.minidom
import getopt
import logging
import time
logger = logging.getLogger()
shdlr = logging.StreamHandler()
fhdlr = logging.FileHandler(filename='bulkload.log' )
formatter = logging.Formatter('%(asctime)s:%(levelname)s: %(message)s')
shdlr.setFormatter(formatter)
fhdlr.setFormatter(formatter)
logger.addHandler(shdlr)
logger.addHandler(fhdlr)
## only report INFO or higher - change to WARNING to silence all logging
logger.setLevel(logging.INFO)
def usage():
print """
Bulkload.py is intended to automate the manual steps required to load the database and build indexes from scratch.
- ipcs-pat will be built if missing
- cpimport will be removed and rebuilt
- PrimProc will be stopped and started
- shared memory segments will be removed using ipcs-pat
- database files will be removed
- dbbuilder will be run with option 5
- oid files and job files will be copied to correct locations
- column data will be parsed and loaded using Job 299
- index data will be exported, sorted and loaded using Job 300
Options:
-w or --wedir= : Specify the write engine branch to use instead of the default trunk
-n or --nocache= : Specify either col or idx and the -c flag will NOT be sent to cpimport
-u or --usage : Usage message
Example:
bulkload.py -w/home/adevelop/genii/we1.1 --nocache=idx
Load the database using the we1.1 branch for writeengine and do not use cache when building indexes
THIS SPACE LEFT INTENTIONALLY BLANK
"""
def find_paths():
"""Find DBRoot and BulkRoot."""
try:
config_file = os.environ['COLUMNSTORE_CONFIG_FILE']
except KeyError:
try:
logger.info("Environment variable COLUMNSTORE_CONFIG_FILE not set, looking for system Columnstore.xml")
config_file = '/usr/local/mariadb/columnstore/etc/Columnstore.xml'
os.lstat(config_file)
except:
logger.error('No config file available')
sys.exit('No config file available')
try:
xmldoc = xml.dom.minidom.parse(config_file)
bulk_node = xmldoc.getElementsByTagName('BulkRoot')[0]
db_node = xmldoc.getElementsByTagName('DBRoot1')[0]
bulk_dir = bulk_node.childNodes[0].nodeValue
data_dir = db_node.childNodes[0].nodeValue
except Exception, e:
logger.error('Error parsing config file')
logger.error(e)
sys.exit('Error parsing config file')
return (bulk_dir, data_dir)
def check_dirs(bulkroot, dbroot):
problem = 0
res = 0
reqd_dirs = {
os.getenv('HOME')+'/genii' : "No genii directory found (contains tools required to continue) (%s)",
bulkroot: "Bulkroot specified as %s but not found",
bulkroot+'/job': "No job directory found - needed to store Job xml files (looked in %s)",
bulkroot+'/data/import': "No data/import directory found - expected %s to hold data to be loaded",
bulkroot+'/log': "No log directory found - expected %s to log into",
dbroot : "DBroot specified as %s but not found"
}
for dir in reqd_dirs.keys():
try:
res = os.lstat(dir)
except:
problem = 1
logger.error(reqd_dirs[dir]%dir)
if problem:
sys.exit(1)
def fix_hwm(job_file):
"""Find hwm in xml file and change to 0"""
import re
src_file = open(job_file, 'r')
dst_file = open(job_file+'.tmp', 'w')
rep = re.compile('hwm="1"')
for line in src_file:
line = rep.sub('hwm="0"', line)
dst_file.write(line)
src_file.close()
dst_file.close()
# use os.rename instead of shutil.move to avoid problems traversing devices
os.rename(job_file+'.tmp', job_file)
def find_indexes(job_file):
"""Find index definitions in job_file and return list of files to sort"""
index_files = []
try: # try because we may have an old version of python
xmldoc = xml.dom.minidom.parse(job_file)
for index_node in xmldoc.getElementsByTagName('Index'):
index_files.append(index_node.getAttribute('mapName'))
except:
import re
f = open(job_file)
for line in f:
b = re.search('mapName="(CPL_[0-9A-Z_]+)"', line)
try: # try because not every line will match
index_files.append(b.group(1))
except: pass
return index_files
def exec_cmd(cmd, args):
"""Execute command using subprocess module or if that fails,
use os.system
"""
try:
import subprocess
try:
retcode = subprocess.call(cmd + " " + args, shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
sys.exit(-1)
else:
print >>sys.stderr, "Child returned", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e
sys.exit(-1)
except:
logger.info ('Old version of Python - subprocess not available, falling back to os.system')
logger.info ('Executing: '+cmd+' '+args)
res = os.system(cmd+' '+args)
if res:
logger.error('Bad return code %i from %s'%(res, cmd))
sys.exit( res )
def build_tool(tool):
"""
Use the tool dictionary to determine if required tool exists
and build if not
"""
if not os.path.exists(tool['path']+tool['tool']):
logger.warn ("Building %s before continuing"%tool['tool'])
curdir=os.getcwd()
os.chdir(tool['path'])
exec_cmd(tool['builder'], tool['args'])
os.chdir(curdir)
def main():
"""
Bulk load the database.
Check that we can write OIDfiles, that all required tools exist,
clean up old files, sort the index inserts and generally rock and roll
"""
start_dir = os.getcwd() # remember where we started
if not os.environ.has_key('LD_LIBRARY_PATH'):
logger.info('No environment variable LD_LIBRARY_PATH')
else:
if len(os.getenv('LD_LIBRARY_PATH'))<5:
logger.info('Suspicious LD_LIBRARY_PATH: %s'%os.getenv('LD_LIBRARY_PATH'))
#-- figure out paths
home = os.getenv('HOME')
genii = home+'/genii'
cache = {}
cache['idx'] = '-c'
cache['col'] = '-c'
#-- allow us to specify a write engine branch
opts, args = getopt.getopt(sys.argv[1:], 'w:n:u', ['wedir=', 'nocache=', 'usage'])
wedir = genii+'/writeengine'
for opt, arg in opts:
if opt =='-w' or opt =='--wedir':
wedir = arg
if opt == '-n' or opt == '--nocache':
if (arg=='idx' or arg=='col'):
cache[arg] = ''
logger.info("No cache for %s"% arg)
if opt == '-u' or opt == '--usage':
usage()
sys.exit()
logger.info("Using writengine at %s"%wedir)
(bulkroot, dbroot) = find_paths()
logger.info ("Bulkroot: %s \tDBRoot: %s\n"%(bulkroot, dbroot))
check_dirs(bulkroot, dbroot)
if len(glob.glob(bulkroot+'/data/import/*tbl')) == 0:
sys.exit("No files for import found in BulkRoot: %s"%(bulkroot))
if len(glob.glob(dbroot+'/000.dir'))==0:
logger.info("No files found in DBRoot: %s (not fatal)"%dbroot)
## force rebuild cpimport and build ipcs-pat if required
build_tool({'path':genii+'/versioning/BRM/',
'tool':'ipcs-pat',
'builder':'make', 'args':'tools'})
build_tool({'path':wedir+'/bulk/',
'tool':'cpimport',
'builder':'make', 'args':'clean'})
try:
exec_cmd('rm -f', wedir+'/bulk/cpimport')
except:
pass
try:
os.lstat(start_dir+'/cpimport') # look in local directory first
except:
build_tool({'path':wedir+'/bulk/',
'tool':'cpimport',
'builder':'make', 'args':'cpimport'})
## clean up before starting
## remove old db files, removed old temp files, remove shared memory segments,
## kill old PrimProc and start new one
logger.info ("Removing old DB files")
exec_cmd('rm -fr ', dbroot+'/000.dir')
logger.info ("Removing old temp files")
exec_cmd('rm -fr ', bulkroot+'/data/import/*.idx.txt')
logger.info ("Removing old process files")
exec_cmd('rm -fr ', bulkroot+'/process/*.*')
logger.info("Killing primProc")
os.system('killall -q -u $USER PrimProc')
logger.info ("kill controllernode and workernode")
exec_cmd(genii+'/export/bin/dbrm', "stop ")
time.sleep(2)
logger.info ("Removing shared memory segments")
exec_cmd(genii+'/versioning/BRM/ipcs-pat', '-d')
logger.info("Starting controllernode workernode")
exec_cmd(genii+'/export/bin/dbrm', "start ")
logger.info("Starting primProc")
exec_cmd(genii+'/export/bin/PrimProc', "> primproc.log &")
## run dbbuilder - add yes command at front to automatically answer questions
logger.info ("Building db and indexes (no data inserted)")
exec_cmd('yes | '+genii+'/tools/dbbuilder/dbbuilder', ' 5')
logger.info ("Relocating OID files")
for xmlfile in glob.glob('./Job*xml'):
logger.info ("Copying %s to %s\n"%(xmlfile, bulkroot+'/job'))
# use os.rename instead of shutil.move to avoid problems traversing devices
os.rename(xmlfile, bulkroot+'/job/'+xmlfile)
logger.info("Using cpimport at %s"%(wedir+'/bulk/cpimport'))
exec_cmd('time '+wedir+'/bulk/cpimport', '-j 299 ')
exec_cmd(wedir+'/bulk/cpimport', '-c -j 300 ' )
## the following line allows either interactive use or module import
if __name__=="__main__": main()

View File

@ -1,121 +0,0 @@
#!/bin/bash
#This is the procedure for running bulkload using the cpimport program
#Usage of this program:
#The necessary input parameter is the schema name
#For example: bulkload.sh -s TPCH
#A table name and a Job ID can be entered by the user when prompted, or skipped by hitting the enter key
#When the table name is skipped, ALL of the columns and indexes in ALL of the tables in the schema will be loaded
#When a table name is entered, all of the columns and indexes in that table will be loaded
#The Job ID determines the names of the two xml files. For example, job id 100 generates Job_100.xml for columns and Job_101.xml for indexes. The job id for the index xml file is the entered job id + 1
#If the job id is skipped, the default job ids are 299 and 300 for the column and index files
#Two xml files will be generated, residing in the bulkroot directory under the subdirectory job
#For example, the job directory may look like /usr/local/mariadb/columnstore/test/bulk/job
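# A concrete run (hypothetical values): "./bulkload.sh -s TPCH -j 100" generates
# Job_100.xml and Job_101.xml under <BulkRoot>/job and then loads the column data
# with "cpimport -j 100" (the parallel index load at the bottom is commented out)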
# Set up a default search path.
PROG_NAME=$(basename $0)
SUFFIX=.tbl
TABLENAME=""
while getopts 't:j:e:s:d:p:n:u:h' OPTION
do
case ${OPTION} in
s) Schema=${OPTARG};;
t) TABLENAME=${OPTARG};;
j) JOBID=${OPTARG};;
e) MAXERROR=${OPTARG};;
p) DESC=${OPTARG};;
d) DELIMITER=${OPTARG};;
n) NAME=${OPTARG};;
u) USER=${OPTARG};;
h) echo "Options: ${PROG_NAME} -s schema -j jobid [-t TableName -e max_error_row -p description -d delimiter -n name -u user]"
exit 2;;
\?) echo "Options: ${PROG_NAME} -s schema -j jobid [-t TableName -e max_error_row -s description -d delimiter -n name -u user]"
exit 2;;
esac
done
#generate column xml file
echo "MAXERROR in $PROG_NAME =" $MAXERROR
echo "JOBID in $PROG_NAME =" $JOBID
echo "Schema is " $Schema
echo "DESC is " $DESC
echo "DELIMITER =" $DELIMITER
echo "TABLENAME is " $TABLENAME
echo "NAME is " $NAME
if [ -n "$TABLENAME" ]; then
./colxml $Schema -t $TABLENAME -j $JOBID -d $DELIMITER -s "$DESC" -e $MAXERROR -n "$NAME" -u $USER
if [ "$?" <> "0" ]; then
echo "Error in colxml !" 1>&2
exit 1
fi
command="colxml $Schema -t $TABLENAME -j $JOBID -d $DELIMITER -s \"$DESC\" -e $MAXERROR -n \"$NAME\" -u \"$USER\" "
echo $command
else
./colxml $Schema -j $JOBID -d $DELIMITER -s "$DESC" -e $MAXERROR -n "$NAME" -u $USER
if [ "$?" <> "0" ]; then
echo "Error in colxml !" 1>&2
exit 1
fi
command="colxml $Schema -j $JOBID -d "$DELIMITER" -s \"$DESC\" -e $MAXERROR -n \"$NAME\" -u \"$USER\" "
echo $command
fi
#generate index xml file
DESC="table index definition"
NAME="index definitions for tables in $Schema"
let "JOBID2 = JOBID+1"
echo "DEFAULT INDEX JOB ID is " $JOBID2
if [ -n "$TABLENAME" ]; then
./indxml $Schema -t $TABLENAME -j $JOBID2 -s "$DESC" -e $MAXERROR -n "$NAME" -u $USER
if [ "$?" <> "0" ]; then
echo "Error in indxml !" 1>&2
exit 1
fi
command="indxml $Schema -t $TABLENAME -j $JOBID2 -s \"$DESC\" -e $MAXERROR -n \"$NAME\" -u \"$USER\" "
echo $command
else
./indxml $Schema -j $JOBID2 -s "$DESC" -e $MAXERROR -n "$NAME" -u $USER
if [ "$?" <> "0" ]; then
echo "Error in colxml !" 1>&2
exit 1
fi
command="indxml $Schema -j $JOBID2 -s \"$DESC\" -e $MAXERROR -n \"$NAME\" -u \"$USER\" "
echo $command
fi
#get bulkroot
if [ -n "$CALPONT_CONFIG_FILE" ]; then
echo "CALPONT_CONFIG_FILE=" $CALPONT_CONFIG_FILE
elif [ -z "$CALPONT_CONFIG_FILE"]; then
CALPONT_CONFIG_FILE="/usr/local/mariadb/columnstore/etc/Columnstore.xml"
echo "CALPONT_CONFIG_FILE=" $CALPONT_CONFIG_FILE
else
CALPONT_CONFIG_FILE="/usr/local/mariadb/columnstore/etc/Columnstore.xml"
echo "CALPONT_CONFIG_FILE=" $CALPONT_CONFIG_FILE
fi
awk '/BulkRoot/ { sub(/<BulkRoot>/,"",$0); sub(/<\/BulkRoot>/,"",$0); sub(/" "/,"",$0);print $0 > "tmp.txt"}' $CALPONT_CONFIG_FILE
sed -e 's/ *//g' tmp.txt > out.txt
BulkRoot=$(cat out.txt)
echo "BulkRoot=" $BulkRoot
rm -rf out.txt tmp.txt
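# Note: a single-step equivalent of the awk/sed/tr pipeline above (a sketch,
# assuming the usual one-line <BulkRoot> element) would be:
#   BulkRoot=$(sed -n 's:.*<BulkRoot>\(.*\)</BulkRoot>.*:\1:p' $CALPONT_CONFIG_FILE | tr -d ' ')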
#bulk load column files
./cpimport -j $JOBID
command="cpimport -j $JOBID"
echo $command
#bulk load parallel index files
#./splitidx -j $JOBID2
#IDX_SHELL_SCRIPT="$BulkRoot/process/Job_$JOBID2.sh"
#chmod +x $IDX_SHELL_SCRIPT
#echo " run parallel loading $IDX_SHELL_SCRIPT"
#$IDX_SHELL_SCRIPT

View File

@ -1,93 +0,0 @@
#!/usr/bin/python
import os, sys, glob, shutil, xml.dom.minidom
def find_paths():
"""Find DBRoot and BulkRoot."""
try:
config_file = os.environ['COLUMNSTORE_CONFIG_FILE']
except KeyError:
try:
config_file = '/usr/local/mariadb/columnstore/etc/Columnstore.xml'
os.lstat(config_file)
except:
sys.exit('No config file available')
xmldoc = xml.dom.minidom.parse(config_file)
bulk_node = xmldoc.getElementsByTagName('BulkRoot')[0]
db_node = xmldoc.getElementsByTagName('DBRoot')[0]
bulk_dir = bulk_node.childNodes[0].nodeValue
data_dir = db_node.childNodes[0].nodeValue
return (bulk_dir, data_dir)
def validate_indexes(job_file):
index_files = []
xmldoc = xml.dom.minidom.parse(job_file)
for index_node in xmldoc.getElementsByTagName('Index'):
curTreeOid = index_node.getAttribute('iTreeOid')
curListOid = index_node.getAttribute('iListOid')
curMapOid = index_node.getAttribute('mapOid')
#curIdxCmdArg = ' -t ' + curTreeOid + ' -l ' + curListOid + ' -v -c ' + curMapOid + ' > idxCol_' + curMapOid+'.out'
curIdxCmdArg = ' -t %s -l %s -v -c %s > idxCol_%s.out' % (curTreeOid, curListOid, curMapOid, curMapOid)
index_files.append( curIdxCmdArg )
return index_files
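# For a hypothetical index with iTreeOid=3001, iListOid=3002 and mapOid=3000 the
# argument string built above is:
#   ' -t 3001 -l 3002 -v -c 3000 > idxCol_3000.out'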
def exec_cmd(cmd, args):
"""Execute command using subprocess module or if that fails,
use os.system
"""
try:
import subprocess
try:
retcode = subprocess.call(cmd + " " + args, shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
sys.exit(-1)
else:
print >>sys.stderr, "Child returned", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e
sys.exit(-1)
except:
res = os.system(cmd+' '+args)
if res:
sys.exit( res )
def main():
"""
Validate indexes.
"""
if len(os.getenv('LD_LIBRARY_PATH'))<5:
print 'Suspicous LD_LIBRARY_PATH: %s'%os.getenv('LD_LIBRARY_PATH')
home = os.getenv('HOME')
genii = home+'/genii'
(bulkroot, dbroot) = find_paths()
if len(glob.glob(bulkroot+'/job/Job_300.xml')) == 0:
sys.exit("No Job_300.xml exist ")
indexes = validate_indexes(bulkroot+'/job/Job_300.xml')
for idxCmdArg in indexes:
print idxCmdArg
exec_cmd( genii + '/tools/evalidx/evalidx', idxCmdArg )
## the following line allows either interactive use or module import
if __name__=="__main__": main()

View File

@ -1,93 +0,0 @@
#!/bin/bash
#This is the procedure for running bulkload using the cpimport program
#Usage of this program:
#The necessary input parameter is the schema name
#For example: bulkload.sh -s TPCH
#A table name and a Job ID can be entered by the user when prompted, or skipped by hitting the enter key
#When the table name is skipped, ALL of the columns and indexes in ALL of the tables in the schema will be loaded
#When a table name is entered, all of the columns and indexes in that table will be loaded
#The Job ID determines the names of the two xml files. For example, job id 100 generates Job_100.xml for columns and Job_101.xml for indexes. The job id for the index xml file is the entered job id + 1
#If the job id is skipped, the default job ids are 299 and 300 for the column and index files
#Two xml files will be generated, residing in the bulkroot directory under the subdirectory job
#For example, the job directory may look like /usr/local/mariadb/columnstore/test/bulk/job
# Set up a default search path.
#echo "This is Script name " $0
PROG_NAME=$(basename $0)
USERNAME=`grep "^${USER}:" /etc/passwd | cut -d: -f5`
JOBID=""
TABLENAME=""
Schema=""
DELIMITER="|"
MAXERROR=10
FORMAT=CSV
DESC="table columns definition"
NAME="table columns definition"
while getopts 't:j:e:s:d:p:n:hu' OPTION
do
case ${OPTION} in
s) Schema=${OPTARG};;
t) TABLENAME=${OPTARG};;
j) JOBID=${OPTARG};;
e) MAXERROR=${OPTARG};;
p) DESC=${OPTARG};;
d) DELIMITER=${OPTARG};;
n) NAME=${OPTARG};;
h) echo "Usage: ${PROG_NAME} -s schema -j jobid [-t TableName -e max_error_row -p description -d delimiter -n name ]"
exit 2;;
u) echo "Usage: ${PROG_NAME} -s schema -j jobid [-t TableName -e max_error_row -p description -d delimiter -n name ]"
exit 2;;
\?) echo "Usage: ${PROG_NAME} -s schema -j jobid [-t TableName -e max_error_row -p description -d delimiter -n name ]"
exit 2;;
esac
done
if [ -n "$Schema" ]; then
echo "Schema is " $Schema
else
echo "Error using the script, a schema is needed! "
echo "usage as follows: "
echo "Usage: ${PROG_NAME} -s schema -j jobid [-t TableName -p description -d delimiter -e max_error_rows -n name ]"
echo "PLEASE ONLY INPUT SCHEMA NAME:"
read Schema
if [ -n "$Schema" ]; then
echo "Schema is " $Schema
else
echo "Error using the script, a schema is needed! "
echo "Usage: ${PROG_NAME} -s schema -j jobid [-t TableName -p description -d delimiter -e max_error_rows -n name ]"
echo "Try again! Goodbye!"
exit 2;
fi
fi
NAME="column definitions for tables in $Schema"
if [ -n "$JOBID" ]; then
echo "INPUT JOB ID is " $JOBID
else
echo "Error using the script, a jobid is needed! "
echo "PLEASE INPUT jobid:"
read JOBID
if [ -n "$JOBID" ]; then
echo "JOBID is " $JOBID
else
echo "Error using the script, a jobid is needed! "
echo "Usage: ${PROG_NAME} -s schema -j jobid [-t TableName -s description -d delimiter -e max_error_rows -n name ]"
echo "Try again! Goodbye!"
exit 2;
fi
fi
################################################################################
if [ -n "$TABLENAME" ]; then
./bulkloadp.sh -e $MAXERROR -s $Schema -t "$TABLENAME" -j $JOBID -p "$DESC" -d "$DELIMITER" -n "$NAME" -u $USER
else
./bulkloadp.sh -e $MAXERROR -s $Schema -j $JOBID -d "$DELIMITER" -p "$DESC" -n "$NAME" -u $USER
fi
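# For example (hypothetical values), invoking this wrapper as "-s TPCH -t REGION -j 100" runs:
#   bulkloadp.sh -e 10 -s TPCH -t "REGION" -j 100 -p "table columns definition" \
#     -d "|" -n "column definitions for tables in TPCH" -u $USER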

View File

@ -1,95 +0,0 @@
#!/bin/bash
#This is the procedure for running bulkload using the cpimport program
#Usage of this program:
#The necessary input parameter is the schema name
#For example: bulkload.sh -s TPCH
#A table name and a Job ID can be entered by the user when prompted, or skipped by hitting the enter key
#When the table name is skipped, ALL of the columns and indexes in ALL of the tables in the schema will be loaded
#When a table name is entered, all of the columns and indexes in that table will be loaded
#The Job ID determines the names of the two xml files. For example, job id 100 generates Job_100.xml for columns and Job_101.xml for indexes. The job id for the index xml file is the entered job id + 1
#If the job id is skipped, the default job ids are 299 and 300 for the column and index files
#Two xml files will be generated, residing in the bulkroot directory under the subdirectory job
#For example, the job directory may look like /usr/local/mariadb/columnstore/test/bulk/job
# Set up a default search path.
PATH="$HOME/genii/export/bin:.:/sbin:/usr/sbin:/bin:/usr/bin:/usr/X11R6/bin"
export PATH
#echo "This is Script name " $0
PROG_NAME=$(basename $0)
USERNAME=`grep "^${USER}:" /etc/passwd | cut -d: -f5`
JOBID=""
TABLENAME=""
Schema=""
DELIMITER="|"
MAXERROR=10
FORMAT=CSV
DESC="table columns definition"
NAME="table columns definition"
while getopts 't:j:e:s:d:p:n:hu' OPTION
do
case ${OPTION} in
s) Schema=${OPTARG};;
t) TABLENAME=${OPTARG};;
j) JOBID=${OPTARG};;
e) MAXERROR=${OPTARG};;
p) DESC=${OPTARG};;
d) DELIMITER=${OPTARG};;
n) NAME=${OPTARG};;
h) echo "Usage: ${PROG_NAME} -s schema -j jobid [-t TableName -e max_error_row -p description -d delimiter -n name ]"
exit 2;;
u) echo "Usage: ${PROG_NAME} -s schema -j jobid [-t TableName -e max_error_row -p description -d delimiter -n name ]"
exit 2;;
\?) echo "Usage: ${PROG_NAME} -s schema -j jobid [-t TableName -e max_error_row -p description -d delimiter -n name ]"
exit 2;;
esac
done
if [ -n "$Schema" ]; then
echo "Schema is " $Schema
else
echo "Error using the script, a schema is needed! "
echo "usage as follows: "
echo "Usage: ${PROG_NAME} Schema -j jobid [-t TableName -p description -d delimiter -e max_error_rows -n name ]"
echo "PLEASE ONLY INPUT SCHEMA NAME:"
read Schema
if [ -n "$Schema" ]; then
echo "Schema is " $Schema
else
echo "Error using the script, a schema is needed! "
echo "Usage: ${PROG_NAME} Schema -j jobid [-t TableName -p description -d delimiter -e max_error_rows -n name ]"
echo "Try again! Goodbye!"
exit 2;
fi
fi
NAME="column definitions for tables in $Schema"
if [ -n "$JOBID" ]; then
echo "INPUT JOB ID is " $JOBID
else
echo "Error using the script, a jobid is needed! "
echo "PLEASE INPUT jobid:"
read JOBID
if [ -n "$JOBID" ]; then
echo "JOBID is " $JOBID
else
echo "Error using the script, a jobid is needed! "
echo "Usage: ${PROG_NAME} Schema -j jobid [-t TableName -s description -d delimiter -e max_error_rows -n name ]"
echo "Try again! Goodbye!"
exit 2;
fi
fi
################################################################################
if [ -n "$TABLENAME" ]; then
bulkloadp.sh -e $MAXERROR -s $Schema -t "$TABLENAME" -j $JOBID -p "$DESC" -d "$DELIMITER" -n "$NAME" -u $USER
else
bulkloadp.sh -e $MAXERROR -s $Schema -j $JOBID -d "$DELIMITER" -p "$DESC" -n "$NAME" -u $USER
fi

View File

@ -1,3 +0,0 @@
cleanup.sh
dbbuilder.sh
bulkloadp.sh

View File

@ -1,299 +0,0 @@
#!/usr/bin/python
##
## Bulkloader script by Martin Thomas
##
import os, sys, glob, shutil, xml.dom.minidom
import getopt
import logging
logger = logging.getLogger()
shdlr = logging.StreamHandler()
fhdlr = logging.FileHandler(filename='bulkload.log' )
formatter = logging.Formatter('%(asctime)s:%(levelname)s: %(message)s')
shdlr.setFormatter(formatter)
fhdlr.setFormatter(formatter)
logger.addHandler(shdlr)
logger.addHandler(fhdlr)
## only report INFO or higher - change to WARNING to silence all logging
logger.setLevel(logging.INFO)
def usage():
print """
qa-bulkload.py is intended to automate the manual steps required to load the
database and build indexes from scratch.
- PrimProc will be stopped and started
- shared memory segments will be removed using ipcs-pat
- database files will be removed
- dbgen will be run with option 5
- oid files and job files will be copied to correct locations
- column data will be parsed and loaded using Job 299
- index data will be exported, sorted and loaded using Job 300
Options:
-n or --nocache= : Specify either col or idx and the -c flag will NOT be sent to cpimport
-u or --usage : Usage message
Example:
qa-bulkload.py --nocache=idx
Load the database, do not use cache when building indexes
THIS SPACE LEFT INTENTIONALLY BLANK
"""
def find_paths():
"""Find DBRoot and BulkRoot."""
try:
config_file = os.environ['COLUMNSTORE_CONFIG_FILE']
except KeyError:
try:
logger.info("Environment variable COLUMNSTORE_CONFIG_FILE not set, looking for system Columnstore.xml")
config_file = '/usr/local/mariadb/columnstore/etc/Columnstore.xml'
os.lstat(config_file)
except:
logger.error('No config file available')
sys.exit('No config file available')
try:
xmldoc = xml.dom.minidom.parse(config_file)
bulk_node = xmldoc.getElementsByTagName('BulkRoot')[0]
db_node = xmldoc.getElementsByTagName('DBRoot')[0]
bulk_dir = bulk_node.childNodes[0].nodeValue
data_dir = db_node.childNodes[0].nodeValue
except Exception, e:
logger.error('Error parsing config file')
logger.error(e)
sys.exit('Error parsing config file')
return (bulk_dir, data_dir)
def check_dirs(bulkroot, dbroot):
problem = 0
res = 0
reqd_dirs = {
os.getenv('HOME')+'/genii' : "No genii directory found (contains tools required to continue) (%s)",
bulkroot: "Bulkroot specified as %s but not found",
bulkroot+'/job': "No job directory found - needed to store Job xml files (looked in %s)",
bulkroot+'/data/import': "No data/import directory found - expected %s to hold data to be loaded",
bulkroot+'/log': "No log directory found - expected %s to log into",
dbroot : "DBroot specified as %s but not found"
}
for dir in reqd_dirs.keys():
try:
res = os.lstat(dir)
except:
problem = 1
logger.error(reqd_dirs[dir]%dir)
if problem:
sys.exit(1)
def fix_hwm(job_file):
"""Find hwm in xml file and change to 0"""
import re
src_file = open(job_file, 'r')
dst_file = open(job_file+'.tmp', 'w')
rep = re.compile('hwm="1"')
for line in src_file:
line = rep.sub('hwm="0"', line)
dst_file.write(line)
src_file.close()
dst_file.close() # close before renaming so buffered writes are flushed
# use os.rename instead of shutil.move to avoid problems traversing devices
os.rename(job_file+'.tmp', job_file)
def find_indexes(job_file):
"""Find index definitions in job_file and return list of files to sort"""
index_files = []
try: # try because we may have an old version of python
xmldoc = xml.dom.minidom.parse(job_file)
for index_node in xmldoc.getElementsByTagName('Index'):
index_files.append(index_node.getAttribute('mapName'))
except:
import re
f = open(job_file)
for line in f: # iterate lines, not characters
b = re.search('mapName="(CPL_[0-9A-Z_]+)"', line)
try: # try because not every line will match
index_files.append(b.group(1))
except: pass
return index_files
def exec_cmd(cmd, args):
"""Execute command using subprocess module or if that fails,
use os.system
"""
try:
import subprocess
try:
retcode = subprocess.call(cmd + " " + args, shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
sys.exit(-1)
else:
print >>sys.stderr, "Child returned", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e
sys.exit(-1)
except:
logger.info ('Old version of Python - subprocess not available, falling back to os.system')
logger.info ('Executing: '+cmd+' '+args)
res = os.system(cmd+' '+args)
if res:
logger.error('Bad return code %i from %s'%(res, cmd))
sys.exit( res )
def build_tool(tool):
"""
Use the tool dictionary to determine if required tool exists
and build if not
"""
if not os.path.exists(tool['path']+tool['tool']):
logger.warn ("Building %s before continuing"%tool['tool'])
curdir=os.getcwd()
os.chdir(tool['path'])
exec_cmd(tool['builder'], tool['args'])
os.chdir(curdir)
def main():
"""
Bulk load the database.
Check that we can write OIDfiles, that all required tools exist,
clean up old files, sort the index inserts and generally rock and roll
"""
start_dir = os.getcwd() # remember where we started
if not os.environ.has_key('LD_LIBRARY_PATH'):
logger.info('No environment variable LD_LIBRARY_PATH')
else:
if len(os.getenv('LD_LIBRARY_PATH'))<5:
logger.info('Suspicious LD_LIBRARY_PATH: %s'%os.getenv('LD_LIBRARY_PATH'))
#-- figure out paths
home = os.getenv('HOME')
cache = {}
cache['idx'] = '-c'
cache['col'] = '-c'
#-- allow us to specify a write engine branch
opts, args = getopt.getopt(sys.argv[1:], 'n:u', ['nocache=', 'usage'])
for opt, arg in opts:
if opt == '-n' or opt == '--nocache':
if (arg=='idx' or arg=='col'):
cache[arg] = ''
logger.info("No cache for %s"% arg)
if opt == '-u' or opt == '--usage':
usage()
sys.exit()
(bulkroot, dbroot) = find_paths()
logger.info ("Bulkroot: %s \tDBRoot: %s\n"%(bulkroot, dbroot))
check_dirs(bulkroot, dbroot)
if len(glob.glob(bulkroot+'/data/import/*tbl')) == 0:
sys.exit("No files for import found in BulkRoot: %s"%(bulkroot))
if len(glob.glob(dbroot+'/000.dir'))==0:
logger.info("No files found in DBRoot: %s (not fatal)"%dbroot)
## qa version does not build any tools. Cease and desist if any tools missing
toolset = ['dbbuilder', 'cpimport', 'ipcs-pat', 'PrimProc']
for tool in toolset:
res = os.system('which %s'%tool)
if res:
logger.error("Fatal error: %s not found"%tool)
sys.exit(-1)
## clean up before starting
## remove old db files, removed old temp files, remove shared memory segments,
## kill old PrimProc and start new one
logger.info ("Removing old DB files")
exec_cmd('rm -fr ', dbroot+'/000.dir')
logger.info ("Removing old temp files")
exec_cmd('rm -fr ', bulkroot+'/data/import/*.idx.txt')
logger.info ("Removing shared memory segments")
exec_cmd('ipcs-pat', '-d')
logger.info("Killing primProc")
os.system('killall -q -u $USER PrimProc')
logger.info("Starting primProc")
exec_cmd('PrimProc', "> primproc.log &")
## run dbbuilder
logger.info ("Building db and indexes (no data inserted)")
exec_cmd('yes | dbbuilder', ' 5')
logger.info ("Relocating OID files")
for file in ['colOIDFile.dat', 'dicOIDFile.dat', 'indexOIDFile.dat']:
# use os.rename instead of shutil.move to avoid problems traversing devices
os.rename(file, dbroot+'/'+file)
for xmlfile in glob.glob('./Job*xml'):
logger.info ("Copying %s to %s\n"%(xmlfile, bulkroot+'/job'))
# use os.rename instead of shutil.move to avoid problems traversing devices
os.rename(xmlfile, bulkroot+'/job/'+xmlfile)
exec_cmd('time cpimport', '-j 299 -b %s'%cache['col'])
exec_cmd('time cpimport', '-j 299 -l %s'%cache['col'])
exec_cmd('time cpimport', '-j 300 -i -o %s'%cache['idx'])
logger.info("Over-riding HWM in job file - setting to 0")
fix_hwm(bulkroot+'/job/Job_300.xml')
## sort the files after scanning index job file for mapName(s)
logger.info ("Sorting indexes before insertion")
indexes = find_indexes(bulkroot+'/job/Job_300.xml')
for index in indexes:
data_file = '%s/data/import/%s.dat.idx.txt'%(bulkroot, index)
sort_file = '%s/data/import/%s.dat.idx.sort'%(bulkroot, index)
exec_cmd('time sort', ' -k1 -n %s > %s'%(data_file, sort_file))
# use os.rename instead of shutil.move to avoid problems traversing devices
os.rename( sort_file, data_file)
logger.info("Inserting indexes")
try:
logger.info("Trying with -m option")
exec_cmd('cpimport', '-j 300 -m -i -s %s'%cache['idx'])
except:
try:
logger.warn("cpimport with -m option failed, fall back to regular options")
exec_cmd('cpimport', '-j 300 -i -s %s'%cache['idx'])
except:
logger.error("Index load failed")
## the following line allows either interactive use or module import
if __name__=="__main__": main()