From 97bda78c3b48ff218aada17121bec350e70e007d Mon Sep 17 00:00:00 2001 From: Andrew Hutchings Date: Thu, 5 Sep 2019 18:13:23 +0100 Subject: [PATCH] Move config files This patch: * Moves config files from /usr/local/mariadb/columnstore/etc to ENGINE_SYSCONFDIR/columnstore (ENGINE_SYSCONFDIR is /etc by default) * Sets a define called MCSSYSCONFDIR which contains the ENGINE_SYSCONFDIR compile time setting * Modifies scripts and code to use the new paths * Removes a whole bunch of files we don't use --- CMakeLists.txt | 29 +- config.h.in | 2 + dbcon/joblist/config-dec.xml | 494 ------------------ oam/etc/CMakeLists.txt | 2 +- oam/install_scripts/CMakeLists.txt | 7 + .../{columnstore => columnstore.in} | 4 +- ...oreInstall.sh => columnstoreInstall.sh.in} | 10 +- ...storeLogRotate => columnstoreLogRotate.in} | 4 +- .../{post-install => post-install.in} | 10 +- .../{postInstall.sh => postInstall.sh.in} | 6 +- .../{pre-uninstall => pre-uninstall.in} | 4 +- .../{syslogSetup.sh => syslogSetup.sh.in} | 2 +- oam/oamcpp/liboamcpp.cpp | 27 +- oam/post/CMakeLists.txt | 2 +- oam/post/test-001.sh | 87 --- oam/post/test-002.sh | 64 --- oam/post/test-003.sh | 66 --- oam/post/test-004.sh | 70 --- .../columnstoreSupport/columnstoreSupport.cpp | 5 +- oamapps/mcsadmin/mcsadmin.cpp | 16 +- oamapps/postConfigure/amazonInstaller.cpp | 8 +- oamapps/postConfigure/helpers.cpp | 3 +- oamapps/postConfigure/installer.cpp | 9 +- oamapps/postConfigure/postConfigure.cpp | 21 +- procmgr/processmanager.cpp | 7 +- procmon/processmonitor.cpp | 3 +- storage-manager/CMakeLists.txt | 2 +- storage-manager/src/Config.cpp | 6 +- tools/configMgt/autoInstaller.cpp | 5 +- tools/configMgt/beetlejuice_installer.sh | 219 -------- .../configMgt/beetlejuice_installer_3rpms.sh | 259 --------- tools/configMgt/configure.cpp | 2 +- tools/configMgt/parent_installer.sh | 365 ------------- tools/dbbuilder/dbbuilder.cpp | 3 +- tools/evalidx/CMakeLists.txt | 47 -- tools/evalidx/checkidx.py | 98 ---- tools/evalidx/evalidx.cpp | 
388 -------------- tools/setConfig/CMakeLists.txt | 1 + .../{configxml.sh => configxml.sh.in} | 2 +- utils/configcpp/config.h | 108 ---- utils/configcpp/configcpp.cpp | 4 +- utils/configcpp/writeonce.cpp | 232 -------- utils/infinidb_hadoop/InfiniDB_Hadoop.jar | Bin 53522 -> 0 bytes .../infinidb_hadoop_bulkload.sh | 7 - .../hadoop/db/IDBFileInputFormat.java | 184 ------- .../hadoop/db/InfiniDBConfiguration.java | 356 ------------- .../hadoop/db/InfiniDBInputFormat.java | 442 ---------------- .../hadoop/db/InfiniDBOutputFormat.java | 139 ----- .../hadoop/example/InfiniDBOutputDriver.java | 88 ---- .../hadoop/example/InfiniDoopDriver.java | 90 ---- .../hadoop/example/InfiniDoopInputMapper.java | 43 -- .../hadoop/example/InfiniDoopMapper.java | 45 -- .../hadoop/example/InfiniDoopRecord.java | 63 --- utils/loggingcpp/CMakeLists.txt | 2 +- utils/loggingcpp/idberrorinfo.cpp | 3 +- utils/loggingcpp/message.cpp | 3 +- utils/multicast/config.h | 208 -------- utils/thrift/thrift/config.h | 427 --------------- utils/thrift/thrift/thrift-config.h | 447 +++++++++++++++- utils/thrift/thrift/windows/config.h | 90 ---- writeengine/bulk/bulkload.py | 299 ----------- writeengine/bulk/bulkloadp.sh | 121 ----- writeengine/bulk/checkidx.py | 93 ---- writeengine/bulk/cpimport.sh | 93 ---- writeengine/bulk/dbload_tmplate.sh | 95 ---- writeengine/bulk/dbloadp.sh | 3 - writeengine/bulk/qa-bulkload.py | 299 ----------- 67 files changed, 535 insertions(+), 5808 deletions(-) delete mode 100644 dbcon/joblist/config-dec.xml rename oam/install_scripts/{columnstore => columnstore.in} (94%) rename oam/install_scripts/{columnstoreInstall.sh => columnstoreInstall.sh.in} (91%) rename oam/install_scripts/{columnstoreLogRotate => columnstoreLogRotate.in} (83%) rename oam/install_scripts/{post-install => post-install.in} (97%) rename oam/install_scripts/{postInstall.sh => postInstall.sh.in} (92%) rename oam/install_scripts/{pre-uninstall => pre-uninstall.in} (95%) rename 
oam/install_scripts/{syslogSetup.sh => syslogSetup.sh.in} (99%) delete mode 100755 oam/post/test-001.sh delete mode 100755 oam/post/test-002.sh delete mode 100755 oam/post/test-003.sh delete mode 100644 oam/post/test-004.sh delete mode 100755 tools/configMgt/beetlejuice_installer.sh delete mode 100755 tools/configMgt/beetlejuice_installer_3rpms.sh delete mode 100755 tools/configMgt/parent_installer.sh delete mode 100644 tools/evalidx/CMakeLists.txt delete mode 100755 tools/evalidx/checkidx.py delete mode 100644 tools/evalidx/evalidx.cpp rename tools/setConfig/{configxml.sh => configxml.sh.in} (96%) delete mode 100644 utils/configcpp/config.h delete mode 100644 utils/configcpp/writeonce.cpp delete mode 100755 utils/infinidb_hadoop/InfiniDB_Hadoop.jar delete mode 100755 utils/infinidb_hadoop/infinidb_hadoop_bulkload.sh delete mode 100755 utils/infinidb_hadoop/src/infinidb/hadoop/db/IDBFileInputFormat.java delete mode 100755 utils/infinidb_hadoop/src/infinidb/hadoop/db/InfiniDBConfiguration.java delete mode 100755 utils/infinidb_hadoop/src/infinidb/hadoop/db/InfiniDBInputFormat.java delete mode 100755 utils/infinidb_hadoop/src/infinidb/hadoop/db/InfiniDBOutputFormat.java delete mode 100755 utils/infinidb_hadoop/src/infinidb/hadoop/example/InfiniDBOutputDriver.java delete mode 100755 utils/infinidb_hadoop/src/infinidb/hadoop/example/InfiniDoopDriver.java delete mode 100755 utils/infinidb_hadoop/src/infinidb/hadoop/example/InfiniDoopInputMapper.java delete mode 100755 utils/infinidb_hadoop/src/infinidb/hadoop/example/InfiniDoopMapper.java delete mode 100755 utils/infinidb_hadoop/src/infinidb/hadoop/example/InfiniDoopRecord.java delete mode 100644 utils/multicast/config.h delete mode 100644 utils/thrift/thrift/config.h delete mode 100644 utils/thrift/thrift/windows/config.h delete mode 100644 writeengine/bulk/bulkload.py delete mode 100755 writeengine/bulk/bulkloadp.sh delete mode 100755 writeengine/bulk/checkidx.py delete mode 100755 writeengine/bulk/cpimport.sh delete 
mode 100755 writeengine/bulk/dbload_tmplate.sh delete mode 100755 writeengine/bulk/dbloadp.sh delete mode 100644 writeengine/bulk/qa-bulkload.py diff --git a/CMakeLists.txt b/CMakeLists.txt index ee1b17d45..f75dea165 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -123,19 +123,7 @@ ENDIF("${isSystemDir}" STREQUAL "-1") INCLUDE (configureEngine) -# releasenum is used by external scripts for various tasks. Leave it alone. -CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/build/releasenum.in ${CMAKE_CURRENT_BINARY_DIR}/build/releasenum IMMEDIATE) -INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/build/releasenum DESTINATION ${INSTALL_ENGINE} COMPONENT platform) -CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/columnstoreversion.h.in ${CMAKE_CURRENT_SOURCE_DIR}/columnstoreversion.h) -CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/config.h.in ${CMAKE_CURRENT_BINARY_DIR}/config.h) -exec_program("git" - ${CMAKE_CURRENT_SOURCE_DIR} - ARGS "describe --match=NeVeRmAtCh --always --dirty" - OUTPUT_VARIABLE GIT_VERSION) - -CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/gitversionEngine.in ${CMAKE_CURRENT_BINARY_DIR}/gitversionEngine IMMEDIATE) -INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/gitversionEngine DESTINATION ${INSTALL_ENGINE} COMPONENT platform) FIND_PROGRAM(LEX_EXECUTABLE flex DOC "path to the flex executable") if(NOT LEX_EXECUTABLE) @@ -204,8 +192,7 @@ SET (ENGINE_LDFLAGS "-Wl,--no-as-needed -Wl,--add-needed") SET (ENGINE_LIBDIR "${INSTALL_ENGINE}/lib") SET (ENGINE_BINDIR "${INSTALL_ENGINE}/bin") SET (ENGINE_INCDIR "${INSTALL_ENGINE}/include") -SET (ENGINE_ETCDIR "${INSTALL_ENGINE}/etc") -SET (ENGINE_SYSCONFDIR "${INSTALL_ENGINE}/etc") +SET (ENGINE_SYSCONFDIR "/etc") SET (ENGINE_MANDIR "${INSTALL_ENGINE}/man") SET (ENGINE_SBINDIR "${INSTALL_ENGINE}/sbin") SET (ENGINE_SHAREDIR "${INSTALL_ENGINE}/share") @@ -329,5 +316,19 @@ IF( WITH_SHARED_COMP_TESTS ) ADD_SUBDIRECTORY(writeengine/shared) ENDIF( WITH_SHARED_COMP_TESTS ) +# releasenum is used by external scripts for various tasks. 
Leave it alone. +CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/build/releasenum.in ${CMAKE_CURRENT_BINARY_DIR}/build/releasenum IMMEDIATE) +INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/build/releasenum DESTINATION ${INSTALL_ENGINE} COMPONENT platform) +CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/columnstoreversion.h.in ${CMAKE_CURRENT_SOURCE_DIR}/columnstoreversion.h) +CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/config.h.in ${CMAKE_CURRENT_BINARY_DIR}/config.h) + +exec_program("git" + ${CMAKE_CURRENT_SOURCE_DIR} + ARGS "describe --match=NeVeRmAtCh --always --dirty" + OUTPUT_VARIABLE GIT_VERSION) + +CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/gitversionEngine.in ${CMAKE_CURRENT_BINARY_DIR}/gitversionEngine IMMEDIATE) +INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/gitversionEngine DESTINATION ${INSTALL_ENGINE} COMPONENT platform) + INCLUDE(cpackEngineRPM) INCLUDE(cpackEngineDEB) diff --git a/config.h.in b/config.h.in index 74d707b11..5021ab00a 100644 --- a/config.h.in +++ b/config.h.in @@ -387,4 +387,6 @@ code using `volatile' can become incorrect without. Disable with care. 
*/ #cmakedefine volatile +#define MCSSYSCONFDIR "${ENGINE_SYSCONFDIR}" + #endif diff --git a/dbcon/joblist/config-dec.xml b/dbcon/joblist/config-dec.xml deleted file mode 100644 index 56f9c32d9..000000000 --- a/dbcon/joblist/config-dec.xml +++ /dev/null @@ -1,494 +0,0 @@ - - - - - 5 - 1 - 100 - 1 - 100 - 200 - 400 - 1 - 31 - 10 - 1024 - 1024 - 1000 - 1000 - 1 - - - PMS1 - PMS2 - PMS3 - PMS4 - - - PMS5 - PMS6 - PMS7 - PMS8 - - - PMS9 - PMS10 - PMS11 - PMS12 - - - PMS13 - PMS14 - PMS15 - PMS16 - - - PMS17 - PMS18 - PMS19 - PMS20 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - 127.0.0.1 - 18620 - - - /home/rdempsey/Calpont/etc/MessageFile.txt - - - /var/tmp - - - 127.0.0.1 - 18601 - 50 - 100 - - - 127.0.0.1 - 18601 - 50 - 100 - - - 127.0.0.1 - 18602 - - - 127.0.0.1 - 18604 - - - 127.0.0.1 - 18606 - - - 127.0.0.1 - 18630 - - - 127.0.0.1 - 18632 - - - 127.0.0.1 - 18634 - - - 127.0.0.1 - 18636 - - - 127.0.0.1 - 18638 - - - 127.0.0.1 - 18640 - - - 127.0.0.1 - 18642 - - - 127.0.0.1 - 18644 - - - 127.0.0.1 - 18650 - - - 127.0.0.1 - 18612 - - - 127.0.0.1 - 18614 - - - 127.0.0.1 - 18625 - - - V1.0.0.0 - 5 - 3 - 60 - /home/rdempsey/Calpont/data1 - - /home/rdempsey/Calpont/dbrm/BRM_saves - 90 - 80 - 70 - dm1 - - - - 2048MB - 128MB - 2048MB - 32 - /var/tmp - - - - 0 - - - - - NONE - NONE - 
- - dm - Director Module - 1 - 127.0.0.1 - Unassigned - 90 - 80 - 70 - 60 - 90 - 80 - 70 - 90 - 80 - 70 - 90 - 80 - 70 - / - um - User Module - 1 - 127.0.0.1 - Unassigned - 90 - 80 - 70 - 60 - 90 - 80 - 70 - 90 - 80 - 70 - 90 - 80 - 70 - / - pm - Performance Module - 1 - 127.0.0.1 - Unassigned - 90 - 80 - 70 - 60 - 90 - 80 - 70 - 90 - 80 - 70 - 90 - 80 - 70 - / - mm - Management Module - 0 - 127.0.0.1 - Unassigned - 90 - 80 - 70 - 60 - 90 - 80 - 70 - 90 - 80 - 70 - 90 - 80 - 70 - / - - - es - Ethernet Switch - 1 - 127.0.0.1 - Unassigned - fs - Fiber Channel Switch - 1 - 127.0.0.1 - Unassigned - - - cu - Controller Unit - 1 - 127.0.0.1 - Unassigned - eu - Expansion Unit - 1 - 127.0.0.1 - Unassigned - - - 1000 - /tmp/CalpontShm - /home/rdempsey/Calpont/dbrm/SMTxnID - - - 4 - 1073741824 - 0 - 1 - 2 - 3 - - - - /home/rdempsey/Calpont/dbrm/oidbitmap - 3000 - - - Commit - - - /home/rdempsey/Calpont/bulk - - - - - 1 - 127.0.0.1 - 60310 - - - 127.0.0.1 - 60311 - dm1 - - - 127.0.0.1 - 60311 - dm1 - - - 127.0.0.1 - 60311 - dm1 - - - 127.0.0.1 - 60311 - dm1 - - - 127.0.0.1 - 60311 - dm1 - - - 127.0.0.1 - 60311 - dm1 - - - 127.0.0.1 - 60311 - dm1 - - - 127.0.0.1 - 60311 - dm1 - - - 127.0.0.1 - 60311 - dm1 - - - 127.0.0.1 - 60311 - dm1 - - - diff --git a/oam/etc/CMakeLists.txt b/oam/etc/CMakeLists.txt index 0470ac0ae..dfd820c34 100644 --- a/oam/etc/CMakeLists.txt +++ b/oam/etc/CMakeLists.txt @@ -3,4 +3,4 @@ install(FILES AlarmConfig.xml Columnstore.xml ProcessConfig.xml ConsoleCmds.xml - DESTINATION ${ENGINE_ETCDIR} COMPONENT platform) + DESTINATION ${ENGINE_SYSCONFDIR}/columnstore COMPONENT platform) diff --git a/oam/install_scripts/CMakeLists.txt b/oam/install_scripts/CMakeLists.txt index 4f80ae0b0..75807ea3b 100644 --- a/oam/install_scripts/CMakeLists.txt +++ b/oam/install_scripts/CMakeLists.txt @@ -1,3 +1,10 @@ +configure_file("${CMAKE_CURRENT_SOURCE_DIR}/syslogSetup.sh.in" "${CMAKE_CURRENT_SOURCE_DIR}/syslogSetup.sh" @ONLY) 
+configure_file("${CMAKE_CURRENT_SOURCE_DIR}/post-install.in" "${CMAKE_CURRENT_SOURCE_DIR}/post-install" @ONLY) +configure_file("${CMAKE_CURRENT_SOURCE_DIR}/columnstore.in" "${CMAKE_CURRENT_SOURCE_DIR}/columnstore" @ONLY) +configure_file("${CMAKE_CURRENT_SOURCE_DIR}/columnstoreInstall.sh.in" "${CMAKE_CURRENT_SOURCE_DIR}/columnstoreInstall.sh" @ONLY) +configure_file("${CMAKE_CURRENT_SOURCE_DIR}/pre-uninstall.in" "${CMAKE_CURRENT_SOURCE_DIR}/pre-uninstall" @ONLY) +configure_file("${CMAKE_CURRENT_SOURCE_DIR}/columnstoreLogRotate.in" "${CMAKE_CURRENT_SOURCE_DIR}/columnstoreLogRotate" @ONLY) +configure_file("${CMAKE_CURRENT_SOURCE_DIR}/postInstall.sh.in" "${CMAKE_CURRENT_SOURCE_DIR}/postInstall.sh" @ONLY) install(PROGRAMS post-install pre-uninstall diff --git a/oam/install_scripts/columnstore b/oam/install_scripts/columnstore.in similarity index 94% rename from oam/install_scripts/columnstore rename to oam/install_scripts/columnstore.in index 1ba4a2c1f..36bfa4f78 100644 --- a/oam/install_scripts/columnstore +++ b/oam/install_scripts/columnstore.in @@ -79,7 +79,7 @@ tmpDir=`$InstallDir/bin/getConfig SystemConfig SystemTempFileDir` mkdir $tmpDir >/dev/null 2>&1 checkInstallSetup() { - InitialInstallFlag=`$InstallDir/bin/getConfig -c $InstallDir/etc/Columnstore.xml Installation InitialInstallFlag` + InitialInstallFlag=`$InstallDir/bin/getConfig -c @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml Installation InitialInstallFlag` if [ $InitialInstallFlag != "y" ]; then echo "Please run the postConfigure install script, check the Installation Guide" echo "for additional details" @@ -108,7 +108,7 @@ start() { fi #checkInstallSetup - CoreFileFlag=`$InstallDir/bin/getConfig -c $InstallDir/etc/Columnstore.xml Installation CoreFileFlag` + CoreFileFlag=`$InstallDir/bin/getConfig -c @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml Installation CoreFileFlag` if [ $CoreFileFlag = "y" ]; then #columnstore core files ulimit -c unlimited > /dev/null 2>&1 diff --git 
a/oam/install_scripts/columnstoreInstall.sh b/oam/install_scripts/columnstoreInstall.sh.in similarity index 91% rename from oam/install_scripts/columnstoreInstall.sh rename to oam/install_scripts/columnstoreInstall.sh.in index 0933b9009..fc0abf93a 100755 --- a/oam/install_scripts/columnstoreInstall.sh +++ b/oam/install_scripts/columnstoreInstall.sh.in @@ -34,7 +34,7 @@ while true { send_user " password - root password on the servers being installed'\n" send_user " package-type - Package Type being installed (rpm, deb, or binary)\n" send_user " config-file - Optional: Columnstore.xml config file with directory location, i.e. /root/Columnstore.xml\n" - send_user " Default version is $INSTALLDIR/etc/Columnstore.xml.rpmsave\n" + send_user " Default version is @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml.rpmsave\n" send_user " mysql-password - MySQL password on the servers being installed'\n" send_user " -d - Debug flag, output verbose information\n" exit 0 @@ -78,16 +78,16 @@ expect { } if { $CONFIGFILE == " " } { - set CONFIGFILE $INSTALLDIR/etc/Columnstore.xml.rpmsave + set CONFIGFILE @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml.rpmsave } if { [catch { open $CONFIGFILE "r"} handle ] } { puts "Calpont Config file not found: $CONFIGFILE"; exit 1 } -exec rm -f $INSTALLDIR/etc/Columnstore.xml.new > /dev/null 2>&1 -exec mv -f $INSTALLDIR/etc/Columnstore.xml $INSTALLDIR/etc/Columnstore.xml.new > /dev/null 2>&1 -exec /bin/cp -f $CONFIGFILE $INSTALLDIR/etc/Columnstore.xml > /dev/null 2>&1 +exec rm -f @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml.new > /dev/null 2>&1 +exec mv -f @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml.new > /dev/null 2>&1 +exec /bin/cp -f $CONFIGFILE @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml > /dev/null 2>&1 set timeout 2 set INSTALL 2 diff --git a/oam/install_scripts/columnstoreLogRotate b/oam/install_scripts/columnstoreLogRotate.in similarity index 83% rename from 
oam/install_scripts/columnstoreLogRotate rename to oam/install_scripts/columnstoreLogRotate.in index 8bb75ed6e..8c2bb7cd1 100644 --- a/oam/install_scripts/columnstoreLogRotate +++ b/oam/install_scripts/columnstoreLogRotate.in @@ -12,11 +12,11 @@ olddir /var/log/mariadb/columnstore/archive su root root } -/usr/local/mariadb/columnstore/etc/Columnstore.xml { +@ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml { daily dateext copy - olddir /usr/local/mariadb/columnstore/etc/ + olddir @ENGINE_SYSCONFDIR@/columnstore } /usr/local/mariadb/columnstore/mysql/db/*.err { missingok diff --git a/oam/install_scripts/post-install b/oam/install_scripts/post-install.in similarity index 97% rename from oam/install_scripts/post-install rename to oam/install_scripts/post-install.in index d82bbb953..993d8598a 100755 --- a/oam/install_scripts/post-install +++ b/oam/install_scripts/post-install.in @@ -68,8 +68,8 @@ if [ $is64bitpkg -eq 1 -a $is64bitos -ne 1 ]; then exit 1 fi -if [ ! -f $installdir/etc/Columnstore.xml ]; then - echo "$installdir/etc/Columnstore.xml not found, exiting" +if [ ! 
-f @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml ]; then + echo "@ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml not found, exiting" exit 1 fi @@ -172,7 +172,7 @@ test -d $installdir/data1/systemFiles/dataTransaction || rmdir $installdir/data1 test -d $installdir/data1/systemFiles/dataTransaction/archive || rmdir $installdir/data1/systemFiles/dataTransaction/archive >/dev/null 2>&1 chmod 1755 $installdir/data1 >/dev/null 2>&1 chmod -R 1755 $installdir/data1/systemFiles >/dev/null 2>&1 -chmod 1755 $installdir/etc > /dev/null 2>&1 +chmod 1755 @ENGINE_SYSCONFDIR@/columnstore > /dev/null 2>&1 #create the bulk-load dirs mkdir -p $installdir/data/bulk/data/import >/dev/null 2>&1 @@ -275,7 +275,7 @@ else sed -i -e s@prefix=/home/guest@prefix=$prefix@g $installdir/bin/* - chown $user:$user $installdir/etc/Columnstore.xml + chown $user:$user @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml cat < /dev/null 2>&1 +/bin/cp -f @ENGINE_SYSCONFDIR@/columnstore/AlarmConfig.xml @ENGINE_SYSCONFDIR@/columnstore/AlarmConfig.xml.installSave > /dev/null 2>&1 #check and get amazon env variables aws=`which aws 2>/dev/null` diff --git a/oam/install_scripts/postInstall.sh b/oam/install_scripts/postInstall.sh.in similarity index 92% rename from oam/install_scripts/postInstall.sh rename to oam/install_scripts/postInstall.sh.in index beb00734f..723b0a125 100755 --- a/oam/install_scripts/postInstall.sh +++ b/oam/install_scripts/postInstall.sh.in @@ -62,7 +62,7 @@ log_user $DEBUG if { $RPMPACKAGE == " " || $PASSWORD == " "} {puts "please enter both RPM and password, enter ./postInstaller.sh -h for additional info"; exit -1} if { $CONFIGFILE == " " } { - set CONFIGFILE $INSTALLDIR/etc/Columnstore.xml.rpmsave + set CONFIGFILE @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml.rpmsave } if { [catch { open $CONFIGFILE "r"} handle ] } { puts "Calpont Config file not found: $CONFIGFILE"; exit -1 @@ -113,8 +113,8 @@ expect { } expect -re "# " log_user 0 -exec mv -f $INSTALLDIR/etc/Columnstore.xml 
$INSTALLDIR/etc/Columnstore.xml.new > /dev/null 2>&1 -exec mv -f $CONFIGFILE $INSTALLDIR/etc/Columnstore.xml > /dev/null 2>&1 +exec mv -f @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml.new > /dev/null 2>&1 +exec mv -f $CONFIGFILE @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml > /dev/null 2>&1 send_user "\n" set timeout 380 diff --git a/oam/install_scripts/pre-uninstall b/oam/install_scripts/pre-uninstall.in similarity index 95% rename from oam/install_scripts/pre-uninstall rename to oam/install_scripts/pre-uninstall.in index 139ec5353..497e03fa9 100755 --- a/oam/install_scripts/pre-uninstall +++ b/oam/install_scripts/pre-uninstall.in @@ -130,10 +130,10 @@ fi if [ $quiet != 1 ]; then #make copy of Columnstore.xml - /bin/cp -f $installdir/etc/Columnstore.xml $installdir/etc/Columnstore.xml.rpmsave > /dev/null 2>&1 + /bin/cp -f @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml.rpmsave > /dev/null 2>&1 /bin/cp -f $installdir/mysql/my.cnf $installdir/mysql/my.cnf.rpmsave > /dev/null 2>&1 cp $installdir/bin/myCnf-include-args.text $installdir/bin/myCnf-include-args.text.rpmsave >& /dev/null - rm -f $installdir/etc/AlarmConfig.xml.installSave + rm -f @ENGINE_SYSCONFDIR@/columnstore/AlarmConfig.xml.installSave fi #remove OAMdbrootCheck file diff --git a/oam/install_scripts/syslogSetup.sh b/oam/install_scripts/syslogSetup.sh.in similarity index 99% rename from oam/install_scripts/syslogSetup.sh rename to oam/install_scripts/syslogSetup.sh.in index babbd1c88..97de4d7b2 100755 --- a/oam/install_scripts/syslogSetup.sh +++ b/oam/install_scripts/syslogSetup.sh.in @@ -191,7 +191,7 @@ checkSyslog if [ ! 
-z "$syslog_conf" ] ; then $installdir/bin/setConfig -d Installation SystemLogConfigFile ${syslog_conf} >/dev/null 2>&1 if [ $non_root_user == "yes" ]; then - chown $user:$user $installdir/etc/Columnstore.xml* + chown $user:$user @ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml* fi rm -f ${syslog_conf}.columnstoreSave diff --git a/oam/oamcpp/liboamcpp.cpp b/oam/oamcpp/liboamcpp.cpp index 3a5caab12..5e3d6bfea 100644 --- a/oam/oamcpp/liboamcpp.cpp +++ b/oam/oamcpp/liboamcpp.cpp @@ -105,34 +105,13 @@ void handleControlC(int i) Oam::Oam() { - // Assigned pointers to Config files - string calpontfiledir; - const char* cf = 0; - InstallDir = startup::StartUp::installDir(); - calpontfiledir = InstallDir + "/etc"; - //FIXME: we should not use this anymore. Everything should be based off the install dir - //If CALPONT_HOME is set, use it for etc directory -#ifdef _MSC_VER - cf = 0; - string cfStr = IDBreadRegistry("CalpontHome"); + CalpontConfigFile = std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml"; - if (!cfStr.empty()) - cf = cfStr.c_str(); + AlarmConfigFile = std::string(MCSSYSCONFDIR) + "/columnstore/AlarmConfig.xml"; -#else - cf = getenv("CALPONT_HOME"); -#endif - - if (cf != 0 && *cf != 0) - calpontfiledir = cf; - - CalpontConfigFile = calpontfiledir + "/Columnstore.xml"; - - AlarmConfigFile = calpontfiledir + "/AlarmConfig.xml"; - - ProcessConfigFile = calpontfiledir + "/ProcessConfig.xml"; + ProcessConfigFile = std::string(MCSSYSCONFDIR) + "/columnstore/ProcessConfig.xml"; if (UseHdfs == 0) { diff --git a/oam/post/CMakeLists.txt b/oam/post/CMakeLists.txt index f6d5a95c7..3b57f111d 100644 --- a/oam/post/CMakeLists.txt +++ b/oam/post/CMakeLists.txt @@ -1,3 +1,3 @@ -install(PROGRAMS functions test-001.sh test-002.sh test-003.sh test-004.sh DESTINATION ${ENGINE_POSTDIR} COMPONENT platform) +install(PROGRAMS functions DESTINATION ${ENGINE_POSTDIR} COMPONENT platform) diff --git a/oam/post/test-001.sh b/oam/post/test-001.sh deleted file mode 100755 index 
2f0d0fcfb..000000000 --- a/oam/post/test-001.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/bin/bash -# -# $Id: test-001.sh 3704 2013-08-07 03:33:20Z bwilkinson $ - -prefix=/usr/local - -USER=`whoami 2>/dev/null` - -if [ $USER != "root" ]; then - prefix=$HOME -fi - -if [ $USER != "root" ]; then - if [ -f $prefix/.bash_profile ]; then - profileFile=$prefix/.bash_profile - elif [ -f $prefix/.profile ]; then - profileFile=$prefix/.profile - else - profileFile=$prefix/.bashrc - fi - - . $profileFile -fi - -# Source function library. -if [ -f /etc/init.d/functions ]; then - . /etc/init.d/functions -fi - -if [ -z "$COLUMNSTORE_INSTALL_DIR" ]; then - COLUMNSTORE_INSTALL_DIR=/usr/local/mariadb/columnstore -fi - -export COLUMNSTORE_INSTALL_DIR=$COLUMNSTORE_INSTALL_DIR - -test -f $COLUMNSTORE_INSTALL_DIR/post/functions && . $COLUMNSTORE_INSTALL_DIR/post/functions - -scrname=`basename $0` -tname="check-syscat-oids" - -mt=`module_type` - -#These tests only for PM -if [ "$mt" != "pm" ]; then - exit 0 -fi - -#check for dbrm and data1, don't run if missing both -if firstboot; then - if [ -d $COLUMNSTORE_INSTALL_DIR/data1/000.dir ]; then - cplogger -c 50 $scrname "$tname" "missing dbrm data with existing 000.dir" - exit 1 - else - exit 0 - fi -else - #check for oidbitmap file - if oidbitmapfile; then - cplogger -c 50 $scrname "$tname" "missing oidbitmapfile with existing current file" - exit 1 - fi -fi - -#check for both current file and OIDBITMAP file - -#Make sure all syscat OIDs are present (N.B. only works for shared-everything) - -cplogger -i 48 $scrname "$tname" - -catoids= -catoids="$catoids 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010" -catoids="$catoids 2001 2004" -catoids="$catoids 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040" -catoids="$catoids 2061 2064 2067 2070 2073 2076" - -# TODO-this doesn't work with HDFS file system -#for oid in $catoids; do -# if [ ! 
-s `oid2file $oid` ]; then -# cplogger -c 50 $scrname "$tname" "could not find file for OID $oid" -# exit 1 -# fi -#done - -cplogger -i 52 $scrname "$tname" - -exit 0 - diff --git a/oam/post/test-002.sh b/oam/post/test-002.sh deleted file mode 100755 index 7e4665672..000000000 --- a/oam/post/test-002.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/bin/bash -# -# $Id: test-002.sh 2937 2012-05-30 18:17:09Z rdempsey $ - -prefix=/usr/local - -USER=`whoami 2>/dev/null` - -if [ $USER != "root" ]; then - prefix=$HOME -fi - -if [ $USER != "root" ]; then - if [ -f $prefix/.bash_profile ]; then - profileFile=$prefix/.bash_profile - elif [ -f $prefix/.profile ]; then - profileFile=$prefix/.profile - else - profileFile=$prefix/.bashrc - fi - - . $profileFile -fi - -# Source function library. -if [ -f /etc/init.d/functions ]; then - . /etc/init.d/functions -fi - -if [ -z "$COLUMNSTORE_INSTALL_DIR" ]; then - COLUMNSTORE_INSTALL_DIR=/usr/local/mariadb/columnstore -fi - -export COLUMNSTORE_INSTALL_DIR=$COLUMNSTORE_INSTALL_DIR - -test -f $COLUMNSTORE_INSTALL_DIR/post/functions && . $COLUMNSTORE_INSTALL_DIR/post/functions - -scrname=`basename $0` -tname="check-brm" - -#Don't run on first boot -if firstboot; then - exit 0 -fi - -#Make sure BRM is read-write - -cplogger -i 48 $scrname "$tname" - -#turn this test off for now...it doesn't if the DBRM isn't started, and these tests run too early -# we need a way to run some tests at different stages of system startup... -#dbrmctl status 2>&1 | egrep -qsi '^ok' -/bin/true -rc=$? 
- -if [ $rc -ne 0 ]; then - cplogger -c 50 $scrname "$tname" "the BRM is read only" - exit 1 -fi - -cplogger -i 52 $scrname "$tname" - -exit 0 - diff --git a/oam/post/test-003.sh b/oam/post/test-003.sh deleted file mode 100755 index 7d733ef78..000000000 --- a/oam/post/test-003.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/bin/bash -# -# $Id: test-003.sh 2937 2012-05-30 18:17:09Z rdempsey $ - -prefix=/usr/local - -USER=`whoami 2>/dev/null` - -if [ $USER != "root" ]; then - prefix=$HOME -fi - -if [ $USER != "root" ]; then - if [ -f $prefix/.bash_profile ]; then - profileFile=$prefix/.bash_profile - elif [ -f $prefix/.profile ]; then - profileFile=$prefix/.profile - else - profileFile=$prefix/.bashrc - fi - - . $profileFile -fi - -# Source function library. -if [ -f /etc/init.d/functions ]; then - . /etc/init.d/functions -fi - -if [ -z "$COLUMNSTORE_INSTALL_DIR" ]; then - COLUMNSTORE_INSTALL_DIR=/usr/local/mariadb/columnstore -fi - -export COLUMNSTORE_INSTALL_DIR=$COLUMNSTORE_INSTALL_DIR - -test -f $COLUMNSTORE_INSTALL_DIR/post/functions && . $COLUMNSTORE_INSTALL_DIR/post/functions - -scrname=`basename $0` -tname="check-oid-bitmap" - -#Don't run on first boot -if firstboot; then - exit 0 -fi - -#Make sure there is an oid bitmap file if there are any EM entries - -cplogger -i 48 $scrname "$tname" - -obmfile=$(getConfig OIDManager OIDBitmapFile) -emcnt=$(editem -o 2001 | wc -l) - -rc=1 -if [ -f $obmfile -o $emcnt -eq 0 ]; then - rc=0 -fi - -if [ $rc -ne 0 ]; then - cplogger -c 50 $scrname "$tname" "there is no OID bitmap file but there are Extent Map entires" - exit 1 -fi - -cplogger -i 52 $scrname "$tname" - -exit 0 - diff --git a/oam/post/test-004.sh b/oam/post/test-004.sh deleted file mode 100644 index 3da9255ac..000000000 --- a/oam/post/test-004.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/bin/bash -# -# $Id: test-004.sh 1538 2009-07-22 18:57:04Z dhill $ - -# -# Validates that FilesPerColumnPartition setting is not set lower than existing extents. 
-# - -prefix=/usr/local - -USER=`whoami 2>/dev/null` - -if [ $USER != "root" ]; then - prefix=$HOME -fi - -if [ $USER != "root" ]; then - if [ -f $prefix/.bash_profile ]; then - profileFile=$prefix/.bash_profile - elif [ -f $prefix/.profile ]; then - profileFile=$prefix/.profile - else - profileFile=$prefix/.bashrc - fi - - . $profileFile -fi - -# Source function library. -if [ -f /etc/init.d/functions ]; then - . /etc/init.d/functions -fi - -if [ -z "$COLUMNSTORE_INSTALL_DIR" ]; then - COLUMNSTORE_INSTALL_DIR=/usr/local/mariadb/columnstore -fi - -export COLUMNSTORE_INSTALL_DIR=$COLUMNSTORE_INSTALL_DI -export COLUMNSTORE_INSTALL_DIR=$COLUMNSTORE_INSTALL_DIR - -test -f $COLUMNSTORE_INSTALL_DIR/post/functions && . $COLUMNSTORE_INSTALL_DIR/post/functions - -scrname=`basename $0` -tname="validate-partition-size" - -#Don't run on first boot -if firstboot; then - exit 0 -fi - -exit 0 - -cplogger -i 48 $scrname "$tname" - -# Get the FilesPerColumnPartition setting from Columnstore.xml. -filesPer=$(getConfig ExtentMap FilesPerColumnPartition) - -# Get the maximum segment number for all column files. -maxSeg=$(editem -i | awk -F '|' -v max=0 '{if($7>max)max=$7}END{print max+1}') - -# Error and out if the maximum existing segment number is higher than the FilesPerColumnPartition setting. -if [ $maxSeg -gt $filesPer ]; then - cplogger -c 50 $scrname "$tname" "One or more tables were populated with FilesPerColumnPartition higher than the current setting." 
- exit 1 -fi - -cplogger -i 52 $scrname "$tname" - -exit 0 - diff --git a/oamapps/columnstoreSupport/columnstoreSupport.cpp b/oamapps/columnstoreSupport/columnstoreSupport.cpp index 7597f504c..cea177681 100644 --- a/oamapps/columnstoreSupport/columnstoreSupport.cpp +++ b/oamapps/columnstoreSupport/columnstoreSupport.cpp @@ -29,6 +29,7 @@ #include #include +#include "config.h" #include "liboamcpp.h" #include "configcpp.h" #include "installdir.h" @@ -326,11 +327,11 @@ void reportThread(string reporttype) system(cmd.c_str()); cmd = "echo ' ' >> " + outputFile; system(cmd.c_str()); - cmd = "echo '################# cat /etc/Columnstore.xml ################# ' >> " + outputFile; + cmd = "echo '################# cat /etc/columnstore/Columnstore.xml ################# ' >> " + outputFile; system(cmd.c_str()); cmd = "echo ' ' >> " + outputFile; system(cmd.c_str()); - cmd = "cat " + installDir + "/etc/Columnstore.xml >> " + outputFile; + cmd = "cat " + std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml >> " + outputFile; system(cmd.c_str()); } diff --git a/oamapps/mcsadmin/mcsadmin.cpp b/oamapps/mcsadmin/mcsadmin.cpp index b54e5e1b2..125064c3c 100644 --- a/oamapps/mcsadmin/mcsadmin.cpp +++ b/oamapps/mcsadmin/mcsadmin.cpp @@ -28,10 +28,12 @@ extern int h_errno; #include "columnstoreversion.h" #include "mcsadmin.h" -#include "boost/filesystem/operations.hpp" -#include "boost/filesystem/path.hpp" -#include "boost/scoped_ptr.hpp" -#include "boost/tokenizer.hpp" +#include +#include +#include +#include + +#include "config.h" #include "sessionmanager.h" #include "dbrm.h" #include "messagequeue.h" @@ -200,7 +202,7 @@ int main(int argc, char* argv[]) tmpDir = startup::StartUp::tmpDir(); installDir = startup::StartUp::installDir(); - string cf = installDir + "/etc/" + ConsoleCmdsFile; + string cf = std::string(MCSSYSCONFDIR) + "/columnstore/" + ConsoleCmdsFile; fConfig = Config::makeConfig(cf); // setupSignalHandlers(); @@ -9497,7 +9499,7 @@ void printModuleDisk(ModuleDisk 
moduledisk) cout << "Mount Point Total Blocks Used Blocks Usage %" << endl; cout << "----------------------------- ------------ ------------ -------" << endl; - string etcdir = installDir + "/etc"; + string etcdir = std::string(MCSSYSCONFDIR) + "/columnstore"; for ( unsigned int i = 0 ; i < moduledisk.diskusage.size(); i++) { @@ -9538,7 +9540,7 @@ void printModuleDisk(ModuleDisk moduledisk) void printModuleResources(TopProcessCpuUsers topprocesscpuusers, ModuleCpu modulecpu, TopProcessMemoryUsers topprocessmemoryusers, ModuleMemory modulememory, ModuleDisk moduledisk) { Oam oam; - string etcdir = installDir + "/etc"; + string etcdir = std::string(MCSSYSCONFDIR) + "/columnstore"; cout << endl << "Module '" + topprocesscpuusers.ModuleName + "' Resource Usage" << endl << endl; diff --git a/oamapps/postConfigure/amazonInstaller.cpp b/oamapps/postConfigure/amazonInstaller.cpp index a4988b966..22560fc78 100644 --- a/oamapps/postConfigure/amazonInstaller.cpp +++ b/oamapps/postConfigure/amazonInstaller.cpp @@ -476,8 +476,8 @@ int main(int argc, char* argv[]) } //backup current Columnstore.xml - string configFile = installDir + "/etc/Columnstore.xml"; - string saveFile = installDir + "/etc/Columnstore.xml.save"; + string configFile = MCSSYSCONFDIR + "/columnstore/Columnstore.xml"; + string saveFile = MCSSYSCONFDIR + "/columnstore/Columnstore.xml.save"; string cmd = "rm -f " + saveFile; system(cmd.c_str()); cmd = "cp " + configFile + " " + saveFile; @@ -2427,9 +2427,9 @@ int main(int argc, char* argv[]) } //copy Columnstore.xml to Columnstore.xml.rpmsave for postConfigure no-prompt option - cmd = "rm -f " + installDir + "/etc/Columnstore.xml.rpmsave"; + cmd = "rm -f " + MCSSYSCONFDIR + "/columnstore/Columnstore.xml.rpmsave"; system(cmd.c_str()); - cmd = "cp " + installDir + "/etc/Columnstore.xml " + installDir + "/etc/Columnstore.xml.rpmsave"; + cmd = "cp " + MCSSYSCONFDIR + "/columnstore/Columnstore.xml " + MCSSYSCONFDIR + "/columnstore/Columnstore.xml.rpmsave"; int rtnCode 
= system(cmd.c_str()); if (WEXITSTATUS(rtnCode) != 0) diff --git a/oamapps/postConfigure/helpers.cpp b/oamapps/postConfigure/helpers.cpp index e020ff7bc..476476449 100644 --- a/oamapps/postConfigure/helpers.cpp +++ b/oamapps/postConfigure/helpers.cpp @@ -25,6 +25,7 @@ #include +#include "config.h" #include "configcpp.h" using namespace config; @@ -100,7 +101,7 @@ bool waitForActive() void dbrmDirCheck() { - const string fname = installDir + "/etc/Columnstore.xml.rpmsave"; + const string fname = std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml.rpmsave"; ifstream oldFile (fname.c_str()); if (!oldFile) return; diff --git a/oamapps/postConfigure/installer.cpp b/oamapps/postConfigure/installer.cpp index 85c8a30fa..bff95a3b5 100644 --- a/oamapps/postConfigure/installer.cpp +++ b/oamapps/postConfigure/installer.cpp @@ -47,6 +47,7 @@ #include #include +#include "config.h" #include "liboamcpp.h" #include "configcpp.h" @@ -202,9 +203,9 @@ int main(int argc, char* argv[]) //copy Columnstore.xml.rpmsave if upgrade option is selected if ( installType == "upgrade" ) { - cmd = "/bin/cp -f " + installDir + "/etc/Columnstore.xml " + installDir + "/etc/Columnstore.xml.new 2>&1"; + cmd = "/bin/cp -f " + std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml " + std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml.new 2>&1"; system(cmd.c_str()); - cmd = "/bin/cp -f " + installDir + "/etc/Columnstore.xml.rpmsave " + installDir + "/etc/Columnstore.xml 2>&1"; + cmd = "/bin/cp -f " + std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml.rpmsave " + std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml 2>&1"; system(cmd.c_str()); } @@ -1157,7 +1158,7 @@ bool updateProcessConfig(int serverTypeInstall) } } - string fileName = installDir + "/etc/ProcessConfig.xml"; + string fileName = std::string(MCSSYSCONFDIR) + "/columnstore/ProcessConfig.xml"; //Save a copy of the original version cmd = "/bin/cp -f " + fileName + " " + fileName + ".columnstoreSave > /dev/null 
2>&1"; @@ -1351,7 +1352,7 @@ bool makeRClocal(string moduleName, int IserverTypeInstall) */ bool uncommentCalpontXml( string entry) { - string fileName = installDir + "/etc/Columnstore.xml"; + string fileName = std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml"; ifstream oldFile (fileName.c_str()); diff --git a/oamapps/postConfigure/postConfigure.cpp b/oamapps/postConfigure/postConfigure.cpp index 645357e8a..7b29b1b12 100644 --- a/oamapps/postConfigure/postConfigure.cpp +++ b/oamapps/postConfigure/postConfigure.cpp @@ -65,10 +65,11 @@ #include #include -#include "boost/filesystem/operations.hpp" -#include "boost/filesystem/path.hpp" -#include "boost/tokenizer.hpp" +#include +#include +#include +#include "config.h" #include "columnstoreversion.h" #include "liboamcpp.h" #include "configcpp.h" @@ -559,7 +560,7 @@ int main(int argc, char* argv[]) } if ( oldFileName == oam::UnassignedName ) - oldFileName = installDir + "/etc/Columnstore.xml.rpmsave"; + oldFileName = std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml.rpmsave"; cout << endl; cout << "This is the MariaDB ColumnStore System Configuration and Installation tool." 
<< endl; @@ -4281,8 +4282,8 @@ int main(int argc, char* argv[]) */ bool checkSaveConfigFile() { - string rpmFileName = installDir + "/etc/Columnstore.xml"; - string newFileName = installDir + "/etc/Columnstore.xml.new"; + string rpmFileName = std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml"; + string newFileName = std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml.new"; string extentMapCheckOnly = " "; @@ -4432,7 +4433,7 @@ bool checkSaveConfigFile() return false; } - cmd = "cd " + installDir + "/etc/;../bin/autoConfigure " + extentMapCheckOnly; + cmd = "cd " + std::string(MCSSYSCONFDIR) + "/columnstore;" + installDir + "/bin/autoConfigure " + extentMapCheckOnly; rtnCode = system(cmd.c_str()); if (WEXITSTATUS(rtnCode) != 0) @@ -4567,7 +4568,7 @@ bool updateProcessConfig() string newModule = ">pm"; oldModule.push_back(">um"); - string fileName = installDir + "/etc/ProcessConfig.xml"; + string fileName = std::string(MCSSYSCONFDIR) + "/columnstore/ProcessConfig.xml"; //Save a copy of the original version string cmd = "/bin/cp -f " + fileName + " " + fileName + ".columnstoreSave > /dev/null 2>&1"; @@ -4639,7 +4640,7 @@ bool updateProcessConfig() */ bool uncommentCalpontXml( string entry) { - string fileName = installDir + "/etc/Columnstore.xml"; + string fileName = std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml"; ifstream oldFile (fileName.c_str()); @@ -5314,7 +5315,7 @@ bool storageSetup(bool amazonInstall) hadoopInstalled = "y"; // check whether StorageManager is installed - Config *processConfig = Config::makeConfig((installDir + "/etc/ProcessConfig.xml").c_str()); + Config *processConfig = Config::makeConfig((std::string(MCSSYSCONFDIR) + "/columnstore/ProcessConfig.xml").c_str()); string storageManagerLocation; bool storageManagerInstalled = false; // search the 'PROCESSCONFIG#' entries for the StorageManager entry diff --git a/procmgr/processmanager.cpp b/procmgr/processmanager.cpp index 3c531359f..454c46696 100644 --- 
a/procmgr/processmanager.cpp +++ b/procmgr/processmanager.cpp @@ -25,6 +25,7 @@ #include #include "columnstoreversion.h" +#include "config.h" #include "processmanager.h" #include "installdir.h" #include "dbrm.h" @@ -8778,7 +8779,7 @@ void ProcessManager::clearNICAlarms(std::string hostName) ******************************************************************************************/ bool ProcessManager::updateExtentMap() { - string fileName = startup::StartUp::installDir() + "/etc/Columnstore.xml"; + string fileName = std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml"; ifstream oldFile (fileName.c_str()); @@ -9018,7 +9019,7 @@ int ProcessManager::distributeConfigFile(std::string name, std::string file) log.writeLog(__LINE__, "distributeConfigFile called for " + name + " file = " + file, LOG_TYPE_DEBUG); - string dirName = startup::StartUp::installDir() + "/etc/"; + string dirName = std::string(MCSSYSCONFDIR) + "/columnstore/"; string fileName = dirName + file; ifstream in (fileName.c_str()); @@ -10204,7 +10205,7 @@ int ProcessManager::OAMParentModuleChange() else { // update the Columnstore.xml with the new IP Address - string cmd = "sed -i s/" + downOAMParentIPAddress + "/" + currentIPAddr + "/g " + startup::StartUp::installDir() + "/etc/Columnstore.xml"; + string cmd = "sed -i s/" + downOAMParentIPAddress + "/" + currentIPAddr + "/g " + MCSSYSCONFDIR + "/columnstore/Columnstore.xml"; system(cmd.c_str()); // get parent hotsname and IP address in case it changed diff --git a/procmon/processmonitor.cpp b/procmon/processmonitor.cpp index 976abd42c..967430853 100644 --- a/procmon/processmonitor.cpp +++ b/procmon/processmonitor.cpp @@ -28,6 +28,7 @@ #include #include "columnstoreversion.h" +#include "config.h" #include "IDBDataFile.h" #include "IDBPolicy.h" #include "processmonitor.h" @@ -5885,7 +5886,7 @@ bool ProcessMonitor::amazonIPCheck() log.writeLog(__LINE__, "Module is Running: '" + moduleName + "' / Instance '" + instanceID + "' current IP being 
reconfigured in Columnstore.xml. old = " + IPAddr + ", new = " + currentIPAddr, LOG_TYPE_DEBUG); // update the Columnstore.xml with the new IP Address - string cmd = "sed -i s/" + IPAddr + "/" + currentIPAddr + "/g /usr/local/mariadb/columnstore/etc/Columnstore.xml"; + string cmd = "sed -i s/" + IPAddr + "/" + currentIPAddr + "/g " + MCSSYSCONFDIR + "/columnstore/Columnstore.xml"; system(cmd.c_str()); } else diff --git a/storage-manager/CMakeLists.txt b/storage-manager/CMakeLists.txt index 7835e6acd..dbd7bb9fd 100755 --- a/storage-manager/CMakeLists.txt +++ b/storage-manager/CMakeLists.txt @@ -157,6 +157,6 @@ install(PROGRAMS ) install(FILES storagemanager.cnf - DESTINATION ${INSTALL_ENGINE}/etc + DESTINATION ${ENGINE_SYSCONFDIR}/columnstore COMPONENT platform) diff --git a/storage-manager/src/Config.cpp b/storage-manager/src/Config.cpp index c7927d217..c9c4c2201 100644 --- a/storage-manager/src/Config.cpp +++ b/storage-manager/src/Config.cpp @@ -17,6 +17,10 @@ #include "Config.h" + +// This one is the build system config +#include "config.h" + #include #include #include @@ -69,7 +73,7 @@ Config::Config() : die(false) // the paths to search in order paths.push_back("."); if (cs_install_dir) - paths.push_back(string(cs_install_dir) + "/etc"); + paths.push_back(string(MCSSYSCONFDIR) + "/columnstore"); paths.push_back("/etc"); for (uint i = 0; i < paths.size(); i++) diff --git a/tools/configMgt/autoInstaller.cpp b/tools/configMgt/autoInstaller.cpp index 74f697af7..9c07bcd9b 100644 --- a/tools/configMgt/autoInstaller.cpp +++ b/tools/configMgt/autoInstaller.cpp @@ -39,6 +39,7 @@ #include #include +#include "config.h" #include "liboamcpp.h" #include "configcpp.h" @@ -623,7 +624,7 @@ int main(int argc, char* argv[]) for ( int retry = 0 ; retry < 5 ; retry++ ) { - cmd = "./remote_scp_get.sh " + installParentModuleIPAddr + " " + password + " " + installDir + "" + installLocation + "/etc/Columnstore.xml " + systemUser + " " + debug_flag; + cmd = "./remote_scp_get.sh " + 
installParentModuleIPAddr + " " + password + " " + std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml " + systemUser + " " + debug_flag; rtnCode = system(cmd.c_str()); sleep(2); @@ -685,7 +686,7 @@ int main(int argc, char* argv[]) RPMSAVE: //try Columnstore.xml.rpmsave cout << "Get System Columnstore.xml.rpmsave " << flush; - cmd = "./remote_scp_get.sh " + installParentModuleIPAddr + " " + password + " " + installDir + "" + installLocation + "/etc/Columnstore.xml.rpmsave " + systemUser + " " + debug_flag; + cmd = "./remote_scp_get.sh " + installParentModuleIPAddr + " " + password + " " + std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml.rpmsave " + systemUser + " " + debug_flag; rtnCode = system(cmd.c_str()); if (rtnCode == 0) diff --git a/tools/configMgt/beetlejuice_installer.sh b/tools/configMgt/beetlejuice_installer.sh deleted file mode 100755 index 64c5e4d8d..000000000 --- a/tools/configMgt/beetlejuice_installer.sh +++ /dev/null @@ -1,219 +0,0 @@ -#!/usr/bin/expect -# -# $Id: beetlejuice_installer.sh 421 2007-04-05 15:46:55Z dhill $ -# -# Beetlejuice Installer -# Argument 0 - Server IP address -# Argument 1 - Root Password -# Argument 2 - Debug flag 1 for on, 0 for off - -set timeout 30 -set USERNAME root -set SERVER [lindex $argv 0] -set PASSWORD [lindex $argv 1] -set PACKAGE [lindex $argv 2] -set RELEASE [lindex $argv 3] -set DEBUG [lindex $argv 4] -log_user $DEBUG -spawn -noecho /bin/bash -# -# get the package -# -send_user "Get Calpont Package " -send "rm -f $PACKAGE\n" -#expect -re "#" -send "smbclient //cal6500/shared -Wcalpont -Uoamuser%Calpont1 -c 'cd Iterations/$RELEASE/;prompt OFF;mget $PACKAGE'\n" -expect { - -re "NT_STATUS_NO_SUCH_FILE" { send_user "FAILED: $PACKAGE not found in //cal6500/shared/Iterations/$RELEASE/\n" ; exit -1 } - -re "getting" { send_user "DONE" } abort -} -send_user "\n" -# -# send the DM package -# -expect -re "#" -send_user "Copy Calpont Package " -send "ssh $USERNAME@$SERVER 'rm -f /root/calpont*.rpm'\n" 
-expect -re "word: " -# password for ssh -send "$PASSWORD\n" -expect -re "#" -send "scp $PACKAGE $USERNAME@$SERVER:/root/.\n" -expect { - -re "authenticity" { send "yes\n" - expect { - -re "word: " { send "$PASSWORD\n" } abort - } - } - -re "service not known" { send_user "FAILED: Invalid Host\n" ; exit -1 } - -re "word: " { send "$PASSWORD\n" } abort -} -expect { - -re "100%" { send_user "DONE" } abort - -re "scp" { send_user "FAILED\n" ; - send_user "\n*** Installation Failed\n" ; - exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } - -re "No such file or directory" { send_user "FAILED: Invalid package\n" ; exit -1 } -} -send "rm -f $PACKAGE\n" -# -# backup custom os files -# -send_user "\n" -expect -re "#" -send_user "Backup Custom OS Files " -send "ssh $USERNAME@$SERVER 'rm -f /etc/*.calpont;cp /etc/inittab /etc/inittab.calpont;cp /etc/syslog.conf /etc/syslog.conf.calpont'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "#" { send_user "DONE" } abort - -re "cp" { send_user "FAILED" ; exit -1 } -} -send_user "\n" -# -# unmount disk -# -expect -re "#" -send_user "Unmount disk " -send "ssh $USERNAME@$SERVER 'umount -a'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "#" { send_user "DONE" } abort -} -send_user "\n" -# -# erase package -# -expect -re "#" -send_user "Erase Old Calpont-oracle Package " -send "ssh $USERNAME@$SERVER ' rpm -e --nodeps calpont-oracle'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "uninstall completed" { send_user "DONE" } abort - -re "#" { send_user "DONE" } abort - -re "not installed" { send_user "WARNING: Package not installed" } abort - -re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } -} -send_user "\n" 
-expect -re "#" -send_user "Erase Old Calpont-Mysql Package " -send "ssh $USERNAME@$SERVER ' rpm -e --nodeps calpont-mysql'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "uninstall completed" { send_user "DONE" } abort - -re "#" { send_user "DONE" } abort - -re "not installed" { send_user "WARNING: Package not installed" } abort - -re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } -} -send_user "\n" -expect -re "#" -send_user "Erase Old Calpont Package " -send "ssh $USERNAME@$SERVER ' rpm -e --nodeps calpont'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "uninstall completed" { send_user "DONE" } abort - -re "#" { send_user "DONE" } abort - -re "not installed" { send_user "WARNING: Package not installed" } abort - -re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } -} -send_user "\n" -expect -re "#" -# -# install package -# -expect -re "#" -set timeout 120 -send_user "Install New Calpont Package " -send "ssh $USERNAME@$SERVER ' rpm -ivh /root/$PACKAGE'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "completed" { send_user "DONE" } abort - -re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; - send_user "\n*** Installation Failed\n" ; - exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } -} -send_user "\n" -# -# Restore custom os files -# -set timeout 30 -expect -re "#" -send_user "Restore Custom OS Files " -send "ssh $USERNAME@$SERVER 'mv -f /etc/inittab.calpont /etc/inittab;mv -f /etc/syslog.conf.calpont /etc/syslog.conf'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check 
return -expect { - -re "#" { send_user "DONE" } abort - -re "mv: cannot" { send_user "FAILED" ; exit -1 } -} -send_user "\n" -# -# mount disk -# -expect -re "#" -send_user "Mount disk " -send "ssh $USERNAME@$SERVER 'mount -a'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "#" { send_user "DONE" } abort -} -send_user "\n" -# -# restart syslog -# -expect -re "#" -send_user "Restart syslog service " -send "ssh $USERNAME@$SERVER 'service syslog restart'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "Starting kernel logger" { send_user "DONE" } abort - -re "service " { send_user "WARNING: service not available" } abort -} -send_user "\n" -# -# startup ProcMons -# -expect -re "#" -send_user "Startup ProcMon's " -send "ssh $USERNAME@$SERVER 'kill -HUP 1'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "#" { send_user "DONE" } abort -} -send_user "\n" -# -exit - diff --git a/tools/configMgt/beetlejuice_installer_3rpms.sh b/tools/configMgt/beetlejuice_installer_3rpms.sh deleted file mode 100755 index 42adf26fd..000000000 --- a/tools/configMgt/beetlejuice_installer_3rpms.sh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/expect -# -# $Id: beetlejuice_installer.sh 421 2007-04-05 15:46:55Z dhill $ -# -# Beetlejuice Installer -# Argument 0 - Server IP address -# Argument 1 - Root Password -# Argument 2 - Debug flag 1 for on, 0 for off - -set timeout 30 -set USERNAME root -set SERVER [lindex $argv 0] -set PASSWORD [lindex $argv 1] -set SYSTEMRPM [lindex $argv 2] -set CALPONTRPMNAME [lindex $argv 3] -set CONNECTORRPM1NAME [lindex $argv 4] -set CONNECTORRPM2NAME [lindex $argv 5] -set RELEASE [lindex $argv 6] -set DEBUG [lindex $argv 7] - -set CALPONTRPM $CALPONTRPMNAME"-1"$SYSTEMRPM -set CONNECTORRPM1 $CONNECTORRPM1NAME"-1"$SYSTEMRPM -set CONNECTORRPM2 $CONNECTORRPM2NAME"-1"$SYSTEMRPM -#set SHARED "//cal6500/shared" -set SHARED 
"//calweb/shared" - -log_user $DEBUG - -spawn -noecho /bin/bash -# -# get the package -# -send_user "Get Calpont Packages " -send "rm -f $SYSTEMRPM\n" -#expect -re "#" -send "smbclient $SHARED -Wcalpont -Uoamuser%Calpont1 -c 'cd Iterations/$RELEASE/;prompt OFF;mget $SYSTEMRPM'\n" -expect { - -re "NT_STATUS_NO_SUCH_FILE" { send_user "FAILED: $SYSTEMRPM not found in $SHARED/Iterations/$RELEASE/\n" ; exit -1 } - -re "getting" { send_user "DONE" } abort -} -send_user "\n" -# -# send the DM Package -# -expect -re "#" -send_user "Copy Calpont Packages " -send "ssh $USERNAME@$SERVER 'rm -f /root/$SYSTEMRPM'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -expect -re "#" -send "scp $SYSTEMRPM $USERNAME@$SERVER:/root/.\n" -expect { - -re "authenticity" { send "yes\n" - expect { - -re "word: " { send "$PASSWORD\n" } abort - } - } - -re "service not known" { send_user "FAILED: Invalid Host\n" ; exit -1 } - -re "word: " { send "$PASSWORD\n" } abort -} -expect { - -re "100%" { send_user "DONE" } abort - -re "scp" { send_user "FAILED\n" ; - send_user "\n*** Installation Failed\n" ; - exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } - -re "No such file or directory" { send_user "FAILED: Invalid Package\n" ; exit -1 } -} -send "rm -f $SYSTEMRPM\n" -# -# backup custom os files -# -send_user "\n" -expect -re "#" -send_user "Backup Custom OS Files " -send "ssh $USERNAME@$SERVER 'rm -f /etc/*.calpont;cp /etc/inittab /etc/inittab.calpont;cp /etc/syslog.conf /etc/syslog.conf.calpont'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "#" { send_user "DONE" } abort - -re "cp" { send_user "FAILED" ; exit -1 } -} -send_user "\n" -# -# unmount disk -# -expect -re "#" -send_user "Unmount disk " -send "ssh $USERNAME@$SERVER 'umount -a'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "#" { send_user "DONE" } abort -} -send_user 
"\n" -# -# erase Package -# -expect -re "#" -send_user "Erase Old $CONNECTORRPM1NAME Package " -send "ssh $USERNAME@$SERVER ' rpm -e --nodeps $CONNECTORRPM1NAME'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "uninstall completed" { send_user "DONE" } abort - -re "#" { send_user "DONE" } abort - -re "not installed" { send_user "WARNING: Package not installed" } abort - -re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } -} -send_user "\n" -expect -re "#" -send_user "Erase Old $CONNECTORRPM2NAME Package " -send "ssh $USERNAME@$SERVER ' rpm -e --nodeps $CONNECTORRPM2NAME'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "uninstall completed" { send_user "DONE" } abort - -re "#" { send_user "DONE" } abort - -re "not installed" { send_user "WARNING: Package not installed" } abort - -re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } -} -send_user "\n" -expect -re "#" -send_user "Erase Old $CALPONTRPMNAME Package " -send "ssh $USERNAME@$SERVER ' rpm -e --nodeps $CALPONTRPMNAME'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "uninstall completed" { send_user "DONE" } abort - -re "#" { send_user "DONE" } abort - -re "not installed" { send_user "WARNING: Package not installed" } abort - -re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } -} -send_user "\n" -# -# install Package -# -expect -re "#" -set timeout 120 -send_user "Install New $CALPONTRPMNAME Package " -send "ssh $USERNAME@$SERVER ' rpm -ivh /root/$CALPONTRPM'\n" -expect -re "word: " -# password 
for ssh -send "$PASSWORD\n" -# check return -expect { - -re "completed" { send_user "DONE" } abort - -re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; - send_user "\n*** Installation Failed\n" ; - exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } -} -send_user "\n" -expect -re "#" -send_user "Install New $CONNECTORRPM1NAME Package " -send "ssh $USERNAME@$SERVER ' rpm -ivh /root/$CONNECTORRPM1'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "completed" { send_user "DONE" } abort - -re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; - send_user "\n*** Installation Failed\n" ; - exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } -} -send_user "\n" -expect -re "#" -send_user "Install New $CONNECTORRPM2NAME Package " -send "ssh $USERNAME@$SERVER ' rpm -ivh /root/$CONNECTORRPM2'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "completed" { send_user "DONE" } abort - -re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; - send_user "\n*** Installation Failed\n" ; - exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } -} -send_user "\n" -# -# Restore custom os files -# -set timeout 30 -expect -re "#" -send_user "Restore Custom OS Files " -send "ssh $USERNAME@$SERVER 'mv -f /etc/inittab.calpont /etc/inittab;mv -f /etc/syslog.conf.calpont /etc/syslog.conf'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "#" { send_user "DONE" } abort - -re "mv: cannot" { send_user "FAILED" ; exit -1 } -} -send_user "\n" -# -# mount disk -# -expect -re "#" -send_user "Mount disk " -send "ssh $USERNAME@$SERVER 'mount -a'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "#" { 
send_user "DONE" } abort -} -send_user "\n" -# -# restart syslog -# -expect -re "#" -send_user "Restart syslog service " -send "ssh $USERNAME@$SERVER 'service syslog restart'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "Starting kernel logger" { send_user "DONE" } abort - -re "service " { send_user "WARNING: service not available" } abort -} -send_user "\n" -# -# startup ProcMons -# -expect -re "#" -send_user "Startup ProcMon's " -send "ssh $USERNAME@$SERVER 'kill -HUP 1'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "#" { send_user "DONE" } abort -} -send_user "\n" -# -exit - diff --git a/tools/configMgt/configure.cpp b/tools/configMgt/configure.cpp index 7c600c2bb..a041055af 100644 --- a/tools/configMgt/configure.cpp +++ b/tools/configMgt/configure.cpp @@ -234,7 +234,7 @@ int main(int argc, char* argv[]) exit (-1); } - cmd = "./remote_scp_get.sh " + parentOAMModuleIPAddr + " " + password + " /usr/local/mariadb/columnstore/etc/Columnstore.xml 0 "; + cmd = "./remote_scp_get.sh " + parentOAMModuleIPAddr + " " + password + " " + MCSSYSCONFDIR + "/columnstore/Columnstore.xml 0 "; rtnCode = system(cmd.c_str()); if (rtnCode == 0) diff --git a/tools/configMgt/parent_installer.sh b/tools/configMgt/parent_installer.sh deleted file mode 100755 index d2a9a0e73..000000000 --- a/tools/configMgt/parent_installer.sh +++ /dev/null @@ -1,365 +0,0 @@ -#!/usr/bin/expect -# -# $Id: parent_installer.sh 421 2007-04-05 15:46:55Z dhill $ -# -# Parent OAM Installer, copy RPM's and custom OS files from postConfigure script -# Argument 0 - Parent OAM IP address -# Argument 1 - Root Password of Parent OAM Module -# Argument 2 - Calpont Config File -# Argument 3 - Debug flag 1 for on, 0 for off - -set timeout 40 -set USERNAME root -set SERVER [lindex $argv 0] -set PASSWORD [lindex $argv 1] -set PACKAGE [lindex $argv 2] -set RELEASE [lindex $argv 3] -set CONFIGFILE [lindex $argv 4] -set DEBUG 
[lindex $argv 5] -set CALPONTPACKAGE infinidb-platform-$PACKAGE -set CALPONTPACKAGE0 infinidb-0$PACKAGE -set CALPONTPACKAGE1 infinidb-1$PACKAGE -set ORACLEPACKAGE infinidb-oracle$PACKAGE -set MYSQLPACKAGE infinidb-storage-engine-$PACKAGE -set MYSQLDPACKAGE infinidb-mysql-$PACKAGE - -set SHARED "//calweb/shared" - -log_user $DEBUG -spawn -noecho /bin/bash -send "rm -f $PACKAGE,$CALPONTPACKAGE0,$CALPONTPACKAGE1,$ORACLEPACKAGE,$MYSQLPACKAGE,$MYSQLDPACKAGE\n" -# -# delete and erase all old packages from Director Module -# -send "ssh $USERNAME@$SERVER 'rm -f /root/calpont*.rpm'\n" -expect { - -re "authenticity" { send "yes\n" - expect { - -re "word: " { send "$PASSWORD\n" } abort - } - } - -re "service not known" { send_user "FAILED: Invalid Host\n" ; exit -1 } - -re "word: " { send "$PASSWORD\n" } abort -} -expect { - -re "#" { } abort - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } -} -# -# erase calpont-oracle package -# -expect -re "# " -send_user "Erase Old Calpont-Oracle Connector Package " -send "ssh $USERNAME@$SERVER ' rpm -e --nodeps --allmatches calpont-oracle'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "uninstall completed" { send_user "DONE" } abort - -re "# " { send_user "DONE" } abort - -re "not installed" { send_user "WARNING: Package not installed" } abort - -re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } -} -send_user "\n" -# -# erase infinidb-mysql package -# -expect -re "# " -send_user "Erase Old Calpont-Mysqld Connector Package " -send "ssh $USERNAME@$SERVER 'pkill -9 mysqld'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "# " { } abort -} -send "ssh $USERNAME@$SERVER ' rpm -e --nodeps --allmatches infinidb-mysql'\n" -expect -re "word: " -# password for ssh -send 
"$PASSWORD\n" -# check return -expect { - -re "uninstall completed" { send_user "DONE" } abort - -re "# " { send_user "DONE" } abort - -re "not installed" { send_user "WARNING: Package not installed" } abort - -re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } -} -send_user "\n" -# -# erase infinidb-storage-engine package -# -expect -re "# " -send_user "Erase Old Calpont-Mysql Connector Package " -send "ssh $USERNAME@$SERVER ' rpm -e --nodeps --allmatches infinidb-storage-engine'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "uninstall completed" { send_user "DONE" } abort - -re "# " { send_user "DONE" } abort - -re "not installed" { send_user "WARNING: Package not installed" } abort - -re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } -} -send_user "\n" -send "rm -f $PACKAGE\n" -# -# erase calpont package -# -expect -re "# " -send_user "Erase Old Calpont Packages " -send "ssh $USERNAME@$SERVER ' rpm -e --nodeps --allmatches infinidb-libs infinidb-platform infinidb-enterprise'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "uninstall completed" { send_user "DONE" } abort - -re "# " { send_user "DONE" } abort - -re "not installed" { send_user "WARNING: Package not installed" } abort - -re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } -} -send_user "\n" -# -# get the calpont package -# -expect -re "# " -send_user "Get Calpont Package " -send "smbclient $SHARED -Wcalpont -Uoamuser%Calpont1 -c 'cd Iterations/$RELEASE/;prompt OFF;mget $CALPONTPACKAGE0'\n" -expect { - -re "NT_STATUS_NO_SUCH_FILE" 
{ - send "smbclient $SHARED -Wcalpont -Uoamuser%Calpont1 -c 'cd Iterations/$RELEASE/;prompt OFF;mget $CALPONTPACKAGE1'\n" - expect { - -re "NT_STATUS_NO_SUCH_FILE" { send_user "FAILED: $CALPONTPACKAGE not found\n" ; exit -1 } - -re "getting" { send_user "DONE" } abort - } - } - -re "getting" { send_user "DONE" } abort -} -send_user "\n" -# -# send the calpont package -# -send_user "Copy Calpont Package " -send "scp $CALPONTPACKAGE $USERNAME@$SERVER:/root/.\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -expect { - -re "100%" { send_user "DONE" } abort - -re "scp" { send_user "FAILED\n" ; - send_user "\n*** Installation Failed\n" ; - exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } - -re "No such file or directory" { send_user "FAILED: Invalid package\n" ; exit -1 } -} -send_user "\n" -send "rm -f $PACKAGE\n" -# -# install calpont package -# -expect -re "# " -set timeout 120 -send_user "Install New Calpont Package " -send "ssh $USERNAME@$SERVER ' rpm -ivh /root/$CALPONTPACKAGE'\n" -expect -re "word: " -# password for ssh -send "$PASSWORD\n" -# check return -expect { - -re "completed" { send_user "DONE" } abort - -re "Failed dependencies" { send_user "FAILED: Failed dependencies\n" ; - send_user "\n*** Installation Failed\n" ; - exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } -} -send_user "\n" -set timeout 40 -expect -re "# " -send "rm -f $PACKAGE\n" -# -if { $CONFIGFILE != "NULL"} { - # - # copy over Columnstore.xml file - # - send_user "Copy Calpont Configuration File " - send "scp $CONFIGFILE $USERNAME@$SERVER:/usr/local/mariadb/columnstore/etc/Columnstore.xml\n" - expect -re "word: " - # send the password - send "$PASSWORD\n" - expect { - -re "100%" { send_user "DONE" } abort - -re "scp" { send_user "FAILED\n" ; - send_user "\n*** Installation Failed\n" ; - exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: 
Invalid password\n" ; exit -1 } - -re "No such file or directory" { send_user "FAILED: Invalid package\n" ; exit -1 } - } -} else { - # - # rename previous installed config file - # - send_user "Copy RPM-saved Calpont Configuration File " - send "ssh $USERNAME@$SERVER 'cd /usr/local/mariadb/columnstore/etc/;mv -f Columnstore.xml Columnstore.xml.install;cp -v Columnstore.xml.rpmsave Columnstore.xml'\n" - expect -re "word: " - # password for ssh - send "$PASSWORD\n" - # check return - expect { - -re "Columnstore.xml" { send_user "DONE" } abort - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } - } -} -send_user "\n" -# -# get the calpont-oracle package -# -set timeout 40 -expect -re "# " -send_user "Get Calpont-Oracle Connector Package " -send "smbclient $SHARED -Wcalpont -Uoamuser%Calpont1 -c 'cd Iterations/$RELEASE/;prompt OFF;mget $ORACLEPACKAGE'\n" -expect { - -re "NT_STATUS_NO_SUCH_FILE" { send_user "WARNING: $ORACLEPACKAGE not found, skipping\n" } abort - -re "getting" { send_user "DONE\n" - # - # send the calpont-oracle package - # - expect -re "# " - send_user "Copy Calpont-Oracle Connector Package " - send "scp $ORACLEPACKAGE $USERNAME@$SERVER:/root/.\n" - expect -re "word: " - # password for ssh - send "$PASSWORD\n" - # check return - expect { - -re "100%" { send_user "DONE" } abort - -re "scp" { send_user "FAILED\n" ; - send_user "\n*** Installation Failed\n" ; - exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } - -re "No such file or directory" { send_user "FAILED: Invalid package\n" ; exit -1 } - } - # - # install calpont-oracle package - # - send_user "\n" - expect -re "# " - set timeout 120 - send_user "Install Calpont-Oracle Connector Package " - send "ssh $USERNAME@$SERVER ' rpm -ivh /root/$ORACLEPACKAGE'\n" - expect -re "word: " - # password for ssh - send "$PASSWORD\n" - # check return - expect { - -re "completed" { send_user "DONE" } abort - -re 
"Failed dependencies" { send_user "FAILED: Failed dependencies" ; exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } - } - send_user "\n" - } -} -set timeout 40 -expect -re "# " -# -# get the calpont-mysql package -# -send_user "Get Calpont-Mysql Connector Package " -send "smbclient $SHARED -Wcalpont -Uoamuser%Calpont1 -c 'cd Iterations/$RELEASE/;prompt OFF;mget $MYSQLPACKAGE'\n" -expect { - -re "NT_STATUS_NO_SUCH_FILE" { send_user "WARNING: $MYSQLPACKAGE not found, skipping\n" } abort - -re "getting" { send_user "DONE\n" - # - # send the calpont-mysql package - # - expect -re "# " - send_user "Copy Calpont-Mysql Connector Package " - send "scp $MYSQLPACKAGE $USERNAME@$SERVER:/root/.\n" - expect -re "word: " - # password for ssh - send "$PASSWORD\n" - # check return - expect { - -re "100%" { send_user "DONE" } abort - -re "scp" { send_user "FAILED\n" ; - send_user "\n*** Installation Failed\n" ; - exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } - -re "No such file or directory" { send_user "FAILED: Invalid package\n" ; exit -1 } - } - # - # install calpont-mysql package - # - send_user "\n" - expect -re "# " - set timeout 120 - send_user "Install Calpont-Mysql Connector Package " - send "ssh $USERNAME@$SERVER ' rpm -ivh $MYSQLPACKAGE'\n" - expect -re "word: " - # password for ssh - send "$PASSWORD\n" - # check return - expect { - -re "completed" { send_user "DONE" } abort - -re "Failed dependencies" { send_user "FAILED: Failed dependencies" ; exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } - } - send_user "\n" - } -} -expect -re "# " -# -# get the infinidb-mysql package -# -send_user "Get Calpont-Mysqld Package " -send "smbclient $SHARED -Wcalpont -Uoamuser%Calpont1 -c 'cd Iterations/$RELEASE/;prompt OFF;mget $MYSQLDPACKAGE'\n" -expect { - -re "NT_STATUS_NO_SUCH_FILE" { send_user "WARNING: 
$MYSQLDPACKAGE not found, skipping\n" } abort - -re "getting" { send_user "DONE\n" - # - # send the infinidb-mysql package - # - expect -re "# " - send_user "Copy Calpont-Mysqld Package " - send "scp $MYSQLDPACKAGE $USERNAME@$SERVER:.\n" - expect -re "word: " - # password for ssh - send "$PASSWORD\n" - # check return - expect { - -re "100%" { send_user "DONE" } abort - -re "scp" { send_user "FAILED\n" ; - send_user "\n*** Installation Failed\n" ; - exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } - -re "No such file or directory" { send_user "FAILED: Invalid package\n" ; exit -1 } - } - # - # install infinidb-mysql-mysqld package - # - send_user "\n" - expect -re "# " - set timeout 120 - send_user "Install Calpont-Mysqld Package " - send "ssh $USERNAME@$SERVER ' rpm -ivh $MYSQLDPACKAGE'\n" - expect -re "word: " - # password for ssh - send "$PASSWORD\n" - # check return - expect { - -re "completed" { send_user "DONE" } abort - -re "Failed dependencies" { send_user "FAILED: Failed dependencies" ; exit -1 } - -re "Permission denied, please try again" { send_user "FAILED: Invalid password\n" ; exit -1 } - } - send_user "\n" - } -} -# -exit - diff --git a/tools/dbbuilder/dbbuilder.cpp b/tools/dbbuilder/dbbuilder.cpp index 7fbecf4e7..8852028e9 100644 --- a/tools/dbbuilder/dbbuilder.cpp +++ b/tools/dbbuilder/dbbuilder.cpp @@ -28,6 +28,7 @@ using namespace std; #include +#include "config.h" #include "dbbuilder.h" #include "systemcatalog.h" #include "liboamcpp.h" @@ -227,7 +228,7 @@ int main(int argc, char* argv[]) //@bug5554, make sure IDBPolicy matches the Columnstore.xml config try { - string calpontConfigFile(startup::StartUp::installDir() + "/etc/Columnstore.xml"); + string calpontConfigFile(std::string(MCSSYSCONFDIR) + "/columnstore/Columnstore.xml"); config::Config* sysConfig = config::Config::makeConfig(calpontConfigFile.c_str()); string tmp = sysConfig->getConfig("Installation", "DBRootStorageType"); diff 
--git a/tools/evalidx/CMakeLists.txt b/tools/evalidx/CMakeLists.txt deleted file mode 100644 index 859067bc5..000000000 --- a/tools/evalidx/CMakeLists.txt +++ /dev/null @@ -1,47 +0,0 @@ - -include_directories(${KDE4_INCLUDES} ${KDE4_INCLUDE_DIR} ${QT_INCLUDES} ) - - -########### next target ############### - -set(evalidx_SRCS evalidx.cpp) - -kde4_add_executable(evalidx ${evalidx_SRCS}) - -target_link_libraries(evalidx ${KDE4_KDECORE_LIBS} dmlpackageproc execplan joblist rowgroup writeengine brm dataconvert cacheutils dmlpackage messageqcpp loggingcpp configcpp rwlock @boost_thread_lib@ xml2 joiner oamcpp snmpmanager @boost_filesystem_lib@ @boost_date_time_lib@ multicast funcexp) - -install(TARGETS evalidx ${INSTALL_TARGETS_DEFAULT_ARGS}) - - -########### install files ############### - - - - -#original Makefile.am contents follow: - -## $Id: Makefile.am 333 2009-04-03 20:35:04Z rdempsey $ -### Process this file with automake to produce Makefile.in -# -#AM_CPPFLAGS = $(idb_cppflags) -#AM_CFLAGS = $(idb_cflags) -#AM_CXXFLAGS = $(idb_cxxflags) -#AM_LDFLAGS = $(idb_ldflags) -#bin_PROGRAMS = evalidx -#evalidx_SOURCES = evalidx.cpp -#evalidx_CPPFLAGS = @idb_common_includes@ $(AM_CPPFLAGS) -#evalidx_LDFLAGS = @idb_common_ldflags@ -ldmlpackageproc -lexecplan -ljoblist -lrowgroup -lwriteengine -lbrm \ -#-ldataconvert -lcacheutils -ldmlpackage -lmessageqcpp -lloggingcpp -lconfigcpp -lrwlock -l@boost_thread_lib@ -lxml2 \ -#-ljoiner -loamcpp -lsnmpmanager -l@boost_filesystem_lib@ -l@boost_date_time_lib@ @netsnmp_libs@ -lmulticast -lfuncexp \ -#$(AM_LDFLAGS) -# -#test: -# -#coverage: -# -#leakcheck: -# -#docs: -# -#bootstrap: install-data-am -# diff --git a/tools/evalidx/checkidx.py b/tools/evalidx/checkidx.py deleted file mode 100755 index c58b68dd3..000000000 --- a/tools/evalidx/checkidx.py +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/python - -import os, sys, glob, shutil, xml.dom.minidom - -def find_paths(): - - """Find DBRoot and BulkRoot.""" - try: - config_file = 
os.environ['COLUMNSTORE_CONFIG_FILE'] - except KeyError: - try: - config_file = '/usr/local/mariadb/columnstore/etc' - os.lstat(config_file) - except: - sys.exit('No config file available') - - - xmldoc = xml.dom.minidom.parse(config_file) - bulk_node = xmldoc.getElementsByTagName('BulkRoot')[0] - db_node = xmldoc.getElementsByTagName('DBRoot')[0] - - bulk_dir = bulk_node.childNodes[0].nodeValue - data_dir = db_node.childNodes[0].nodeValue - - return (bulk_dir, data_dir) - - -def validate_indexes(job_file): - index_files = [] - xmldoc = xml.dom.minidom.parse(job_file) - - for index_node in xmldoc.getElementsByTagName('Index'): - curTreeOid = index_node.getAttribute('iTreeOid') - curListOid = index_node.getAttribute('iListOid') - curMapOid = index_node.getAttribute('mapOid') - curIdxCmdArg = ' -t ' + curTreeOid + ' -l ' + curListOid + ' -v -c ' + curMapOid + ' -b 4' + ' > idxCol_' + curMapOid+'.out' -# print curIdxCmd -# exec_cmd( genii + '/tools/evalidx/evalidx', curIdxCmd ) - index_files.append( curIdxCmdArg ) - - return index_files - -def exec_cmd(cmd, args): - """Execute command using subprocess module or if that fails, - use os.system - """ - - try: - import subprocess - - try: - retcode = call(cmd + " "+args, shell=True) - if retcode < 0: - print >>sys.stderr, "Child was terminated by signal", -retcode - sys.exit(-1) - - else: - print >>sys.stderr, "Child returned", retcode - - except OSError, e: - - print >>sys.stderr, "Execution failed:", e - sys.exit(-1) - except: - res = os.system(cmd+' '+args) - if res: - sys.exit( res ) - - - -def main(): - """ - Validate indexes.. 
- """ - - if not os.access('.', os.W_OK): - os.chdir('/tmp') - print 'Changing to /tmp to have permission to write files' - - if len(os.getenv('LD_LIBRARY_PATH'))<5: - print 'Suspicous LD_LIBRARY_PATH: %s'%os.getenv('LD_LIBRARY_PATH') - - home = os.getenv('HOME') - genii = home+'/genii' - - (bulkroot, dbroot) = find_paths() - - if len(glob.glob(bulkroot+'/job/Job_300.xml')) == 0: - sys.exit("No Job_300.xml exist ") - - indexes = validate_indexes(bulkroot+'/job/Job_300.xml') - for idxCmdArg in indexes: - print idxCmdArg - exec_cmd( genii + '/tools/evalidx/evalidx', idxCmdArg ) - - -## the following line allows either interactive use or module import -if __name__=="__main__": main() diff --git a/tools/evalidx/evalidx.cpp b/tools/evalidx/evalidx.cpp deleted file mode 100644 index 473de24fc..000000000 --- a/tools/evalidx/evalidx.cpp +++ /dev/null @@ -1,388 +0,0 @@ -/**************************************************************** - * $Id$ - * - ***************************************************************/ - -/** @file - * Validataion tool for index validation - * - * This tool is to validate the index tree and list structure. It starts - * from the index tree file, walk through the tree structure until it hits - * a leaf node, then locates the index list block based on the leaf pointer. - * It continues to get all the RIDs for that index key, and also goes to - * the column OID file to validate the column value with the index key. 
- */ - -#include -#include -#include -#include -#include -#include -using namespace std; - -#include - -#include "bytestream.h" -using namespace messageqcpp; - -#include "dmlpackageprocessor.h" -using namespace dmlpackageprocessor; - -#include "writeengine.h" -#include "we_indextree.h" -using namespace WriteEngine; - -#include "configcpp.h" -using namespace config; - -#include "dm.h" - -/** Debug macro */ -#define _DEBUG 0 -#if _DEBUG -#define DEBUG cout -#else -#define DEBUG if (0) cout -#endif - -namespace -{ - -const streamsize entrysize = sizeof(IdxBitTestEntry); -const streamsize subbloacksize = SUBBLOCK_TOTAL_BYTES; -const streamsize listHdrSize = sizeof(IdxRidListHdr); -uint32_t treeOID, listOID; -uint32_t colOID = 0; -uint32_t columnSize = 0; -ifstream indexTreeFile, indexListFile, columnFile; -bool vFlag = false; -bool nFlag = false; -int64_t keyNumber = 0; -FILE* pFile; -IndexList indexList; -u_int64_t keyvalue; -int totalRids = 0; - -void usage() -{ - cout << "evalidx [-h] -t OID -l OID [-v -c OID -b colSize -k keyvalue -n ]" << endl; - cout << "\t-h display this help" << endl; - cout << "\t-t OID index tree" << endl; - cout << "\t-l OID index list" << endl; - cout << "\t-v validate index value (need to go with -c and -b)" << endl; - cout << "\t-c OID column" << endl; - cout << "\t-b column size in number of byte (default = 4)" << endl; - cout << "\t-k keyvalue to return index list header for this key" << endl; - cout << "\t-n read RID from tree design" << endl; -} - -int oid2file(uint32_t oid, string& filename) -{ -//ITER17_Obsolete -// This code and this program is obsolete at this point since we are not -// currently supporting indexes. This function and it's use of getFileName -// needs to be changed, if we ever resurrect this program, since getFileName -// now normally requires the DBRoot, partition, and segment number in -// addition to the OID. 
-#if 0 - FileOp fileOp; - char file_name[WriteEngine::FILE_NAME_SIZE]; - - if (fileOp.getFileName(oid, file_name) == WriteEngine::NO_ERROR) - { - filename = file_name; - return 0; - } - else - { - cerr << "WriteEngine::FileOp::getFileName() error!" << endl; - return -1; - } - -#endif - return 0; -} - -int validateValue(WriteEngine::RID rid, int64_t key) -{ - int64_t byteoffset = rid * columnSize; - ByteStream::byte inbuf[columnSize]; - int64_t colVal = 0; - - columnFile.seekg(byteoffset, ios::beg); - columnFile.read(reinterpret_cast(inbuf), columnSize); - memcpy(&colVal, inbuf, columnSize); - - if (key != colVal) - { - cerr << "rowid: " << rid << endl - << "index: " << key << endl - << "column: " << colVal << endl; - return 1; - } - - return 0; -} - -void walkBlock (streamsize byteoffset) -{ - int64_t newByteoffset = 0; - int fbo; - int groupNo; - ByteStream::byte inbuf[entrysize]; - ByteStream::byte listHdr[listHdrSize]; - IdxBitTestEntry* entry; - IdxRidListHdr* hdrEntry; - IdxRidListHdrSize* hdrSize; - - // get group number - indexTreeFile.seekg(byteoffset, ios::beg); - indexTreeFile.read(reinterpret_cast(inbuf), entrysize); - - if (indexTreeFile.eof()) return; - - entry = (IdxBitTestEntry*) inbuf; - groupNo = entry->group; - - // continue to walk next stage if not leaf node for each entry in the group - for (int i = 0; i < 1 << groupNo; i++) - { - indexTreeFile.seekg(byteoffset, ios::beg); - indexTreeFile.read(reinterpret_cast(inbuf), entrysize); - - if (indexTreeFile.eof()) return; - - entry = (IdxBitTestEntry*) inbuf; - byteoffset += entrysize; - - DEBUG << ": fbo=" << (int)entry->fbo << - " sbid=" << entry->sbid << " sbentry=" << entry->entry << - " group=" << entry->group << " bittest=" << entry->bitTest << - " type=" << entry->type << endl; - - if (entry->type == WriteEngine::EMPTY_ENTRY || - entry->type == WriteEngine::EMPTY_LIST || - entry->type == WriteEngine::EMPTY_PTR) - continue; - - // convert lbid to real fob number - uint16_t dbRoot; - uint32_t 
partition; - uint16_t segment; - BRMWrapper::getInstance()->getFboOffset(entry->fbo, dbRoot, partition, segment, fbo); - newByteoffset = ((int64_t)fbo) * BLOCK_SIZE + entry->sbid * subbloacksize + entry->entry * entrysize; - - if (entry->type > 6) - { - cerr << "invalid type= " << entry->type << endl; - cerr << "fbo= " << fbo << " sbid= " << entry->sbid << " entry= " << entry->entry << endl; - throw runtime_error("invalid type of tree block"); - } - - // stop walking index tree if leaf node. go walk index list then - if (entry->type == LEAF_LIST) - { - keyNumber++; - IdxEmptyListEntry listPtr; - int size, rc; - CommBlock cbList; - - listPtr.fbo = entry->fbo; - listPtr.sbid = entry->sbid; - listPtr.entry = entry->entry; - - indexListFile.seekg(newByteoffset, ios::beg); - indexListFile.read(reinterpret_cast(listHdr), listHdrSize); - hdrEntry = reinterpret_cast(listHdr); - hdrSize = reinterpret_cast(listHdr); - DEBUG << "\nkey= " << hdrEntry->key - << " rowsize= " << hdrSize->size; - - // add feather for Jean. print out list header for a given key value - if (keyvalue == hdrEntry->key) - { - cerr << "fbo= " << listPtr.fbo - << " sbid= " << listPtr.sbid - << " entry= " << listPtr.entry - << " key : " << keyvalue << endl; - } - - cbList.file.oid = listOID; - cbList.file.pFile = pFile; - //WriteEngine::RID ridArray[MAX_BLOCK_ENTRY*10]; - int rSize = 0; - rSize = hdrSize->size; - WriteEngine::RID* ridArray = new WriteEngine::RID[rSize]; - size = 0; - - if (!nFlag) - rc = indexList.getRIDArrayFromListHdr(cbList, hdrEntry->key, &listPtr, ridArray, size); - else - rc = indexList.getRIDArrayFromListHdrNarray(cbList, hdrEntry->key, &listPtr, ridArray, size, true); - - totalRids = totalRids + size; - - if (rc) - { - cerr << "Get RID array failed for index block: " << rc << endl; - cerr << "new byte offset= " << newByteoffset << endl; - cerr << "file good? 
" << indexListFile.good() << endl; - cerr << "fbo= " << listPtr.fbo - << " sbid= " << listPtr.sbid - << " entry= " << listPtr.entry << endl; - - for (int64_t j = 0; j < size; j++) - cerr << " " << ridArray[j] << endl; - - throw runtime_error("Get RID array failed"); - } - - if (hdrSize->size != static_cast(size)) - { - cerr << "row size not match with list header" << endl; - cerr << "fbo= " << listPtr.fbo - << " sbid= " << listPtr.sbid - << " entry= " << listPtr.entry << endl; - - for (int64_t j = 0; j < size; j++) - cerr << " " << ridArray[j] << endl; - - throw runtime_error("row size not match with list header"); - } - - for (int64_t j = 0; j < size; j++) - { - DEBUG << " " << ridArray[j] << endl; - - // validate column value with the index value - if (vFlag) - idbassert(validateValue(ridArray[j], hdrEntry->key) == 0); - } - - delete [] ridArray; - } - else - walkBlock(newByteoffset); - } - -} - -} - -int main(int argc, char* argv[]) -{ - int c; - int i; - string filename; - - while ((c = getopt(argc, argv, "ntlhbcvk")) != EOF) - switch (c) - { - case 't': - treeOID = atoi(argv[optind]); - - if (oid2file(treeOID, filename)) return 1; - - DEBUG << "tree: " << filename << endl; - indexTreeFile.open(filename.c_str()); - break; - - case 'l': - listOID = atoi(argv[optind]); - - if (oid2file(listOID, filename)) return 1; - - DEBUG << "list: " << filename << endl; - - indexListFile.open(filename.c_str()); - pFile = fopen(filename.c_str(), "rb"); - - if (!pFile) - { - cerr << "Invalid OID " << listOID << " for index list" << endl; - exit(1); - } - - break; - - case 'v': - vFlag = true; - break; - - case 'c': - colOID = atoi(argv[optind]); - - if (oid2file(colOID, filename)) return 1; - - DEBUG << "column: " << filename << endl; - columnFile.open(filename.c_str()); - break; - - case 'b': - columnSize = atoi(argv[optind]); - break; - - case 'k': - keyvalue = atoi(argv[optind]); - break; - - case 'h': - usage(); - return 0; - break; - - case 'n': - nFlag = true; - break; - 
- default: - usage(); - return 1; - break; - } - - if ((argc - optind) < 1) - { - usage(); - return 1; - } - - if (argc < 5) - { - usage(); - return 1; - } - - if (vFlag && (colOID == 0 || columnSize == 0)) - { - cerr << "Please provide both -c and -b option if -v is indicated." << endl; - usage(); - return 1; - } - - if (vFlag && !columnFile.good()) - { - cerr << "Bad column OID" << endl; - return 1; - } - - if (!indexTreeFile.good() || !indexListFile.good()) - { - cerr << "Bad index OIDs" << endl; - return 1; - } - - - // walk through the index tree file - for (i = 0; i < 32; i++) - walkBlock (0 * BLOCK_SIZE + 1 * subbloacksize + i * entrysize); - - cout << "\n" << keyNumber << " index value validated!" << endl; - cout << "Total RIDs for this column=" << totalRids << endl; - indexListFile.close(); - indexTreeFile.close(); - fclose(pFile); - return 0; -} - diff --git a/tools/setConfig/CMakeLists.txt b/tools/setConfig/CMakeLists.txt index 19fd99a0b..4dcc511c4 100644 --- a/tools/setConfig/CMakeLists.txt +++ b/tools/setConfig/CMakeLists.txt @@ -1,6 +1,7 @@ include_directories( ${ENGINE_COMMON_INCLUDES} ) +configure_file("${CMAKE_CURRENT_SOURCE_DIR}/configxml.sh.in" "${CMAKE_CURRENT_SOURCE_DIR}/configxml.sh" @ONLY) ########### next target ############### diff --git a/tools/setConfig/configxml.sh b/tools/setConfig/configxml.sh.in similarity index 96% rename from tools/setConfig/configxml.sh rename to tools/setConfig/configxml.sh.in index d78953660..789a9f3af 100755 --- a/tools/setConfig/configxml.sh +++ b/tools/setConfig/configxml.sh.in @@ -38,7 +38,7 @@ case "$1" in echo "Old value of $2 / $3 is $oldvalue" - calxml=$InstallDir/etc/Columnstore.xml + calxml=@ENGINE_SYSCONFDIR@/columnstore/Columnstore.xml seconds=$(date +%s) cp $calxml $calxml.$seconds diff --git a/utils/configcpp/config.h b/utils/configcpp/config.h deleted file mode 100644 index da3943ccd..000000000 --- a/utils/configcpp/config.h +++ /dev/null @@ -1,108 +0,0 @@ -// A good set of defaults for the dev 
compile -#ifndef CONFIGCPP_CONFIG_H__ -#define CONFIGCPP_CONFIG_H__ - -#ifndef HAVE_CONFIG_H - -#ifndef _MSC_VER - -#define HAVE_ALARM 1 -#define HAVE_ALLOCA 1 -#define HAVE_ALLOCA_H 1 -#define HAVE_ARPA_INET_H 1 -#define HAVE_DECL_STRERROR_R 1 -#define HAVE_DLFCN_H 1 -#define HAVE_DUP2 1 -#define HAVE_FCNTL_H 1 -#define HAVE_FLOOR 1 -#define HAVE_FORK 1 -#define HAVE_FTIME 1 -#define HAVE_FTRUNCATE 1 -#define HAVE_GETHOSTBYNAME 1 -#define HAVE_GETPAGESIZE 1 -#define HAVE_GETTIMEOFDAY 1 -#define HAVE_INET_NTOA 1 -#define HAVE_INTTYPES_H 1 -#define HAVE_ISASCII 1 -#define HAVE_LIMITS_H 1 -#define HAVE_LOCALTIME_R 1 -#define HAVE_MALLOC 1 -#define HAVE_MALLOC_H 1 -#define HAVE_MBSTATE_T 1 -#define HAVE_MEMCHR 1 -#define HAVE_MEMMOVE 1 -#define HAVE_MEMORY_H 1 -#define HAVE_MEMSET 1 -#define HAVE_MKDIR 1 -#define HAVE_NETDB_H 1 -#define HAVE_NETINET_IN_H 1 -#define HAVE_POW 1 -#define HAVE_PTRDIFF_T 1 -#define HAVE_REGCOMP 1 -#define HAVE_RMDIR 1 -#define HAVE_SELECT 1 -#define HAVE_SETENV 1 -#define HAVE_SETLOCALE 1 -#define HAVE_SOCKET 1 -#define HAVE_STDBOOL_H 1 -#define HAVE_STDDEF_H 1 -#define HAVE_STDINT_H 1 -#define HAVE_STDLIB_H 1 -#define HAVE_STRCASECMP 1 -#define HAVE_STRCHR 1 -#define HAVE_STRCSPN 1 -#define HAVE_STRDUP 1 -#define HAVE_STRERROR 1 -#define HAVE_STRERROR_R 1 -#define HAVE_STRFTIME 1 -#define HAVE_STRINGS_H 1 -#define HAVE_STRING_H 1 -#define HAVE_STRRCHR 1 -#define HAVE_STRSPN 1 -#define HAVE_STRSTR 1 -#define HAVE_STRTOL 1 -#define HAVE_STRTOUL 1 -#define HAVE_STRTOULL 1 -#define HAVE_SYSLOG_H 1 -#define HAVE_SYS_FILE_H 1 -#define HAVE_SYS_MOUNT_H 1 -#define HAVE_SYS_SELECT_H 1 -#define HAVE_SYS_SOCKET_H 1 -#define HAVE_SYS_STATFS_H 1 -#define HAVE_SYS_STAT_H 1 -#define HAVE_SYS_TIMEB_H 1 -#define HAVE_SYS_TIME_H 1 -#define HAVE_SYS_TYPES_H 1 -#define HAVE_SYS_WAIT_H 1 -#define HAVE_UNISTD_H 1 -#define HAVE_UTIME 1 -#define HAVE_UTIME_H 1 -#define HAVE_VALUES_H 1 -#define HAVE_VFORK 1 -#define HAVE_WORKING_VFORK 1 -#define 
LSTAT_FOLLOWS_SLASHED_SYMLINK 1 -//#define PACKAGE "calpont" -//#define PACKAGE_BUGREPORT "support@calpont.com" -//#define PACKAGE_NAME "Calpont" -//#define PACKAGE_STRING "Calpont 1.0.0" -//#define PACKAGE_TARNAME "calpont" -//#define PACKAGE_VERSION "1.0.0" -#define PROTOTYPES 1 -#define RETSIGTYPE void -#define SELECT_TYPE_ARG1 int -#define SELECT_TYPE_ARG234 (fd_set *) -#define SELECT_TYPE_ARG5 (struct timeval *) -#define STDC_HEADERS 1 -#define STRERROR_R_CHAR_P 1 -#define TIME_WITH_SYS_TIME 1 -#define VERSION "1.0.0" -#define __PROTOTYPES 1 -#define restrict __restrict - -#else // _MSC_VER -#endif - -#endif //!HAVE_CONFIG_H - -#endif //!CONFIGCPP_CONFIG_H__ - diff --git a/utils/configcpp/configcpp.cpp b/utils/configcpp/configcpp.cpp index 459a6242b..42277ab84 100644 --- a/utils/configcpp/configcpp.cpp +++ b/utils/configcpp/configcpp.cpp @@ -107,7 +107,7 @@ Config* Config::makeConfig(const char* cf) if (defaultFilePath.empty()) { fs::path configFilePath; - configFilePath = fs::path(installDir) / fs::path("etc") / defaultCalpontConfigFile; + configFilePath = fs::path(MCSSYSCONFDIR) / fs::path("columnstore") / defaultCalpontConfigFile; defaultFilePath = configFilePath.string(); } @@ -383,7 +383,7 @@ void Config::writeConfig(const string& configFile) const const fs::path saveCalpontConfigFileTemp("Columnstore.xml.columnstoreSave"); const fs::path tmpCalpontConfigFileTemp("Columnstore.xml.temp1"); - fs::path etcdir = fs::path(fInstallDir) / fs::path("etc"); + fs::path etcdir = fs::path(MCSSYSCONFDIR) / fs::path("columnstore"); fs::path dcf = etcdir / fs::path(defaultCalpontConfigFile); fs::path dcft = etcdir / fs::path(defaultCalpontConfigFileTemp); diff --git a/utils/configcpp/writeonce.cpp b/utils/configcpp/writeonce.cpp deleted file mode 100644 index 01ed162bd..000000000 --- a/utils/configcpp/writeonce.cpp +++ /dev/null @@ -1,232 +0,0 @@ -/* Copyright (C) 2014 InfiniDB, Inc. 
- - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License - as published by the Free Software Foundation; version 2 of - the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - MA 02110-1301, USA. */ - -// $Id: writeonce.cpp 3495 2013-01-21 14:09:51Z rdempsey $ - -#include "writeonce.h" - -#include -#include -#include -#include -#include -#include -//#define NDEBUG -#include -#include -using namespace std; - -#include -using namespace boost; - -#include "bytestream.h" -using namespace messageqcpp; - -#include "installdir.h" - -namespace -{ -const string DefaultWriteOnceConfigFilename("woparms.dat"); -} - -namespace config -{ - -//If you add parm, you need to update all the methods below until the next comment - -void WriteOnceConfig::initializeDefaults() -{ - string tmpDir = startup::StartUp::tmpDir(); - - fLBID_Shift = make_pair("13", false); - fDBRootCount = make_pair("1", false); - fDBRMRoot = make_pair("/mnt/OAM/dbrm/BRM_saves", false); - string file = tmpDir + "/ColumnstoreShm"; - fSharedMemoryTmpFile1 = make_pair(file, false); - fTxnIDFile = make_pair("/mnt/OAM/dbrm/SMTxnID", false); - file = tmpDir + "/CalpontSessionMonitorShm"; - fSharedMemoryTmpFile2 = make_pair(file, false); -} - -void WriteOnceConfig::setup() -{ - typedef EntryMap_t::value_type VT; - - fEntryMap.insert(VT("PrimitiveServers.LBID_Shift", &fLBID_Shift)); - fEntryMap.insert(VT("SystemConfig.DBRootCount", &fDBRootCount)); - fEntryMap.insert(VT("SystemConfig.DBRMRoot", &fDBRMRoot)); - 
fEntryMap.insert(VT("SessionManager.SharedMemoryTmpFile", &fSharedMemoryTmpFile1)); - fEntryMap.insert(VT("SessionManager.TxnIDFile", &fTxnIDFile)); - fEntryMap.insert(VT("SessionMonitor.SharedMemoryTmpFile", &fSharedMemoryTmpFile2)); - - ByteStream ibs = load(); - - if (ibs.length() > 0) - unserialize(ibs); - else - initializeDefaults(); -} - -void WriteOnceConfig::serialize(ByteStream& obs) const -{ - obs << WriteOnceConfigVersion; - - obs << fLBID_Shift.first; - obs << fDBRootCount.first; - obs << fDBRMRoot.first; - obs << fSharedMemoryTmpFile1.first; - obs << fTxnIDFile.first; - obs << fSharedMemoryTmpFile2.first; -} - -void WriteOnceConfig::unserialize(ByteStream& ibs) -{ - uint32_t version; - ibs >> version; - - if (version < WriteOnceConfigVersion) - { - ostringstream oss; - oss << "Invalid version found in WriteOnceConfig file: " << version; - throw runtime_error(oss.str().c_str()); - } - else if (version > WriteOnceConfigVersion) - { - ostringstream oss; - oss << "Invalid version found in WriteOnceConfig file: " << version; - throw runtime_error(oss.str().c_str()); - } - - ibs >> fLBID_Shift.first; - fLBID_Shift.second = true; - ibs >> fDBRootCount.first; - fDBRootCount.second = true; - ibs >> fDBRMRoot.first; - fDBRMRoot.second = true; - ibs >> fSharedMemoryTmpFile1.first; - fSharedMemoryTmpFile1.second = true; - ibs >> fTxnIDFile.first; - fTxnIDFile.second = true; - ibs >> fSharedMemoryTmpFile2.first; - fSharedMemoryTmpFile2.second = true; -} - - -//End of methods that need to be changed when adding parms - -ByteStream WriteOnceConfig::load() -{ - ByteStream bs; - - if (access(fConfigFileName.c_str(), F_OK) != 0) - { - initializeDefaults(); - return bs; - } - - idbassert(access(fConfigFileName.c_str(), F_OK) == 0); - - ifstream ifs(fConfigFileName.c_str()); - int e = errno; - - if (!ifs.good()) - { - ostringstream oss; - oss << "Error opening WriteOnceConfig file " << fConfigFileName << ": " << strerror(e); - throw runtime_error(oss.str().c_str()); - } - 
- ifs >> bs; - return bs; -} - -void WriteOnceConfig::save(ByteStream& ibs) const -{ - ofstream ofs(fConfigFileName.c_str()); - int e = errno; - - if (!ofs.good()) - { - ostringstream oss; - oss << "Error opening WriteOnceConfig file " << fConfigFileName << ": " << strerror(e); - throw runtime_error(oss.str().c_str()); - } - - ofs << ibs; -} - -WriteOnceConfig::WriteOnceConfig(const char* cf) -{ - string cfs; - - if (cf != 0) - cfs = cf; - else - cfs = startup::StartUp::installDir() + "/etc/" + DefaultWriteOnceConfigFilename; - - fConfigFileName = cfs; - - setup(); -} - -void WriteOnceConfig::setConfig(const string& section, const string& name, const string& value, bool force) -{ - EntryMap_t::iterator iter; - iter = fEntryMap.find(string(section + "." + name)); - - if (iter == fEntryMap.end()) - { - ostringstream oss; - oss << "Invalid request for " << section << '.' << name; - throw runtime_error(oss.str().c_str()); - } - - if ((*iter->second).second && !force) - { - ostringstream oss; - oss << "Invalid attempt to write read-only " << section << '.' << name; - throw runtime_error(oss.str().c_str()); - } - - (*iter->second).first = value; - (*iter->second).second = true; - - ByteStream obs; - serialize(obs); - save(obs); -} - -const string WriteOnceConfig::getConfig(const string& section, const string& name) const -{ - string val; - EntryMap_t::const_iterator iter; - iter = fEntryMap.find(string(section + "." + name)); - - if (iter == fEntryMap.end()) - { - ostringstream oss; - oss << "Invalid request for " << section << '.' 
<< name; - throw runtime_error(oss.str().c_str()); - } - - val = (*iter->second).first; - - return val; -} - -} - diff --git a/utils/infinidb_hadoop/InfiniDB_Hadoop.jar b/utils/infinidb_hadoop/InfiniDB_Hadoop.jar deleted file mode 100755 index 1ddec4b198f59432e9eaad7bc101a6d0e8df2362..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 53522 zcmb5VWpEtfk}N8VES4pWm@H<75zAs`wwRfjdBn`jU@tL<#L@F$(Xl!WfV5DfQZ)EI1Z)m0O=vbuw$52@f{X@qjK24h7TVJ*+P7^bWM5rEo zaG`wNY)@=r5J<*S*F*>8KRUrE1!v*fmRq?txK>u#q(UR9DbFl{122vVcb+k77ELo=cK|29|;B!!nqX;2hW}*4vqp1$K(|tzxN}Rn0eWl{G2V3JYIC}f`w$% zo>;q}9$+6V++SQ1_kBWoJC)BulCe#&H#AOqPzKSFq6%oWt<#+?*yKG+fEeMeMBT-? zHV=rpS8U)wfe$H_>#Y9K9F=h%Kk1-HE9u=hi6a^BDHK+8y|6?7Ew)_>ha*j^dbc(J z*okOO!TC_Rd2niC^b_f2Qk@wPMT*xV+@D$dZ2jAKne`siLWrnM;|ra%fTt3xUb0}n zG1rk^6+Aob920T`HbHZVrfTw8YVay^o0b^16Lzcfgm7?V2bW3iMleo=U2ZaS@M>NG zQe^Y|&Ff?k+&d_s{{(v+d<{i^Xg1`*c;b)-&n*R{;hyZQ6}E_D^1M&}yV#Pp=%{^t zjpxstWMsRf7?l=%^wMdmSRm+BAi*7*{G=%nn|~nD$r+?8q z>6ILe(H}W?oj6Af5a3@Ad7e;_V6c#UL2QJ|GhI*(=};Sv>f<$KwxAm`irIOg=#e?i zSKu&dL^-Zxp^{Z@~ zm3AVEENzJrqjSVmEE?Dlk&W$*uENljnt!D36+b;0mQhZ@Hje`ufgmnLbJAkn+#V11 zVbNsDD}3-^S8sBK)UapF2?t)wxg#6`5_?tGCda;{qIdXWQ&_ zT3Mcv@-7QQPsb!Q)QbZ#!*^?1ert@yM!B2w_(!it>ku9TTcbxsjA zF@HaJ$(*=BRo~r&x+`f0z1s{-_cyn|o41iZJMZ_Or)jO@J#%3zwoqYoRwsNkNJHP6 z#d>=P*4C)h?W}Wkk!Z+|spx|89z)$@IhWF^OJyR*23r>EH3Lh&$+vCQYl=S$;cpMd zBsR5^)$q^WDs>dY@t`-boVJhKA@MKY!oS6n8cjfydCm2?UwLVV+=n;4_qCBhQ3?{@ zYa*1ja(t|q1%M9St44TIj%=t7d;v<`z*^+-(9j-Tr_4Tc?01=AMxESN?OEjw+a?g{ z)bHO5h3Mdgy(0x80=mwK!Gn@~ZTIXhrS^k1tzYFtrP9G~-64@L~gi78Ue)2{d~I)lH%n*D5Dt|}%jIsw-^zsaW1 zf3F@?67&dOe=qAOf>7V9#P4CYodiqv#9aBhbE}G+=TUt5kk$kQ)y@sAH4UBfj`EvV z;14Z|WSVgaVB!9JcCA%(XYjYpUE}_B6s0_#i^HoP3Z_aNsw(N`&Ji<2t_5Ejhow*` zYEUc_HSmLbjUzilV>~InN^7t^n+DndVlx~XYhbv>t7h2&&Kg*(VPa7BT4 zxQjY!(|luwZ&=5BjKB@f(J|R_j?w6{Mmyn<;?uM0(QT5Y7s zeXXGJR_4v#2Xa^2snhfuO3n#SD1@dZ^`5f3SX47tl+j=>>sE;l4PP!nX2+mMw>Ga4 zCQ35$uI%_$xF;tAi@6tL6!N^2)SExm0O44;ua$Hr8hpsjNqGwoi{!qNoSuNppee1n zcDmZQZLEF~WbIKs 
zQ{Y8%`SDIFw&YM^)X!@BfE~HO*kt~;y8;Qcki^CjidA}60vV1!HwdN;01m)g!BkTXa|DQp5dnIRM>9Hjz0< z`Ia+kY)*He1ZQuzl*M+5BF=gSVT5PEk&Fvyo5l?UA>xKGf1dyaE7TTS88MbI>O6S8 z-(F26Bk|ier~g#^%GTU^3d;6!%_Ny^T2^iO`9A%g+eH#AX{_#QuXLYT6E}!e_gj7N2ckixiBS@+hDnU^AB?n-Dk8a z)+|4D@!SpQ^Vs!8FtS>%!|k{gxh(d)QJ%bDOkFdF?d1Dn^PVCs8d zTGoYqkILm|6pHEm+K+;FsHsSBUicPGHSab3P(nL7%FK{6toUT-m9vLV4V8_T>lV(F z&2*a+$qRHMBQdNJqDWRQiX?L;k8+MjFFwg4qwlr-R%ACe;$SxVp?rrT`#D1*hkG0W z!B*tKMyFS+l+eFPuG$vy_5`uhZP06K*xUMr(bs;*5fT~0Zgl0xUpR!sn>x-FOS3+% z^245T8Pnv4-|E~PL?Yn8tWCA5!HH0I8#awd{>@e;gs})|zYqtg%;G8GQNkQC?lyn_ zhpyw@VUA4U6HG1s8_jZk^!D&;V4-`wY@AbiAqxYM_7L2{0A+}K<^0{?mA=$cZ~w<; zp+xF2M7>XFoq3ucb!p$n*MBrQ*e<1@8w40w&)0v|oc*`+l87<%SZLXdzegu@(JYJ$ z3`w$F*nK9_q@4*F%03xQ+_e1BeJ5aCk+iF(557hz)-q2&`Zb5Myg^D(c(^*f=AC4^iXOU-g16$xKFzw(S_jdC87{u|nxUHpHeI1?N zM%a~y2>or-9`;&eJzy=buQ9(#k4>6;Q4RNRwMR!2V^;~28pFM7{S?{toohp6`0g+N zsJ1%FWJ4%$Ffd=pe^qU?|La%EKg(-U9o9{G5&c7sISrWB5&KPC4~*zrZbA*DytD*Z zFj#ynR6H0+#4U}dzsHDtB7dN$rCC*TnSQCPu}MW4fEiERqEe)4`Q$y@wC1f_p>1{Q zyPZKd3TD_m&7iy0dEMnX&3nxM$@#S5zMB)n`$f-xVkVX(^%Ob#9Qmcaz04JBGf!TZ z%@c8HCj1bww@ckgg>u7En2iH+kUQ#k4JI_HGoD?usa@Ts zc{lwfy|Ttj#Jz5|IC)$yK|;qgYTu}gO|zm0)rqlWxapp`6&vs}YS^;E$7WcZ0DYCo z4D+>k4}~%Yf67?NQ#@!`>@;B2e;ES86aDI)DpX%7pcW_!q-3lX;*gs`#gOUAg@tLP zk5lp~dTXO$L?cX3r2Y1#SU$8?F*se?lcKtrB_azYllQ@Wo7Tm%)*vWgmY;X>(WUnn z--fG$bUa_tj8#lHu}M4a!?2^mM8#Heb7A3r5TxXXyt9xcj^v-qkL+BO#{83;_37wA z?Bdy=V)+_V!xdYyF)kWH$|0+{T5QdWkp^ruMq+APB>i$W^^#q@-o6XwseAPCdFQzr z?HbDQBB3GLw~du>dUQX&UOEkmDcWP>m1>)y%hdx(p!B`jPOW14gv#Z4di=y}tq@d6 zhCDDME_Akk-jLUYy4~g1GXeJIS?Xv6WmIZ)J#l&udh~A*`8cRwr7`@I*9BtgQx~g= zvv_y@7Ai)0Nub}5hZI8c5G-R02^JN*;N*$=Kba}f%Z9QMMSs{`!BR2bt53UqMBWQS zZ?l#?yQf6M*QQ{*ig%Q%8@g$SkF#l!W3k;wMAO)ju_4vSc^ek1D6m?%0c#1mrtebu zd%zraEd&H!$BL64@DC@)rykqv(Ot@h8@aYZp6a=nUS1~jfDr-G!Z=KT6wTwl2k^=| zFBMHo$JTh)g6Ir@jP`6SuM&PC;moaoS7;KE_N-};%+XWnswK39pZoB(Y+l=v&PCD8kF)c{hsbj`MQv0J}QyKNTCkuy96d3td zw!hyJ@zr}~1Fg|-tFYKtak#_|NJn9vD0d;5tKhv>vah2ub@&H7FZ5T#=5RG%xsGxa 
z7lk*`oV0km1Vvq9iBYQKkjY${U>`Ypyy!=U{l`n-L16x3i8F#;HGE7FXJ_7^rChy1 zxWlrMLN8m5*$BC9sZ7qyZQ-4lfI$qKNq2^E|9tp9=C3Ul$;ybkTRgt<-ILe|K~LWx z)pIw3oC5Pabyf}hn+pvoRpC*@`4h5T9n$E|p=`2nYX+)EAAb%H+?Z(vU{_GJ*|Wo% zeUJL~mGr2nw7YU*;O6=LWhj&@ijw*F-z4_? zoLjhZR8uFyNSo_eHNOlY)@}g$w>aFyonCm-g2K2Fb49@;$!^5)p~}grQUAwFkY8T zespiY9J#~8^a>X9^6cHBr3BtxOb};8Ax9o?VE9kuxe{bS~zH*ZRPT{J5$N$z!U!(_`!auIL-2@R0Eaqp;oBq)%s%H zhlzVK+onFKGglj?o>JSEh~y|hyo5&f^Q~xv|E+%f=8w9(?6JyoWJUS@1n@es=1;(& zt;$M`uFaZIZOydSp_*FHEq3yxN`za;&kY65*W_<<>ysb;XOIu>FVo*ww<2XF(*}^$ z8x(E7yXi*&smDYSJD+FrNUAZ@m5qGkEW)6`sn%P2XTC@uwq{1)9SH3EMz`%kSi$FN zTI;`g1HvnsunJZ&>{MWfZa9-|$5-IiJIVwS`37X^hi*?Bpw;wva@u&m5J>BB+mDQp zHU%g#a?)S1aZr9)(Q?L8;xa`vI1^A-R^bf~|1!Z5D)~t5h^()Qq+Gq?4D;L34hWZl zY^Z;HA+W%MoulRXO=^FW1=&EPqy6kV4#A!(*YwrjxnLr>N^M*+VH8j?!11e3IW5>C zEzq4AGs1{Zbp*U&?AHoui3W7iI`t5uVy8)i{234NN_e9%wei7GOWgT$H4#Oje$)$Z z5#l9au2|=#}_B$ zzE4uJ10^P8hPWmHGAknIkrc9<5$se-)1ToCjfdpuPsDyYQZm)Xg3*ic#KN0*?y2|s zKr$Ipl%FWY-HIme^AqVXoI}ezi1|eMM8&&ai0@~Uh2j`~O^#vNfd*ebE52xX0iepT zd&pi*35Qg&$gZ&mJb9zPXel}16FNTg`{5$(9^owkM|qhRUCQB z)XKM@ZFWyemHi?p0EMx&VVfi8kjE7!JaGo;^?=9-|J%z?JoAh6TOn3+;or`9E)qwP0R5}h(=S5%VRAt~N zIQ%WroUax23!EgTmt&EdDbqYz5d`>oynfD1t)C9iDBhIhbhn}br5)XH(9Yx4e=Ssp zs*IGtW)Z;mzhjMZ2MzH^8DP> z(^v&ZXfx~Z*r!j5FNs%ZAgNDDEQvGEt|c3^8i8+%(wk#-4J4=sXr&o_agfAI9c3P@ zV@iS5EX+)|Hsnr9nq+O7kNaf!2M1Yh!#j+M}62pejLJ>p@QAoBUrYI^ZCgIxX`@lCYLOnI440Fk%t!e zV1#js#Xt*3=_s-E*J1KT`S3jU8DNd4nNv+?dwsKWLv>@bqm!Q6=a1v6JQQeMJ6V0k zq|hM}Is`H!V4C9oFMYP;r433GbqTNn=pRa~d|9$=WI||biE9A05>^oWiz_lGr@fHA zy7Bz-Je3%PP^ERRY#zTEOARe<4{NOAutEB-?m4Z4AB>C*1&*XCJ;OD9Cf_;nBZ?m_ zS&AK0q8)5=ZJk=!PDG8_TZ-7xqk#AawQ(m+I@Tvuc5^DorcP4e-rD|MKg8C}Tzzdz za50U_jZn8)^W@BB<}EA&dJS7}&G64$4A~AnC9`DB^jRo&%>sHYL36RKv3V1TO#S3R zaO4k6phf{+6m!)Q6-kle4ust!0`EvQOqD>Ok!sq~>=5Ijt~y0`@ke{yi$18&g;$#F zqy#d3JQX1(IK40IM1XD<5dLel@GIiVi6Dv4rl1)ddtR7o&Vret)eQquUlfp;w%${Y z!oWtWPd<(_dt)V@g+(Sdw zJf$`)4(bS@nyAcA-4*&Q&e$cj<29|?_B_Wl5E zfA^!p-6nvPH!DPFjrE#T8-_U%zP5A=@1)tpt@WPjWEgB##EqyT1t94C5&N~B@u2T} 
zk-d%Vfq1Wtd}Z5^GYkJYMh#`d&yAM!$M1AKJJl?8I+%+4&i1@2z~aS0C$@@U-WC7m zJGZN%o=M#XAvD2wtO;xlOdr{CySM=U+W-K6}rKR z_QI7$g)xxQ!r3veX7Gp9T_9h2=1d>GusAhEq_;x=&ZjM|z`IgwlEr&y| z)I@C!8EgN(c{{{-REzGXheefjt*c0*dTI{iwlVcfhH2~`s7=UY! z)|+4~#6rbyV9aOrr4B;n4_RGD4ekv+)L+rot-|W+4b@cNhCFP`EOX1%iTKqsGL}H)275zd7)u8F z!7XT*pP!zW$3x@@9`q1fIg&}ItNP$bLMKw4@!1l=K>1fhBEyXPqkZpY$4}t2`vo*- zFb`5gy z{YVlz$Bh)-vk1?2n;!@6B_*v0E-A6EN8%QaMe7tLi1So)8+27HWE+376elpNQhqh})uh+%1pbvl?EplMbSO`(u%~^4PI!9W_;rZHbS4 zmV9Y67<_yRr_+I|d-jC>2F8FjXo{-Slv*T-xf`~I@sk`@ROQjr!G!B~amSsnHZMAW ztX%i64!vz~C~;a!X1V!8qP@a82&x}FJt+U66&*{QJjdH9{#vgCH13=fnM$B%1*edp>~ zpBMQ4;!iXF*}T-w0z0D48h9`_`9NaYqU;C{sh#4k;iV_V?tvwSq7Mhn?#P$JCLgGW za;tZXv*neS#sy9D^XmK$9L?_NkNNtXwR43DU#Xc1k28F-HR2Z=vm0x(OcC$EYIhQE z_N>5?Ue{Z4izPsIU~_MCYjG^sutWn`%G~y1-;MqYx6sXUZ^-%RAtI{}33eB(rnyUm z{+iQ-W(q9nR$JyzDdBUO1?q2=K&A3;bPMzY@i-!WHY@_Jv2^q*8#66&wMs2}E$Yif z#7i49lYo7aKI<$ZvsSvM4U7yMY~I~!R>Wq7oO4ec+5E=5TSzVkx@}30X4r8R5NVrT zQq2VlU(2Blq*;|%E=#~+vhLw{?sXSPY-V?zF=9(;rS(|n<4~VPpVFk(> zuJH?-H~R$X?S$*MV3kJ#2s?apZlOLuc&#G#_#7*Zjr37p&8^!-Xhi9dlp9fK{o|himKAi(is?QVSdBYmg;7#=hNBg^Y^@l zQc=sy`-5NRW%|BQQ3-I-h)8mOCo>k8n|q@AS{PWIA=F@RB+gRb!iRZ^4Liqu&U9XYADAzV_f|t<3>0_6dlbVjPk;1F?@O3l-%r;e4h=$OQsm2oEmx6XDl0RDl@!9 z$lKVOMiVjSzDBN-8@r!f&wLE7e&dxEsR1 zq(E4FA=4SqVgs|n6wUhbc$^{=9H!LNc2BfdoWZZrhepSQLCBX()Tm11Q z*WgOtBSf2T{Ie&XHW=F(h8Tkr>`HmowYSOY&{8)-Z=soQD-J_qKEyv6xCT~e!fp@e zu(}WIXxzz~QQN@@n!NoZ?up!uGC#j78 zewjerx#!m)2`WUHdj=QpcKlJ6@LfhS&ga@snL=m|*4#shgQTa$>sDj zP539`KR_#GbjCLgI54oIe+gRY{s+)1EGTMjW&EEVbxi+`S=G#c0FJSJ_|&n~p)tZ5 ztJtH+OX0zBaXsmlgl}O`dj0$bCz&pgYT7QWOiWQSSS?Vk=Qfxc(+g@VRWfp!*l1O) zq%GDPPUE&3pOUuBGd?bI){Zk)57jYN31!r~&pWrDJiA>j(B|0Y->IfkP^u2 zA@09;uwnnLBckaHEfQZoc`(Gy6D#n5TqNOF(*M5_t2o1FNCQ zq|$?OBYX=^WULN?|F|-spU(7JUIAQXYU@`%z}V)Xzq~Kd9sEyMnTz`!Jvv-CBoC{c zt?a$)`H@K_x529+WTSYy3Fwfk@iblmy>x6cZOj-!K!DL;VDwCf8*9N(btQBNU3pG+ zJ*sBc)(V>&g%XK5IJrw;ySL-DtWG!(Z!%20updEluGjfG$;=I`wpx z{3EQOOgnCe=^=Xh9_Ru 
z2Ix@hNug_aTuNVL{@mRP`ZcQ8T8np`a{A_nusGqYv`EhhPJLDzuv-#H2OH69K_b>dLrfPASdv}^ruIRH6rO6+x95NFpv^v$-@`_T4iZ_o6Z`9j?a~Jcd z7~RRM%%R4jP1eGH8jLH6fr|hJ#0}lU4dQ>?kmQfTq(>I(d(6t9xvRc ztXWAp+>_o^oEdtJZ|&qliT-A|VHT_=TB|J*5}6;OAWd=XQJ|XwnMK`pgW^#+XyB1( zr<{>+lnBS=#ljHP&5iefxvNS}t%B@yu@ssYe75HeAD)YHh|z))ROLr)5+!9RBV1|6 zFV*=qvMd9-*aZeAp)O;sDuq2n#VY9opaC+B0^lpmB5z5g@RfdrEo5owJx4vT&g;OE zhH93pqjoMOriT|?tqLX+r4(npHUF-jb#gkJ#_@F;-3s$2+CNAboqDm+=UClhHE*w= zMJRV-st@bS?-Um&W(pbHp5cR}Rw=zI2AZ%AG7J}q{7r-pj<|v)zL;GC^rvFBYBrgE zrxx@JnO7F7`Z)&RG-dT$0f=gq)I9^+E-&)h2bcx8$g(c zj@Te3=a0fEP}sE6Q^Q)`Z%O|(eq0Ske=s_YnA@S{Q)pKan?w0Hf(-CscZg9}kv-GM zwEo^1QPAvh%1B*i31Vt*bHQ>f6Ju;%V~hO5aL0(8@_U=P3~ZIB1tld9@%l*eFnyv0 z9R7lZ82wlE!N%aCN>g~zBdqU!KHOzgG%WEzD$J7ZSQA(D-Gnoal&r~(OsyX0FnzmE9xLa?nJ>DfP~X=e^+y!_#ZSpa@d$ZN|gnZ3X-m}-s()f+hjAzIC& z=BS7g703WL_`6Q1L+6d=l=x}`Ayiz5;HF>$JwRm^$qJr)2VC%G;y?&@hvZ=Lf?EU& z7wTo!dgko;m^$XjEo)n+B&xTB6+FN_BA$^;RNpz;AXljhTFmYk#sxZ@!e%RL%_-u? zDCOf!YRp&VYHB)}y)IIMi<>g^7}8rTG21+r=t_9SbBY6w@b@eowlD}14lGLi!TH38 z(|I>`dG_%B1=qrP%_F)=xxEz_km!wIk0XGfdRQ}l0JAfj6x2B=k$HuX*x_dv7|{ z52&VkzOS|5+%?(w?krb(m#eUd?cauMHlo|@zh3QrKJ9&J3VE%;l-uRQ+dqRO7;xta zf_H=L?kR9Xi`m_F!~4_^p}q;CG}-2ae+|a`I)I7?Sl*GS_3t!f$wmk6#-8kH4eQT_ zko4&RP3*hSro&!W9e5*&?7@ZySuSFucc5;0Jad&e$UscAQ+mRg{pPpSQ)`X9q*AJx za*IuwS;K0^osN%eQ+K)HC|>y0v!8GVgFrWAty1*YTbU`i8l8>N)Wq8^r%<|QjIPN& zesakhe6JKYR|4C8M6Z}sSAQL!92Vd({PP!1$&=r>&qO}Lh*DTlg!>efMLF#^N{WfI z>wIqr{2?JOn6c+6iL}>Slk)V;2#0V%;5qNjTD@cUy5#|>;Nu_r4lx1ma7a{EtMZCJ z9pVUgB~@ixU8`zpywK6!%Bk!Wq@Kv8l2sYJp}KHB;-4Snp+R9^;Fi!&-JS z(R`YTiN(RB?+wsvCe?3t;CU~id#4-gDU(Xi!=LYYhx9UYe9N@5aeB+s=i}oJu?ImF z9TE%V%ua7YL_ouo>=26nQdQka02{CtQ>(uvJQd{CUS5K-}raM(%m zh`Ni5)J=uhlQyjikbKUy-R#6BziZZzrqe2|Rf)Dqc(zU(6`z0dCZg`m7$A^1u$_BP-)=_bFfAHgv;+T-?7Z_npTW>ztG-CWL8ub$F{odd6 ztC46?gwanQ9mv?1J0rutdqpo~TY<5iS>V1N2D68y#5gvivMfHxL0(#=Wx3wy%rZSA zYnpA;+AYb-HZ}-Sv^V1lkdXE?=jp$pUeW^x+Igx}7(wi~_=HQ==K*dfOQ`lt`~NhN z>eu);Uv74CyjMIxLX9 zO!EZiy(R{hk~dezXQ5<&Wg9qKPrHX?T6rK**u9xP3lz`>%IJ^V(BR!GIU>$shVGUv 
z>ze}vSpY8z)48jVot-5`ba&Wi6b;^m5@472gn7W5Q0ZkmjVg~}C%krx`^U`wf{+s) zd&~HUu81?-+=v<8W1IVL89Ji@yErd}YTZ0n)@i!aE3OE5dM9ADtuxPB&otz^kuJgQ zf+49dc99M`WozGqWQ1>(^Cw?Fj9G1+(CXs_kZpFG05gHyEmB=u%Vz{fp<;<$Rg1G>PxA>fRq$Osak}m95lb59U92B5mM*O(pUC51sh$sU$gPr~fyW^bfP!f5eial(qk1b>p34YHfk*i7+ew!cf$Be(rZm}`;6Z#N&) z^GdrSi!LP{T4fvw&>Io|Uek17a_2S&z$Q~PTwkCpc~a<#AXI6J9XZKtEf5kxLk;>1 zlsmItssn=B{a|ymr(G}++8_^xHOc^j>kD6X5)7w;s$AtkdN^{2nNL*S1%gHb(%N^{ zEhBX*M0}O1glEicEfkA51)ODZU%8(TQSJEqTtu)QrYXt3dJd-)#a$UE88|G-IJc@yGVORG=Jyyj^BG;d8~8izrq9{w zW_6F|isToad6b!0?Env;80gbofyJs-XX;oxp!n z(Et8&QC(NURl)kihmk;24FAED+bFM@Q8;s-Ux_L_UI_)>fLJ`|v2K-I6KllMo`Dp} z!lk3*wPe4h=6x?RjD0o|+4U;M`7tFiQe-7HblW>|k?q)Z?bUt#q&>Cz_V9G=3)T`~ z1%?lPJUIu_V0K&`C{C`ga0sY}B1_NGscbn6=eAuHnMCZNm}xKKT|kAZOq8oIX-3FW z?C#+lpQ31}E4Ib?`dy^b3b$35#qWoy#Y1tAc$s;-4SIy?s05(GYm3xkY>WabTILK2 z)vm-0Ey@_ui$!$`W|u=2a%TQ)20~wFUVD5Cu>g|+=J_{BDz9hJtW3bSIG0nF*#(gD z9rC3gW+$~LEK!45m3pv3zuJ5?fM_PA&Z#nX)=H_$Qt-1dpFog;SRa8{_~okj6lOrt z`Y4?W%PM$_j<8492GhvFnyNKfLKdYWW%qPl#kkGtkfGUW$f2XVkz=~+{CNN0*M!zcwmV%5UEuvvt z!&tTF?1Ig$uK`LjmHe7t+Ybw?54l|Uc@9od5b|(VM`bSJ+=6WG&XQ!AwokU8>Hx(R ziT@($UQ>MMwzhbGX+vuUN9MbOt_(Auil_%_zE+6UpU}?^DU&_e zc_g@L;X}WvWc;fv*^R{=GpfxZx(_SErCai`9=UK-Zhh%Gc~>X*OZ;)q>70T$;WfBa zr~o6S9(VZDxNBWJgtQ@uTYn=7>Z*D;wpc`il`R`;mbLV$m742IoO>|A*K@t(;fpavyB)9+NXi?b0yzZUR zkPy4G`n{FQ^z(BNrX*RvDk6#F;=BiuUZU=bET6U)=rAPtQ|8UV1q{;*x9Rq<2snLBVxYvYvN5(oefqEs295BpYoHG zfu=>tEx4zI^XacN=66j=YmoyzNj(!+RtHUOYI;*P2cJ2}*~f+#)>g1=m+gWmrhu*> z&H3h!!#_O2w|5?Ca&a71tTJz)v@sJ?rAaUM@vLUX>^>q95JcJNO-%<8LLxH}*XE*Jz$C zDAybPlisuvXX7;cn2&azURKDbqEsR1jnmH`nP9r|k7U|jgB!k;rNiZR;p7V!1mY-F+7a%fE#`NCJ)7OU|J$5Pj%{;@`fC{Wfd9{PZv6j>6aRODy|J6V zwVjpme`JdPCXD}?BmU>0i&9y4#Sy{!M5J96Y?4WjompD4D09)bR?}pt3}dnurxlmP zG1$`%F(%d2$4nVKLcSf{_tbqch*7!i-LLme9JX~yk@6qt(RVR9e)UPQrGNW*vfiVu)n zZ4ka2f4(TQwYTJdlDAUTdcDe5%^no4gwT2$OK$6)Z^C|2CUNnnJ@6UOzi3FbylE2Q zSvUNx-V&i=B$uN@V!w6nhvDFpRY}(bW22zXn+m8Ud$VB1xtUxs)KI4%!35hxvWeu% 
zHC<)*5>(?zw!9a`d$sC-*Qxvl(Jo^mNkNCBP{Pw|nbBmnK2Y1oTHoJ#Ql?Z79M`V$c{=Z}=TYX9%^QNQb_+ z7DVe84#dWIQA+w+M6?XNZ3=HTlNeHB>j-qkUTC`!S`OD4!7v`PM!fdNN?=`NssE9X z;3776zROfd0c#}AlV3>z4Na_j7)@BU!+NEsJ-C@Yc6`KkP%FUtMO4m|shtAzPzdo! zUN04i+*7h1&3vfQl#1ob1-dOqPOpVW`Nmpq<`+ss(*~x@Zcy8`p{7XMZr`ju4uM`s z*~XO$XC7v_{#6wyM&K2=N|5NTz#uJ89izQ&8@~`SEI~|3!?9_wY}`_u^Hw_gvqlV| z1a{)ZCZaiCs697D)Rrz>W)}4Rv%lmy(#31=(k*$VA@D|F=bSmiNadof|2GNVYA{mGmiBm%9t=2(2)?!lr9P5yvX^`j ztxy^NfDGed9ZEf4GNuQ0{Z+Xfz;KS*2U}{^Ek>gi1(QtLA<#|e z3&Qb?bn<;eTX;649778aN2dE+`ZlbXV+fn06tf%got}@oe#qqd+ja_yZt|Y7D6{Ds z;v;UC2XkGyIv6gz&o(3-MckeF`@iK|G6f?fcK@ROhW~>4|L@_!^53BT|95!&n}8yt zZ)f)(V1H0$^Dhto_EAhnOQ-S;5>f#YAZf0F{ux1;PaaDR`z)4R$jpt}EZ1B`m|ggkv09-kzvmp@4X$GK9vY6z zz7)u{AJ#R1FY^yM=y+5SGK#4r4A%R`zT`DA|EgLfH45m-D9 z@OqcSmI?CbT`*Fp`r0~bht6adJ@ctq$l)<#xx+(*&OT2{ze7x9fCJSyHCp0{r+^G;LJBF5xB#;S0 zgPfr0Mk2LElCdH^fbq@pptTQ<#_=m#S$lr> zV1jP?=QL z_)jrG_mA)7KQlWR|L@<4z`sglAqR7p|Mi(PS6NSPJiF#B{sBi zr03FlHknFg&#*`~$FKPwr@7QMQVt=UWiY!9G2wDi>mv(JYL^j-2;lZ#y$=YWClD^= zDG2cNPbD6h^9Ul&w`I2M#Mb;K^u#xXP_iQl{NW}Ba-kn=)?)gjse#s^Wpl26b*YFD zlMB-VmV7fVYfPt3V+hGUzc7b?h&K2V2;tFQUV6L@d3C(#5_hg6GIov5kgc%S8Ao;q z4=fNtq&|=g%v#y>2UAt7E3UPql;RD=6N3^KwDZF3Pz>$i(p*Yo)x>T9e_Aj}1HF}2 zRLm_#s1RPzv;2`hN zP*Zz20d+CUifzexywJ&YGDAT`&kZ}?@_w`}%jo%pE5;Zw>5U2TM{5sGRK=vN)@jW6 zj8W&1tW|nSHOj9N&{4RqLefP`%&Na0rHbfPhfpHv5{v=QiluVqpsd(IQGT2C!JVzz zWA*#lRhWy(X({YLg`WWl^T{5-Tp^3+G+ecra{q{5KafI8G-$2RLsCS+K_V5HoM{*m zCzl%CBL$_~N3~E*dm+grY)W#fl9|h{w;sckZwi(RD2LK{ye@|^p2NOzGW48Tgtccm zwTUxOP|U-EG=HhOP){d6_)I~jtGQ_FemuA+8Lb2{*A*pQWbsL&xCh0Yq^=rYeET`V z_z^gx?P@#8i#p46*V$A&5G3C6=TE8;n6SECdcQ9w#AL=$Lxn;D2LY3o~BFsf#evG9h z<(w8!xP|pY00nF0x5FF!T)*VS!$7Vxu9tbkd}Wy4J&Zv2Y%UB`B`WU?#nOBk3PcT{ zazngx2^m%56yRGKqQQ!8Ao`wnrr_lW4D0ih{`d&5^Eph@v+x40MZjC=8hb=%FCPLX zrD~&JtZ$nS0anU4FGUwVekd66S-d+pu6*=?J#${u;Vhzj^dVwI3D0A%`omu`7r&| z4$-6r@^MVByVek>mY*M6Oi%mHOQ^`6bAd3FxFT4yJ*4Z-Yb}pz=$fb1;3G&0p!l1xKn+OE58%&7N#+~~Ek%A4&+zG_V$@C$%KXk>aFIZ-?)NlZ>>bu<+Z&Y$ 
zutu0>LR2YvV<}IaWz_VqU7>G~dmbM<=xfou+p^z(9|?kcSp)TddP^XG#M1vAHn?n`ghjU`4?;ANBNRjzZ58yDj*~#9cjMWNGPXapNzw+Q z+aZ3z+jw!+Z+QakxR;MKEdm9muo3GYoXFttIPyI9bey_)*46C>Od0kM1IayM-sY4p z)~Rn2+PwFayo$ixPFyNm;xk>O-hF4Jk0p|FcDuAJk3> zBzn0%rqptHfh3XGQksAnD@f=`p)v)2>b9hD&}IunuJkpY%w%4y@mqioN|k##(d7gb zU9C!T8(IKPMn2ZP)>e|wspF_B3_I|nX$2X|rSaEY&`jqpa~e@35ZUgra&g@PX2+ph zf)Rb7CK*)V?DyNm9_!T&Dw~lQ2A#}cXdZZfi>X22JE6^&dgLB&Z-XXOpHXd|?I`MG zuyUn!eVwOHz75-}3?9Oe!N6dXN8h2$EprRi-AbXz3Dg?M7w!ekILKZe7WbwB_?LY; zsH!ejAU#?s?o~GogbEsm*0IdSOfR!Sm%vd>5J(+&apN6B)oq3~W)8YnN8Da~=0knPo;Duv z+d_`Zb5?hO`=r;M=G)ll_id7;fWdjSBiY_kb8|;x>V&-xBi{ z+FD;44@Z+bEXc!WLC*P&{a4o%oL)HM*B_$u>;GtY|9{)n|0Fr*e@G6>H!S1@))II` zPD!PK7s+MU+?)F3fr@XRjdCdIEJ!(@;OV>6$eMtb6$ zc>Q%`x0x0b2wXGNnZX+4(qH`4NSbF1Kdge(FMZX z<1@KjL1y}GPOITjrcA#YpmYY%f~B<(?vD-1G#tn!Ri)O~i+>68bZZW%5H0?pHUq3+ zfi=MTfLxdh#+%I(krD&X&%AqSfe^niG#+0oPOgn976KkApl=K1-8scce(0h1rwt+# z%|2~r&=(1J{Cph&1r;2?3npWtHo(8_AqCPgTB&94EKwb=*b3C9@c@p}oZ-gn zO(yN|$LKGM+vQ;%tYOt6R|A4u;E^lJC&T4p+e^cfb+UQM2^8G0!^Y%Qe;T>7H(YBb z-f<4DY7+_mO}Smp3;F43|K7LPV3PQI?anV)q9-PoiXV&pmpu+nVt;6&() ztho;E%sxA>XA#T<^83qrjuO;};Bi4WmA=;3fielNy_W@0NOb#BPGOdKlaR(aki|hg zc0_T<-;p4hmJP}*!I4(#Y@VcuN+Rv$82NaGlBmlm*^RSt@(ZD4Arhide%i-PQjq&8 zIh+zr7%oe_YWU86`saqLjZ*zZR*f@b>3MKj{w5-v(;RPd{YlHgN2>b;#Mw6`pf1%wQmM z@m(1gNj0-Cc>HjgV5ueBt8?h#7T$mg)fb26JRDjTd#z8sf1c!dd`xwroig=p#a1|K z&TU9!6Yw#VWpjksZXhG_zIm1z>8;~{g?2UF|AcA2bb(v3iuIiCv? 
z{Y7Pb*Q0gN=1He1#9I2JrDswYdA=NWFsEOL2@eyrH!*BCr$1JGODt^1(-)<>Y zsK-95-oz0NQ@bh0>XzrI#=?(_rc^kAn#QK|@02hG#MdTOp}Cj%>kIQL_NIr+^}3U) z`4x?*yI1&s;LteqM?q@)EBNJ4?Cq7s0VY!p;p=%9m3fXbl>|56%=X4g^#hU~i@Z(*} zRi}1^+|X_jui5J3ZViK@%zKc0$GD*0GN^Av75SKJ$^#(Zg!#e&o`1r9f`K}O3|w+s zWbf4v-jd`$x_k;^c34a0i} zf1XsopYr{M_9*FCw{d?s%+)`7$1d&A|46?4Bdi7ZS5MeZ-^r{oY1=lP9$`3}&HNj9 zZ<8#>D59u9dAU*Jc}7P0S&QQch-_%s3Yq2m6E4ndy?@QTkra>J(X-30`{M9bu{<7E zt$3zlE<<(>ByvC@)gsZ`r^O`V;p1X+y>OwgRpuovlC7JDe(Nf}lJStrS*2*S#;s-Y z>xyzVgL_s5r$8kWcq<{xau!=Xp7Z;c&!d~W>${!3qlfc1j&%8Bn9D1}yGdo8-qxAfAU$E1C$@~J3wIZ+)QUI7PK0v==!}#>uA3X7sP0eYj@QA zNj{k2@R)1K?N=I~aR-BcT1jBJ#nv&Xci5zVIT^-j$N%0N({Iq%~RO{kLyMB&Vya!zT?0h z<5BH|<7vA-T8>SW2kYz_XsrX=;UI^I#x2Bjn^F+PKUz?}Qee@0a*Zyc=O2$6{1OTA?2glu1g#5?;_|p>sptWmxwmw%o&) zy7u4V*5fgg>Tr|C_pMP$b0Pfsdm25)qHN2>#56^J^=+1Qo=dHmxhfnq6mp3zH;uGg*Aq>&<4b2lW&(%LpD)*wmsww zV4G7}tSB&19t>OWm^>L-h`A;r!f!@6u^&8$Yr%fo?u@>lN)T{1<+LX4k*6_w4_u^7 zUcV7exuDfQfl2|{v^IPv-m$YgWhw=FhomC=JYYtA%2Td3zWUA08p-=XjM(X1@uJ4kspdC+yg# z%s>2Wd~1w8kCVt0j>_Zr;|-Gz70mC@y$TTHXIIPq%q=E8EDKGz+X}FPm#PcKN|tg| zcs7@t`FICoEXjoD8O$mi{(8aT4N2E;{i7{beQgYYEjPEE1r|*hh0UD!2MKm})?exS^P81-C^w>Cv z_|=4yRsxQ~dG_x4mpov7I>;@X7>LVIHi_}`*RE*LO~(hzt1r8ssrPdZ{X2M?(+BeM zCCm#IB;b@{6!g@P7K)T1%Yz?XY}DnGb)(}*Q;zF=^=RI{Af$xw+RK)*i&M;h1rLZR z^iS8g(w04Vkh9gY-hKPkZd20j`Y;aP-ZCy<%b+QGo=m>UbrM3~t(kPxdIcmGG5Q?D zMob^XJA^$CBPclJ=cM#lgwEeFP2xGo>Dj|*5ZXN!w>RJzI|sV!9ypPUWxP{$f`Zz@ zt<#YD(;p#j1-43p9{e_F%!=mlB#ozWoZ6 zbqaKqHKX+xlrG&ric$G?A7x*H?4zp#QN5YgeOz*xiLIQCbq^Ot%0R;*AlJ=wiMe;j z_aHzS)GYI!dBo{vE$Z@Wh2gTGS2Xy;Ryq#DQVGW270i-Wsdq!vFn=rh2amQXG_m9b zNQdSFJxIb4Rj#_H7GZ1;7=_+07|mvy416l$&$wTh0=sV>0o9`P!ib4-Hc$0kn* z9Wcv`44%IN8@-xO_XbNeFsT61F%YLr+pIX9)fkP98?67f6D9=oCS(!>6g{Asqro-Z z0igkA`&fqAFe%Q09|Q|{jWWQGl{mZ_MXO)hKyou^i*E@bxR3+z$gbKGq}jYz-?uGK z%ENWso=7Pa+1^#vB($}9CcettWOg;39l}{EWc?L#3~Yq}w(B((TPkogjIlsA08C8FZIJ4ub#n@O~r+9uQ6^D{;J*Lta4&` z(F=lwzC5^bXVffBR=y{Lp5cmY$R?m|Yo)1y=OWkVIIl;QNyuzHaLegTH7qI1z{{HQ 
zG&{AGt(DGs&eedp#zZKGxu|Y!yUvdEwE^@A@2IY)kFL?`fj^zD^%=gnRI4~{PRou1q>*RM zQdc11kfAsG?cGmEI$LKvzPjgo9yz!lIw<+O%5UEv2CPx(N$-^Ef-X86GpIQ4?Z ztG?d_h@&h>@P#wj2TpPZsoZKAS4-2th(u&Fa80ro)qI2VK&xgW1V__Pjt5R;M1)t} zDO6$WkD^_-?_o(-_#*M7OW3y^|iIMe(){ z-(%!B4RquO@6Fa1_nh@Huw8Avb26rN6{f^~nopo3h*pDa^h0Os>C~k6GE|;4XTbpD zF!uJf>$TnHpV3S6UDgOHOQGN)Kt3iZN`zlG+LiGxO(E=13rFY6tFrV~c)d%4NW;ihLev2u`ej+sF+KPq?16W+OdneSkuuvljsbN!b z12GRD?}yGEM3;_lo6w&WI?|!Ozmji5s}7Q*vJD6br;wj@Vf>YC+)v^Rl5dpq?&E+C zN^xYDvQoNt!}v}4NLfGMi?XbFT)1k>GYHEc6lB^gJz2F&o2 z)C(3L6ryy+ksq)r&gYzAuRyX#en-FT8(s&&B~GsKrba>ZunDHwLAa#I;7N(%S`M(7 z5E}SQyZ+@Cz`LJx`2~7_z3^nic&SPx3MJ^83WQLiff}*O>1XuY*2WaRUdEN#js1)o zl_sP= zR1ZvG+pOF}Ws}*Tu#eHi=WA*D=BY3tPkJiZ)5q(*4gSFV7mPJfRqKOyhQs#V5e2LKNSsv?4 zFDzYkN1_Cd&UTLqnZ3QS-z|((&zUBWa#s3>i7Lb8SDSRO#67CYf5*cg$Ec*8?y{q4H@*1sRICjybL znUl(;XjZ{bP4JxzU62rIM^OffRky~AvKSzWm}w>>h$AUYcwAVQM~5!u-WE1^8JJUk zg+P$uGiZxoLY+dHR<&oBKH}wtQ{-fxtc#l z4(IJb%uT3;{7i~CEui42&PJIou*tTKn`F>FTVm&Hy6tdT;5`BZ)U`OuLW`i(x>p7; zu;0>>-%s3%&zRhf9&93Sj8FAlbbv}`lt}Gyx)CUu*zoGqGk_!s!L`hNRMnLTpz#`u zgebnl+ca-VcwksB;eaHg6}OODU(7!l=3y-9DEA{6DkM|P%NW6|d>VOnz$-Q>oa>p(86=D zreI8%gS51Vci~ECsOOODmd>ZBHj`1 z{6~&F_f#6d%d4q%&FQ^fYE{fRswev@z7ivg(+BaH)PQ)~yF(GP$M6Hrp4>MrTaFa% z*%9AuvJ|;hcYU}H5h5JRIVNBz^78IfbtF(CT!PhH_MNT;@5%R$rbQgX1ZpcXJ z6oJO0VtC|@_Wn)jn4F_UKa^&lBWSswdHb$@#nEBm20)_3o+wv(XZeQwQ1{0hRN~4# zWnys4^R;3!c=z%+iT(K~N$ppe6y`opstc7&QY%%`Vq%JlCDvK2j_Cq%<%s9Xa;?oA zHiwzHN1~Nv;!%CeAbb7P%;YiyVu5^+&@)LJz}=pZV>VvLn7e@iMJVUh)4=40rRUPs zN0fWj>NkE%bu1?pP$sib$oD#5H5BDQ02S)NIMl-2uJ?0w@}0)*NRm_%D)t4aBHqqw zbBYhCE%%`|e4xIl7-(8A{>T7r{kP>_j)smx%~EDTxNhszUmuHXh!QiLFp*v(rXd$>p3u3H48@5KpB7?4Ft@) zQhs%~|Ja(|TBCU3oM1A+mKrh<&j8oQ8In}<>j$DhK|?>MlvR&m@H>L;4iG-_ostTy zJtv@QktNtO2EvGm+lI>8avng*b4CNjtN;y^;h<*J`VqzYh zI1R$Ii9Y?upaf-#G!D>!69Q*Cj6C#WJ!Oaxm5V6c^YbZoMJ)Blq@gARF01>dJ}Iz) zu03n1Kcw->lYwP`P?ETEOIlCrXR&xD4}~k_;$X9M+JT;vCvA|^(-Psb7@*%}Za7J( z{Q!ne0KCCk7+~THC9;ZzpqcI<#0ocm1kp`w?0&NHQcmcdU 
zAhBMEbzJ1*iXWbmg-~i2U^8xoc@w=Fv~d&7;Ba0DkX$~Ui$k=`vpOYn#JMa$P6;3;=r z&ENe#jtJUQ$=r{0;Huf)9IbB4ww6?dEJTXGXuA*ViH`a5-*1y}mV{-cbk{A;csk9; z^=uRTBdJEWaiVnt?2B6@{l3*>Joe%7)nkfqAKCZQn`XfkqZOkVEgPdIAk^Z~{VDpl zdJ&;l8S-w4@#yYRJ-4@AXApcsfpwn|p9EiSM?fLavm%Zc^+Clk5oa{}9BjKZ@B!Pz z3_U&6%!mQgAw|h>`0k*n!w62;+39UM^Xm+#xKWF1Vzlm=VUM*|3Vza4*WuutB~<&L z(^D#Rw@nu`jEA!d$T3?C(>_hZxT;1EeiY&L^k)_i3Pp7M=!3weWDtwbM4X;Lphq}m zLx}EJYVn!n%dAo9Z(*h>7}XuSamBkc^G2<3KWgNvSTt`;kEy?(y>Y;{32^d*0e_k;zmJ3S6U4zV+!b$+T1r2t;L{Ny z6^$VsKd~@M?RA&oQRxPn9zyjh|2peCR$GV4I#T*T4u0br+U6$D$}h*=f<~vr)5lN* zQ)1TjfPFlSS8&9Zzz{Vvrjx;nCqe zteXpt986H#S!(>&0ULR*2v||mPehORa#H@V0Uhac8J`{bA?(S!Qhse$8cY6XxGk+=oEs>uv|$hWFpT)1+=DPvb9( zhaSI@0B2Xq__GS?Vhh>gFU7GG;74_i2E@}LY7!q2DC4!O-Z{bLZNwH6#N?Ib&hD2K zNp2KN>g1Pf2I(JDMo#4*P+XYN4Mw4?^z;OCwp?_r3DMp{7iq9WB*b~*qu~w^bh{1l zywls_lDg<>9ncdrWyf6r4oX@OY`Z@YMNs&mps9+-ldcS0N!HTd4_vqyBlD$&o5u-p z|18`*%E~tw@gl@6)Owa*ie}g;WOVRrzI1aR9U%hfJ|5VyN!Cw`I98VG5H2WqWH=of z4l5xJm%rVZ2og^kwqm_&4njJimm_i-k1hz`Hn3#u~-;bMT6HKL6@+u@% z)J6=>XxZ{i0C4Fy9V6gx<2DnUaQ*2HykR$uQaD?bC52B|o{)!6cH32<-VWh^HpvQ@ zkquNaqMrzdxH`5#nUX)9CLr4SAtKwoq+(6bFjgl0=K%InVk()lOJ!X^^PGAi0$5~V zA2HI~uisd3*3wlX-DdxKUsH%n%gye=TIPo12=r@vbh;I<3BV(bJpNEwK66M9rf?XZ;gd@WNbycg8_GR7`srk7+rozt&KJY_ccQX8U$&6?X zfLu8;)VWaZ>B}$O{s8&-3`X}roAZEf7~DhvE6Fc=k_!v6aG+QR)g?A4Z@WkC(Ezau zc1frSYs0b4KyfQwOd3iV3XH$l_aNbW1#=a6L~3)fws*W+BxfKXInBJ3?O)V&>}nRGZ}t773fpgLToX zmeUE1)+zZPaN0s8xWS)RP(-AmZ+p91EF{JtmC6C}-J_=)j!^HMAV_`MXR#pH87@H~ zEJSuta3W$!qlsiYsgDIC@rn}2gQ+V3?RN1fQ6PRVoFj3eVoVV)@w20gF9%MH7&=~9 z&!PeG5xw0qTRIQ8!si1Wgg@9sR}kGI?ItT7FRYER%??$lf2g!7PBDg839O^4LfRz8 zQtcG~lJeiSZ4O~GgrH9`?1WdadNbB34>hIceeXN2Y%*q8=W?v^EUi_tprEAGNDBUe zVWAa}o)0hmo^3LZxqV8N*g8C{Arr-;0a(8PQhw^+sEl2~q2B~EH0g0>JtXd7V|kC~ z_@N9%-RzSQK%NI-;{!=l-I_c@`JOK``05lXS#R0DE3kZWNOhUqDeXf0q*?0bgjnC7OW&AV z-nua_O+=aqTW*TCQ#6(0%%aDB zTC{#}ve+OY&}7qOYt}L_9XE*-P4)#Wdk0v`XZ0 zqn(T-ZxR1s{}!Id%C*|&Tw>Z&>9LnZ^}I=UsLlf-G2KJ+S|ZaRtY~iA`JDpIfssaj 
z>D0U=EmV``XIcTx)X?feYQ?!^8H1@N1lOp`E#=C2?4k{A**XRhGFo?>D}8*>8loCL z%u#j(4Vy4CgHbM&Ny_QZmT;%jo7~^}>2m?e*thWO8aiU@jmwI2jxzLjak~9SP`fz# zmz23+9>hC5zQy^D-Ob}R4%=9cD@2L0s&=;ITkFC6g|dLIi;>@zxaqljX2OgzCn_ z?+tAFT}ZmV8O(re(WwNRfzu(Qze~w0yD_`Yk{*DD($)fCAED6E;#(oS3 zya)A?hj#*RDm2ha62>p#vfeJI9G`8Pndq0Pfaq+8f5s$!^RwtLBg%eMnO_}Jzyq1p?vmd1iZp_{Ca zr&jo26Hs1MTQHzfpoQQzy#RiFrwR-n;=bZ~_1fF7lCS5lQ<}mC=goa4;-~`e!u8OkNIif7H6rWhzMcsfpx0H`@S6~ zW7$^F-~Ii!JA6#{@{1b|008Wd0sOxwpfADyoPbjN&jsnfJH!7~k*aGtuCta+1$^JIuRhnAqav<`X(hReS5(J===mD4HI^}?oWK3r`ohyKV4T14+fwKw6pTIn3X zuyNXqb}Plp3D`kKk{#&*v+<$Bfq8n?1E}s{!+pT0%-t&N&(6F z?L9EXC#CD(`SS34XA~9(|Mg?(CviMIR2CFZmwx%~4^$2h&Ur&I%IO7!Bu6cbDA1_> z3@Vg7RZ3u&VtGVn#8fO*&0M=trz}z$TF|##H7;IB$NU|;I|VWWV_5#BkllkqhTe@K zlvepcz0pUUL+O&JqooF2$D~RKc9A3Ujcv*MupLApe=1PZ(FF1i^P^o?fZd+ca&Bwz zT?B8&uihPq_A(UaV_5r__%QIFX}6kM9yxZPdCUm{pgV28_Ypq*s&dr`jD}d({Qfil9Si2O6JQ|QB@uvNhW^$YldlDVvntW85Cp`LDc z=BzoRZ6zS3aZL*qrHTk}oWd>YM@$=oJ_t{~&`W>6X#+l84i$kzKP0P>F_!6fAH)|=DFGecTa~4KDxi5)}U3l7pN2=;`b|sgd zg&Rm%F-#OfA+LsE{VZi&(4X7AI&46+-}1B7?wTT%E-Xj?ISSG8O2uSo;7+V z>OQvgo+ClMw$0k(1}%yc8%?zfJZGhgZD9D805uo7*Ymo`^|Vcv;_ZCVF0OeP*s{ZY zr1Xvzm_^on0%*t}&5*FAQZdAu&5_X+H$-F*Tp(zlgd#{zK93JRKJrWtxR8bwI&^m4 zCT=1mi;F=FDLq%QRr9z;#ZH=jh89D?&2Z2Um!s>K)M++Gu3kDK$;V3UJGZ*sR?nKD z9EBV2+CSOQ3Wfwv3xI%o(KF(|!1lFj)!gh4yW`OsyCC1w2q7#IW=>A{6e3+6AaWwJ zq(!W3eLILy{?9nyBtG6tr12B50WC%q?&^S2AF<6Wt5I2280*&q(Ba^ z5hGOV{bbIzi9Z&rlhAD@Iy$i$U@dz$>FLnt(^2kP_TSRrB&I;mE6~ith4Igcw^Nk%=ey;Qg#qf} z!zuT+%ygw$lfFvKJ8!zTP_L9rNI)9^^6@dF?8(sDsLD@2j}+mJM#{x&oiSzq%=5>_Tt*WP0;G|7>kQa9RhI_7xwDC9lj(@aY8@c&x@uHfAf-iX}>W_4K5nc>SSU^FPzk95W2jIbA2ZTdx)?Wi&|U?5XPHlVOnh!Tc_QE8&& zQ3R!|q7}%Qg2`wTOl`l6wG=9a=Z%%X#ZK#3Rr%9W{K)8Vk)c#38Z&S2Yf9ZqVo$6E z+1+xYfHUnOOV~g?CMq4{`VIEBE976rS=P`C9I^kZvyiPr1mph9*)iq?y#n|RdhMaboiQF zfp9oeZ}z!|bN;;1@!A}YQa}UTdztcHi@f*7!|K)-rOZm6yq89(iwZB?Y23nVK9Ba& z4$ei?UU1?&obKH~avy2+i=eY=<;5Cdl|q?s*+MvHk&j*HU&JfxV@{$EB~+W)3h0xt z=9(4{aQe@QC+&fa&`#5>&k4uD1?`;A#P_Bs$G=Iob<(lxwu_mc=}&rtTASxDX~ucl 
zs`56FJ3mkKHU*^N>DzTDndMrFZl;L(?1tY_wug&a{#-ZVv%+;od-fSFJyYgU{=@iW zxvh~*m_d)R@KK>;+zY{qgUM&X-QCsFG0ASSbO#^1dL0htCN*uFyAOB1Gm@E2ZQTgT z&}7RS_4)v19_zlzN+`_>O2&o_WjV@Jl-g)xL3q<9L`w9rBkIgs*5&1xmC<@^ z>Ld?_#Xx0^K;Nd`3=73V$})Dk>z(r|f`bY~9BKf4g!3$N=$^M+#TrK@fA&Cq29zgSMF$10iI{PL= zyYyZAG0)umSOFVI*sZ*RG2%Qxq57lNpH7pYJC3Au`H;fS!rvw!W1h|U!DM`^&p@Qx zGy(h}Nw}fV*!MKEXp-WZB(G_-Z_v=Xaap=fcmN23D6WWpS8{08%}CRjW)(WoS6usle;-oHOyZ6W5htUUQm7%e4#vWIO{{r$FNcX7rt4l8 z8l0Dw5)bhdyXSxJ_Ee&<75c^OihE0zx6_1wB}l~z3sFbxb?~&?l8l*QkBbzzT{pr# zH3Y1^>VoLv@^xR8$jNJa260pG!G{guUqAM!V$6ng==rF{w!)3dTyd;)O<1KTlX%B`+Ivhth3~-7x(-2mYqJ_LQ63TDn;g@?88zvnPBR?(WDdh93;vXh zJ&W$Ymp$(Oc({KhpPtyJg5zPwG%4PEuMRw|uMWy;I2ux9%-=G_(CG;=`G_l%a?ooO z9#4oGj2sMhN!akkTXsp(AecJm`E5-|K&2L-DPxtRV!tayIZ z@Xfb;{meXuYQYVF&OZnNd1%w34%dg+%=Ji`-}&^>(J*ZnBZkvVm3nt3fAH~bsSr1( zsqf)8CHiK{U?my8&%bQ-??c@ZfPZ9A%Ky;K?)oGALokZ>{{tw0LOA}*HuEnqk(b?P zNANCE`0l5Wg)7StUJF5ns6>N8a&3pUXm97tR2h-pSc+NU7zU(T`0m2vlAfCjwgI&^ z^kK?+J-U~2MB}UD0Plx(T)KYrAR13(z3IuqKn;gW)n{X*vv(X`vNm_Q+T76T9N=aX z7_FS#`%b{KtxOatIElK}=vLu09J2e|qI_(ca~yL(qhr^u5h~4?g8DT)u~z}?e^u~W z8l*p;&>fI*gMJTp5D|Bu)J$;(1g~ta502*zL1Wl@1Svh3KsL|1{hnhyak_2s3j<*j z3S}@iYxXBcDUN}kpH`9oxJ;C$8mTEbL3bUuZMhkp-GTp=(9qM!l~RTa7hRUGdH3_$HLkKw zAajl+q>)ua%>B1F;QT!iNd3%NtM40}dc-pEyGz5e46C1Z`wRLq z`6V;5gY|_whDLX8_DC>5l?jkw2q3%w(@<&}CjP}qOO9?qKer^&r-^lm(D0e?dMw5V z%HG^QoaUZOXe9I9k$pG>!HFY87V6I6FJ72#Ilyv57xsF9MnP24d~EdpLys!+YZLdZLb_HS8% z%H1hNg~sWZDu-DDTs*I(DkL1<9dg9dja~c!gMSaz z_zMw!E67Ur)1&Y;&s^U`l_z-m!NSYH0~^zWOjnOOMo7k|6s!w`^f!z|Xt6G}0MNAvlEG@yT*GBp#fZrI{HUH1`BHYSzb>`63`TnmUI7bY zV6Dx{fKj7P)A407!-Xs|niS%BXf0u9`z`#O?LxAF-HC@xEP#KCAVT8xvz=(xp^cxf zNPR%lf!KGPb%k}}x57XGX1|*Jn(Lmh&ANs-oL260&CImI_5#QAqv$3Ergd*LcMunsds?=Qd>N_xO+alj#BqYO(Z#Ica9rKX;on zt%_2i@%=62w?j7v<@_V!7yR=&{=1C@sP@k`*8hOZ|7Fts7cOtxuF?OE%OTjZkOosv zHOaG&xefDF&iQ$XZ&n!rZq64p_7df#-|thkf&?{DfD#$)*In1%D~O=z(VjtiwRQ3H z@$<;3=tT`T@qG4Vu=@wU5RTQf*=vqc%jJa8g?BwAK8$5IE z?!ah{g~KQn9eU>mo2Jqu730x6KG2_S_Q3zY_Rcb@t|e>NKyY^p?(PnO1Pku&?(Q1g 
z9fG^N1%d=8!QDe}C%8+nyE&)(9_XaI&-li7f8Esnv-TcizqMA?EM4`?d3Skb5hZfS zO~08m4MOkYD8cDZU&RB$QEDkWS1b!1z}h1?IClNg?vi#VAQr@bRS|5)jq+nNNv$+c z5!a+C829xHQHnv?IcO>Ac7c)Dv52wKx{loSx8({qEP)G!29uOSvPn#yFkbL&ngxE* zJ8?0TGK^zv;CRP|?9dqv*z%J)qq}3XY&$Y!bbRbH3u(h0EIj%q+>nMUUvz55%Zj}g zuxvOHz(vOk3$r#az*s7>4ZYA z0o!M|FxIN}_5f4JM)zWx&G1+~Fd>!v+?D#$)YRE3(GnjQG1eh@wehGOY$fPwZ|TqI z4(ixGwaHT~^2P21qs`Mwo07YoOrgmWQCt-f(Ec|#h$^Bx?qJxt#4&#{z~)Tu(p zH&M2HM#Vc+VHaTX3TJo@zO}wcHFnqRuMjNHZcp0InpG*}b!%q~zfmG+sHvokAI3Bqt)H<&KzUydMzNH5+85zcrothflFTIxl%i z>p2G4SM;y%Uql^8mxxC>)&N_w5xfqv67|VIOO>qcOCTGQ^vQnktkom8+B4w23iAM@ zoN|eG1FRs~g)SVJ@Crr|#8;%+c{0L-_^qvECZfqaR-}A*0k`r&DDLR?K_-Yg z)W6?Ibi0IAKpbTgsE?+PVWTj4QlYDCzPPs035!>PB2jZgIK5TfFzfybM%JC*u7J%v z*e77wgK1PBbXix!-b0ePa%gh|{JN$vYn;P_rjPP|&@iy+b%UawR=8M4wnQ7{WHJ5_ zNuTSmyl+2oRs^&9w3|wE39yZrYsW`<@X**So*r3L_6b9A@b^9F1l**=Mish=iIO@H z6Y8e(rj0OX0R^PU5T*4+Ie}_-CtG)%*Ejst!qDa9>GI8=72`D0G4qC89gQ+*@oB)f zoMFHVvT7FXwfu8lYesxXMBan{4)E016C{fr%k6?4JR#@Bu?{E5D=n&P*dgx}J#Xp~ zg4pT=Z~#-X>#=W9zv@hbn^ZsTORbdX&j*e_RCGRLnVS`T3}0-+IqB31XED%&(U8=B z&dZfPy&`P`7gscvSn2i0f_g>nY6H;Ec0TjH+2V-xZ2tQvFvavYfT=E(CF?mh#CJ|U z_XrwVRL0c?#-6IpW$Ll$z0-7WQpyq383Ai*+jxO@gX zp3Q5xQ4&QDAr+V_9jhbJ;BsWATO%2KYqmu3WL{%0ckI3&jHF6_Uy?1Po#3$>DKhfh zx6TVKo|YKm{|k4ePY!(1lN&d zeOVh0h9cJSXKywxij%HpsTjW?H+&cw8)rj?2rtT3>g%ZG^-3@Z3%>ahWs(wmS`&xP z<%2dP7!6-=s_8j}7yV=Ia-G=>2kGQ<7o2=3#9;FV>JD%#nqilN<}~9s$ug7@rHL1@ zH~33N5W-;ZDlpZ&DJ^($P;jXl11Xg`VZLXVZN-pcun?iEPz&pKf^m{5j@$Y8`(5cP zlSyzy%9Eur#$x6IZK50VWYp_K1EdME&!RBg8>hM_eJN{l7}|47KL zNKjg&R6*`?C;&yWc=eTdxZ#Y35k(TU&1c__=78nqj#%{wjaN2*8~5mq$pmxi!AL<6 z{m@7cC$7kSaIpeq$lp4Pn79*7iSvxIomT7#otE9WVWpcKY=Yj}cJ69s5eVr>` zCJ2TV9XI_Peqd3tdf?S-xth$`bXM7r%$JRH<~$Ez@9%_j!}m+9)?HcK*4XNp<00AD zV28w$;j67H=$zN6nMmJ~VMouXLl%Xq(Qz)sj4;I_s*8y9#p`_sQu0hQ;x3_`E`0rx zZC>AAzR9#M9_=b14@whemE08^Az807)T)&vGM}3+r9Ye48sB%V!lP?xB!V4iq(-PD z|9Xayh8U|y0aQ@@gHH5Aa;7Od3;1Rube%QEs@%2+hfr(rtr02Qy=XUaFCht6#0wK2 z>kM%01u65gEyWW$t4Si8i}B!Ue~%h=5jGN(%-iEm7{kHln;VAHy+zjKPeDiL=o;&^ 
zBq{|2u+&D7CxLwzu>=pWzVoFq8jKx{Jyv5&f%@NXZ*H)lUa(qROv^DmXr5Q*|3J0b zFikLvs!+P(vp`M_$L^en*~;G`)BNn_`uzusSl*}%OV`^asX}>r@#K@?$sifGI;u3+ z{;4p?L1QwI9rJ$Ylwsk_2O+ds3WyCHn&E)NlskMS(;6?ix+X|w#*fze8Nf}PI4mRn zRIid(gCs2Gtu#G64V{voS;cvRn+W5D#X zUWXvyCc48%6BFzT*Q8PIp$tOX$Y%gJIPy15$cQ;hsS{M#CR&7Za9HEdF$%nXAZVI)rD5^ zE_pNa$xz%U%FV*j`0s<((>o0~nmb}hseA_FsH*jW(f*cFAKOMT0xX%iMUH4uf{T7K z9XMdVpcz#^S-|B|{$4>^$w-*}{kcVd#H=PZhXUle=p2eU1BIWy;GuU#ghtuhqkeFQ zxJsJ!2C!K=K{u+X z7Q>EbYF>=@>AvhIa3CoWT|S$l@~J9q!LjdSLgUkY*>d@fvk@#s`jb8#1#YSo_DW0) z$q!7`)HQvdTFQY~S;s0;8E6OiPr?4=j;;NuU7ht|%dP{pSt;92*3`80lgCHluniYq zKpU3dVCR>iB&m35Ya^Csf-1-lQcqspczaiib`2*Pb6Q2w`@*gI;5gGxk6)4s40YC} zakz=)MF_*hlV7!PM9xu{OuvGK=Ft-cuwJQzT;Doag!*ssVTt^&Dq%m&a^$!6=zmbw zgfV;##R_X8U>g9^!p*&Y2EoB_l`W4OINQ*oes*nvU=M?Yyz4CiYMFR81FT4J{=qb* zZNPkBHbqtMkoBBTlX}Ow`m*JurFoD^wJ3Igp5%m@ZcfEckKkK^MR)(AY3K8pSc(>)(W|2<5n_OCEqzVn|jo!jo$ z;tduI9C%>bu{vy3IZ{rks%@{fmFQRsb-8s7+jcM$3)yv^5gr`)APE5@zA9FT8P!GW zBot8Ht%W&f>yf24uS@jB&tE<^;@d;S0>#1wS@l}nR{1J^Fh&ZJtvba`Xz)QOuV6KM z!FvJ;k~wii;A`ojgoQp}@`MCt08GdEmGU1jozNppmyDkkfNL_D?B+RrFnUDm4$dc{gK;{~dqkcZ1O1pBL8-T zyy$2W`#x~6Mwh*{XAM*6rq!N!((M|SM$n8+ok%xoze`~}T$X|$&eOmD#{7jI8qC|% zc=N;)LEtm^BD~X;GV_kFTz*&K}2ZPP38CZL88*53bDbE#5Ve0Y9GQ zg36$EI4bUkwMKRJ?5ofy08M9KZHLGmjC|FNm;4=k(}gW>wp%xSb>OPW_~OOVmQ&Zj zn{}d0>P_PMqV<+@!<@Ncu4RM5Q#$3^Mnj4S{d*)DB%3MM#=NCdklDrA3rRoVg_ zO}Kw|J!XFgLE-Z78PO-YEd>eqoNfb2tDjzzzT-82+S`LyFF{M^?Z!yE=r%(Hw;-US z3qSCwx*IG;_CUW_@9DwJbsOE?ec{!2i+iSKRsmOK(->l?c0W5^ZhCOPNg zi8@e!N+>Q%;LAvV{|=8^M~=2b=}!rcUavAW5w^m(8@!o6Lbh^>&pGr~h&qjMb?QOW z>SEDRk|8%*hM_t!XEwuzr&Ap}#W+v*-8F@bQQAS+*LNb?Zsj%5`>B4uu61WE6Al;p zV(mKCWPXOhZoW0uFjKU%Z^2vi1rCiLDUlyRPeN3E+6J#@VmGy`W$J;Yd1rCvlNgo= zN!{cU5tU(2+-r=5MT4)PaT53)ks1e_RGKCJIn1UadMtQS;DR|^;iO^l?ZoKSQfn6z zHoU0fZ&A5JA>e?_Ij}A;{3XG(<;AW8A3%sDJU+nlsAynveC9kmct=IeU_`9AQrd76+$W-akq1prxEIQ)x5#}gjIMwN3L!hEcbZNhy<8&G9E`hUeqE$m7 zw~;~#7raAffHWspp3$4qa>YL_&LsLC!wBLokv*o+?V^^dp9ct4v0s6vxLH}P0p~35 zQ|H)^KJeLM!L^=t$E-XcjMn^(;sd2cvZ6TE+deh<992JmYrhh;HvqDZzzSF4qmk(= 
zv+dD+sTQ!7Di)G9hu|hNL*)An5Dy-tI{Ab6O?dIQ#HWvBot4>H5gm(M02jfC$Pux) z#Uoia@lUcYCV+5qCJQUNZ3C2&RA4XE%qOsn4MFJRSyDdAkV!CcJ>`0{jMCY6UruSK zmQU2NQBf$A^BAC18dv62^^B#0f(#K7ub0dCXDaY@D#Lh~- ziS!?bN zXfMoWH;{-b6{P7oOX*$34LSTd8@J24&E*_R_~u(b?Ugl(RoGsVqh+uzqs4C_Tx;X} z88YNh)Jg^$5RFQT^y!4_*Vx@JCtSa=yWiN|Z|v?jcJ~{*`;FcG#_oP&cfYZ_-`L%6 z?Cv*q_Zz$Wjotn4VRsS#@&@2f?CzHf(x0(A>^Y~o?&D;xo(F~vKe&EWf_&Fv)5c>V zsi~p_V#;HIl!+S9gmpCnZy=ZjjeB=vCyX#KNUb~y*KNiqT9B`^jU#)5h*&W6v+w`a@sv})4mRFl^L(#I+nT|xbXZu zD<5?oqY=mlBkgw*D^@>f?OV}0TaRh8~B|^>D_DhI~ZPBlQK4@s@E(`dqL`_CR zhXMN`OG&i_2^4*v$g~S_*1{lf)w5$5r}!B2CV6%$j;%&^f{fc7*2jo3KB-maS`FiH zk);Ybn7FtpFqWUzjMgF@kKouB&@&2t%FYN$N*sN+Jo#|RZ5i5aV6IMH0z)=Aryt-l=;;4A%p0e7*|P7 z9s_TGEiXL9@S0m%Lg@G1>S(&1pn-E>}~P<(e6cuk^ak*YDbD zx*X96bp zM!^1i4^Og~1f)BJ-$jO~20E7_3zRTV_pLDKF|?B>C0XgVsf>`c_F`DZJB=P`fr0Jj z%%o@rQTzQmW+dvu4R(I)B(<0s$j~jzh=N&<$>_rP3U%}en|?j#l(3!san5?DBOnS{T15&wwhQOV6@L}W2#rK;GL9?Pc=ERd*?yPym0 z>)WULRjcnf>D^D9o`z1AGN0A6#X7Q@zb4;~gG#jrJ>!`hfm7jKz>gl0Cl5}REx=w> z_rmlTI?=AVouPT~O5AW)FK|uW-8I7C=B3#lklEQ(`grC|hz~p=%`|a5rJ9`fnj{G= zF9q%jJns4jeD>HUDQ;MRJE2KmKag3eRCRLSo-v3)SD~4AAJu9RuWWe6s9*Pe09wcF zUuP1nj3Y!fOwg6?h-KqV*xExM5BXrWGPA&4XYZJ3!9!ifa!&lcw^mRsE!*dMLpjLR zohT$knJ++KP2k*hg9XcWu#o0my0o~Q!-DPE68DFpm@Sfc2XLfO+!q1DElPC4gdB8%92d=J^C)rpMjaeM2!)^4ri&>N<`@X0p`{mTf`mb=ZU z*K0X@9>BT~Tsl*E|20+5w*7nca>VYGvsuh7uF{sGAAv zr3!-oF=A(1CMRndUG(L}o%NA61A3oaUo*O)Vr3<1nBu8hE6%$D9wee=b{2nyJ^$qE z>(zq;pn*#dKYukiinVFfrjjeT+X}h(IZYkA8>i9(qRR~qVKmC=mR7MqflIBR4PXEBox0+i!G*9I)jEVC3s_mzfi(fw1teB-BceN6}QEBLT$f7WH zALR65c-BNl*Y}Imz4yCjmGRu_G5p1Svj__lr-*L{6T6?(ccyFJSPB)+y|IwbZHiQB z8ghK!l-)N4KQ*=XDdCz~Z64k&AUyJfX9!@zGt#%wcu^h9`s({sH zq~Kn#pCTpRtc4YlOsGrBsIa|7Y5L_>&>T6BAm%D2u0VjR7tRV-Bb*t0V_7DxM%^kRv(nOb1(5sFD6SZCYzM2&di2? 
zoL)CC9eaE>QfM#=3KVYd?VOpfuYmtr<=7Wv zlkb|LOQb;)1!XC>KW>(pqW~96W|}OhJxpr3yX_FM zaQKNOWemIX+Lj!0x;bkVPn%^8W)j_b*44Q^{1q*BCF^}n`uQmC^;B9?GGuXQs^d)0eSh1mb8EmEBZQQZvkv~SzQ*P;V zWhWYco*M0Kb}s-|7E)Ao*Nn>|N;~G97}mqPZq=sF!$Xt50R*wCpJZ$Pw7JJ6ADG{v zibZU$d@B;6)T_IaKw>%0eQ&9VyyxOs4g&15Mud`fnO zjH^FRHS?p7DnbC533p5n#%8pqU<7WcKeQjE;*P^i|A$+xgAapfNss%4sTZ>B2)!h_`$?ssUr z>Y~M?zI@S^wH5*))U%FpaTY8ejsj4hebsltUt+Vo4Y_VY+8c03LfpK9ExZ*W~etyha&HpxgxBrpZG}pzujX z)GfT5Hmzrp4yTT$gm5k)SsOUy^X@eq2m2Vi%IUL;n$@Ka)27z=C-sTZ{+)S}ZT_GU z+!!nfg0YgIMp2pm{Dg+#ZK{uPY|TZk?oes=3<)dtr{s~0Zs7Zso@1ePA@c=DJm1Dc z)gURj9QI+@&9Wuwb@`KU)U*r5y-IMYkw@wi;-A!~4vU}~{W2WRyAesAD2MXuAB;Kh z7IhLwsG1l6>J#>n`V?;$H&IMHaABE8)`*V@1yG+>iW8j`nr&Xi_{{;)?3nz+8TEPn z3r4OiSYAyj^V;vOS<)AmB&`lB$Z8%w1>kpZo}cb)J!pB8-cYZ6J{_rRajOLKx%>zPJ4b2-6kSd* znq0o@hG?Vz)%WSTu=_ggbZ$Mnbc>>|syGbAOFCP7?VPY9WVUc^_7Xc>?H!G@giUS= zAK6X)i?BrTP@HWr^|#6cS}Hq3-GSiAPvtM%PWPr^*fhyQg}GkQcfTv<*Rm9S(0Oaq z)K)v{`Jo`yu;QgBIklNBK`J1IMkMBCEQdi6_Jvr?&ZXZ3LZR2)@z(dC3an;B95+3t zL8+$845*32R#dkyDN2O~VomtEHsFUl$KoY&eLjllwDMf&%n0V+WbNSGq~-|Cd^n1n zS?Yi@DZId}7)yMdn8nSKQ>dClM@WuM*%H{5c*~X0C%82h-aGpWW_NqKAk%mUGaMuk z?;mZvZemV^DiN8IUc=wN^@0eWz zJ>UTJQ|fP^pU-=h-{|LW^z%3R`Ts5Y`BP``8~yx$kAAX+{0ja2sVbhLpNf+90QB=! 
zT=FeMeB7W+q^+K=Io`w3r^}wEX$`*Uv}Hm$>~r^ zhxchtdJ@uVQzo?l54dQ(?tHINZ7?(CYe8P7X2KMKzYXB|%M#;2ZhtDy~scQKu7f zifn=xmg079{5)qg85%EJOtE=Fyp+UPrR6`|ph|P)lS0aq2sH2RDxa(};RK-KM@T@T zzqt`P0UBxATaPKF6pj$~TTDBb4Pr7;ki=ytl`IrNLPx4L)vfzlXjR#S^EQ_QZ(LW# z^M;?rqHk0@HyLKGPo22VTIXdIFh);Cgy`E~Qv4Is3}Clv52K-S+Pp>3VESOFC8&}@ zN2v-GuFbVxKOSYzX3=C{Il&~FHLeaxpd7o8)6q~`Q12{`ag^v+BFD56-hZ`nuBWV} znT|#Sr4!-r-M2R-)K4N9VC{+6(ti)L3D#l)=oZ=C;3E?ZV)u8fD^T1}$O6`Pf zhb!AG7Mp+Jn-E{eR62*5z@jIW>7-E(4Rv{H)&kB$W(CRN>gZP-fOYVxi1`{fkf*&y z$BNnJlP6z9juLI8fMRfCw4)Fa;pxaRuEBMKz|@JwZjx!cR9}#(3KN){zC3oz116mQ zbP1LGC;BAsPUlRNFKEdO)H7Zar=w~k^bg`AsR2x+qb+o6QjJ^Vu^%aK@kH}WoFREc zUovPC*F)YJg+>-6e?fLx4V)yvkgSA2Ipx?v(BqEQS-?^mS;$mwg5b_h#oqEi>Sb%7 zod)(F_iMjn!P1H~FRm6qa@{J<>B#1?;lG%j6VwhG+N}l8HHr3Bj~PF31pf@$zPqS) z`_M++ux&4)r$IPT>i&;7$HL(}t@{A@lkjiApFcI6-|*-E@9-xu2r3XN5G3Fe;g8Gb z?0dxd2&_DYE(8f2x-N&zpZU zGbu+43qgKmJ5vW;JqyEMXEYT9k3j`kp3LC_0nt1wz7WK}o6)b8DI!x5RRJZl2~d8F z5&c(!1N`zI9^rY|FMSJLd;5Q_3nKcY(8o$oM+JcBe+dJwIsw-7EA5|xFOo3+`nI>4 z9Dx}D)r7RvZ{BZd~DDtO_0ac0%;GhAXQ=cR-)?W+%!%&~TxBb7C z`dF*Kved_%5~4rZMF0%P4X~m0ymY47|8`D4Kg44=`!xPA(G@`H)B;N9d8M`jp6Oo; z-q1zY(#FE@pG^+1sQt$n@KdEeYMb2W2J`?5h#sKL{G@n*fNoy@9qp%c?#CS zGrrUSN*@$p?@Z4!AQt?u89evy_LKhO13ZnF{=9J5#Qv*3pm6>%v;0Z>@!p?C=Y6gX zEBSY{pT=K*+L|8Y`#z2A3TUuT(hs?((SD!)?=kM{(@>~@I(w3^75|C?`}ALrVN#!l z1$?Cb|4M{PKgY27sonnJF!-tFV?e>DF>)SDgIIfToK#VMv}e){N- zzR8|CDth#R^H-Ad{#oz&cGXj@N7qJAJ?#+xB?P$A_Wz~U)0X_`E99x*ql2EOK1iP5 z+)ltR1)r?ho@zaMEkStVp5nO{VaPAFo=jt(YCSr%cnax0H*LAFN3G|_a{jw%KjOVl zIh5z7ofP}C=Kq3eL#6#&+5cqPj|Y*SzD0R%+9@UfR`frb_Tw9vr@N@nP5biWpQ`^` zrY-sDXTATaX{)vVQtLmN_TvV`(-qcpbKGeArQrX_wDCUwQtO#%g8^uBK%WHolmq4w J(p`_={tvy+QpW%Q diff --git a/utils/infinidb_hadoop/infinidb_hadoop_bulkload.sh b/utils/infinidb_hadoop/infinidb_hadoop_bulkload.sh deleted file mode 100755 index 65ba5b85c..000000000 --- a/utils/infinidb_hadoop/infinidb_hadoop_bulkload.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/sh -export LD_LIBRARY_PATH=/usr/local/mariadb/columnstore/lib:$LD_LIBRARY_PATH -export 
CALPONT_CONFIG_FILE=/usr/local/mariadb/columnstore/etc/Columnstore.xml -export PATH=$PATH:/usr/local/hadoop-0.20.2/bin:/usr/local/mariadb/columnstore/bin -export CALPONT_HOME=/usr/local/mariadb/columnstore/etc -hadoop dfs -cat $1 | cpimport $2 $3 - diff --git a/utils/infinidb_hadoop/src/infinidb/hadoop/db/IDBFileInputFormat.java b/utils/infinidb_hadoop/src/infinidb/hadoop/db/IDBFileInputFormat.java deleted file mode 100755 index cbb5ad085..000000000 --- a/utils/infinidb_hadoop/src/infinidb/hadoop/db/IDBFileInputFormat.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Copyright (c) 2014 InfiniDB, Inc. - * - * InfiniDB, Inc. licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package infinidb.hadoop.db; - -import infinidb.hadoop.db.InfiniDBConfiguration; - -import java.io.*; -import java.sql.*; -import java.util.Date; -import java.util.Formatter; -import java.io.IOException; - -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.mapred.LineRecordReader; -import org.apache.hadoop.mapred.FileSplit; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.NullWritable; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.mapred.lib.db.*; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.lib.input.*; -import org.apache.hadoop.mapreduce.lib.output.*; -import org.apache.hadoop.mapred.JobClient; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.Mapper; -import org.apache.hadoop.mapred.Reducer; -import org.apache.hadoop.conf.*; -import org.apache.hadoop.util.*; -import org.apache.hadoop.*; -import org.apache.hadoop.mapred.FileInputFormat; -import org.apache.hadoop.mapred.*; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class IDBFileInputFormat extends org.apache.hadoop.mapred.FileInputFormat { - - private static final Logger LOG = LoggerFactory.getLogger(DBInputFormat.class); - - @Override - public RecordReader getRecordReader(InputSplit arg0, JobConf arg1, - Reporter arg2) throws IOException - { - final String filename = ((FileSplit)arg0).getPath().toString(); - final JobConf job = arg1; - - return new RecordReader() - { - private boolean unread = true; - - @Override - public void close() throws IOException - {} - - @Override - public NullWritable createKey() - { - return NullWritable.get(); - } - - @Override - public NullWritable createValue() - { - return NullWritable.get(); - } - - @Override - public long getPos() throws IOException - { - return 0; - } - - @Override - public float getProgress() throws 
IOException - { - return unread ? 0 : 1; - } - - @Override - /* spawn a cpimport process for each input file */ - public boolean next(NullWritable arg0, NullWritable arg1) throws IOException - { - InfiniDBConfiguration dbConf = new InfiniDBConfiguration(job); - String schemaName = dbConf.getOutputSchemaName(); - String tableName = (filename.substring(filename.lastIndexOf('/')+1, filename.length())); - tableName = tableName.substring(0, tableName.lastIndexOf('.')); - String output = job.get("mapred.output.dir"); - if (unread) - { - try - { - StringBuilder loadCmdStr = new StringBuilder(); - loadCmdStr.append(dbConf.getInfiniDBHome()); - loadCmdStr.append("/bin/"); - loadCmdStr.append("infinidoop_load.sh "); - loadCmdStr.append(filename); - loadCmdStr.append(" "); - loadCmdStr.append(schemaName); - loadCmdStr.append(" "); - loadCmdStr.append(tableName); - - Process lChldProc = Runtime.getRuntime().exec(loadCmdStr.toString()); - - // Wait for the child to exit - lChldProc.waitFor(); - BufferedReader lChldProcOutStream = new BufferedReader(new InputStreamReader(lChldProc.getInputStream())); - BufferedReader stdError = new BufferedReader(new InputStreamReader(lChldProc.getErrorStream())); - - String lChldProcOutPutStr = null; - StringBuffer outpath = new StringBuffer(); - outpath.append(job.getWorkingDirectory()); - outpath.append("/"); - outpath.append(output); - outpath.append("/"); - outpath.append(tableName); - outpath.append(".log"); - - Path pt=new Path(outpath.toString()); - FileSystem fs = FileSystem.get(new Configuration()); - BufferedWriter br = new BufferedWriter(new OutputStreamWriter(fs.create(pt, false))); - - // catch output - while ((lChldProcOutPutStr = lChldProcOutStream.readLine()) != null) - { - br.write(lChldProcOutPutStr); - br.newLine(); - } - - // catch error - while ((lChldProcOutPutStr = stdError.readLine()) != null) - { - br.write(lChldProcOutPutStr); - br.newLine(); - } - - //br.write(outpath.toString()); - //br.newLine(); - 
//br.write(loadCmdStr.toString()); - //br.newLine(); - //br.write(filename); - br.close(); - - lChldProcOutStream.close(); - } - catch(Exception e) - { - e.printStackTrace(); - } - unread = false; - return true; - } - else - { - return false; - } - } - }; - } - - @Override - protected boolean isSplitable(FileSystem fs, Path filename) - { - return false; - } - - } diff --git a/utils/infinidb_hadoop/src/infinidb/hadoop/db/InfiniDBConfiguration.java b/utils/infinidb_hadoop/src/infinidb/hadoop/db/InfiniDBConfiguration.java deleted file mode 100755 index 0c3d585bc..000000000 --- a/utils/infinidb_hadoop/src/infinidb/hadoop/db/InfiniDBConfiguration.java +++ /dev/null @@ -1,356 +0,0 @@ -/* - * Copyright (c) 2014 InfiniDB, Inc. - * - * InfiniDB, Inc. licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package infinidb.hadoop.db; - -import java.io.IOException; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; - -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.lib.db.DBInputFormat.NullDBWritable; -import org.apache.hadoop.mapred.lib.db.*; - - -/** - * A container for configuration property names for jobs with DB input/output. - *
- * The job can be configured using the static methods in this class, - * {@link DBInputFormat}, and {@link DBOutputFormat}. - *

- * Alternatively, the properties can be set in the configuration with proper - * values. - * - * @see DBConfiguration#configureDB(JobConf, String, String, String, String) - * @see DBInputFormat#setInput(JobConf, Class, String, String) - * @see DBInputFormat#setInput(JobConf, Class, String, String, String, String...) - * @see DBOutputFormat#setOutput(JobConf, String, String...) - */ -public class InfiniDBConfiguration{ - -/** Input schema name */ -public static final String INPUT_SCHEMA_NAME_PROPERTY = "idb_hadoop.input.schema.name"; - -/** Output schema name */ -public static final String OUTPUT_SCHEMA_NAME_PROPERTY = "idb_hadoop.output.schema.name"; - -/** Output table name */ -public static final String OUTPUT_TABLE_NAMES_PROPERTY = "idb_hadoop.output.table.name"; - -/** @InfiniDB Split key for split the query task */ -public static final String INPUT_SPLITKEY_NAME_PROPERTY = "idb_hadoop.splitkey.name"; - -/** @InfiniDB Split key min value */ -public static final String INPUT_SPLITKEY_MIN_VAL = "idb_hadoop.splitkey.min.value"; - -/** @InfiniDB Split key max value */ -public static final String INPUT_SPLITKEY_MAX_VAL = "idb_hadoop.splitkey.max.value"; - -/** @InfiniDB HOME path */ -public static final String INFINIDB_HOME = "idb_hadoop.infinidb.home.path"; - -/** Input dir */ -public static final String INPUT_PATH = "mapred.input.dir"; - -/** Output dir */ -public static final String OUTPUT_PATH = "mapred.output.dir"; - -/** - * Sets the DB access related fields in the JobConf. - * @param job the job - * @param driverClass JDBC Driver class name - * @param dbUrl JDBC DB access URL. 
- * @param userName DB access username - * @param passwd DB access passwd - */ -public static void configureDB(JobConf job, String driverClass, String dbUrl - , String userName, String passwd) -{ - - job.set(DBConfiguration.DRIVER_CLASS_PROPERTY, driverClass); - job.set(DBConfiguration.URL_PROPERTY, dbUrl); - if(userName != null) - job.set(DBConfiguration.USERNAME_PROPERTY, userName); - if(passwd != null) - job.set(DBConfiguration.PASSWORD_PROPERTY, passwd); -} - -/** - * Sets the DB access related fields in the JobConf. - * @param job the job - * @param driverClass JDBC Driver class name - * @param dbUrl JDBC DB access URL. - */ -public static void configureDB(JobConf job, String driverClass, String dbUrl) -{ - configureDB(job, driverClass, dbUrl, null, null); -} - -private JobConf job; - -public InfiniDBConfiguration(JobConf job) -{ - this.job = job; -} - -/** Returns a connection object o the DB - * @throws ClassNotFoundException - * @throws SQLException - */ -Connection getConnection() throws IOException -{ - try - { - Class.forName(job.get(DBConfiguration.DRIVER_CLASS_PROPERTY)); - }catch (ClassNotFoundException exception) - { - throw new IOException("Conection driver can not be loaded", exception); - } - - try - { - if(job.get(DBConfiguration.USERNAME_PROPERTY) == null) - { - return DriverManager.getConnection(job.get(DBConfiguration.URL_PROPERTY)); - } - else - { - return DriverManager.getConnection( - job.get(DBConfiguration.URL_PROPERTY), - job.get(DBConfiguration.USERNAME_PROPERTY), - job.get(DBConfiguration.PASSWORD_PROPERTY)); - } - }catch (SQLException exception) - { - throw new IOException("Conection can not be established", exception); - } -} - -String getInputSchemaName() -{ - return job.get(InfiniDBConfiguration.INPUT_SCHEMA_NAME_PROPERTY); -} - -void setInputSchemaName(String schemaName) -{ - job.set(InfiniDBConfiguration.INPUT_SCHEMA_NAME_PROPERTY, schemaName); -} - -String getInputTableName() -{ - return 
job.get(DBConfiguration.INPUT_TABLE_NAME_PROPERTY); -} - -void setInputTableName(String tableName) -{ - job.set(DBConfiguration.INPUT_TABLE_NAME_PROPERTY, tableName); -} - -String[] getInputFieldNames() -{ - return job.getStrings(DBConfiguration.INPUT_FIELD_NAMES_PROPERTY); -} - -void setInputFieldNames(String... fieldNames) -{ - job.setStrings(DBConfiguration.INPUT_FIELD_NAMES_PROPERTY, fieldNames); -} - -String getInputConditions() -{ - return job.get(DBConfiguration.INPUT_CONDITIONS_PROPERTY); -} - -void setInputConditions(String conditions) -{ - if (conditions != null && conditions.length() > 0) - job.set(DBConfiguration.INPUT_CONDITIONS_PROPERTY, conditions); -} - -String getInputOrderBy() -{ - return job.get(DBConfiguration.INPUT_ORDER_BY_PROPERTY); -} - -/** @InfiniDB */ -void setSplitKey(String key) -{ - job.setStrings(InfiniDBConfiguration.INPUT_SPLITKEY_NAME_PROPERTY, key); -} - -/** @InfiniDB */ -String getSplitKey() -{ - return job.get(InfiniDBConfiguration.INPUT_SPLITKEY_NAME_PROPERTY); -} - -/** @InfiniDB */ -public void setMinVal(long value) -{ - job.setLong(INPUT_SPLITKEY_MIN_VAL, value); -} - -/** @InfiniDB */ -public Long getMinVal() -{ - if(job.get(INPUT_SPLITKEY_MIN_VAL)==null) - return null; - return job.getLong(INPUT_SPLITKEY_MIN_VAL, -1); -} - -/** @InfiniDB */ -public void setMaxVal(long value) -{ - job.setFloat(INPUT_SPLITKEY_MAX_VAL, value); -} - -/** @InfiniDB */ -public Long getMaxVal() -{ - if(job.get(INPUT_SPLITKEY_MAX_VAL)==null) - return null; - return job.getLong(INPUT_SPLITKEY_MAX_VAL, -1); -} - -void setInputOrderBy(String orderby) -{ - if(orderby != null && orderby.length() >0) - { - job.set(DBConfiguration.INPUT_ORDER_BY_PROPERTY, orderby); - } -} - -String getInputQuery() -{ - return job.get(DBConfiguration.INPUT_QUERY); -} - -void setInputQuery(String query) -{ - if(query != null && query.length() >0) - { - job.set(DBConfiguration.INPUT_QUERY, query); - } -} - -String getInputCountQuery() -{ - return 
job.get(DBConfiguration.INPUT_COUNT_QUERY); -} - -void setInputCountQuery(String query) -{ - if(query != null && query.length() >0) - { - job.set(DBConfiguration.INPUT_COUNT_QUERY, query); - } -} - - -Class getInputClass() -{ - return job.getClass(DBConfiguration.INPUT_CLASS_PROPERTY, NullDBWritable.class); -} - -void setInputClass(Class inputClass) -{ - job.setClass(DBConfiguration.INPUT_CLASS_PROPERTY, inputClass, DBWritable.class); -} - -String getOutputSchemaName() -{ - return job.get(InfiniDBConfiguration.OUTPUT_SCHEMA_NAME_PROPERTY); -} - -void setOutputSchemaName(String schemaName) -{ - job.set(InfiniDBConfiguration.OUTPUT_SCHEMA_NAME_PROPERTY, schemaName); -} - -String[] getOutputTableNames() -{ - return job.getStrings(InfiniDBConfiguration.OUTPUT_TABLE_NAMES_PROPERTY); -} - -void setOutputTableNames(String... tableNames) -{ - job.setStrings(InfiniDBConfiguration.OUTPUT_TABLE_NAMES_PROPERTY, tableNames); -} - -String[] getOutputFieldNames() -{ - return job.getStrings(DBConfiguration.OUTPUT_FIELD_NAMES_PROPERTY); -} - -void setOutputFieldNames(String... 
fieldNames) -{ - job.setStrings(DBConfiguration.OUTPUT_FIELD_NAMES_PROPERTY, fieldNames); -} - -public String getInfiniDBHome() -{ - return job.get(InfiniDBConfiguration.INFINIDB_HOME); -} - -public void setInfiniDBHome(String path) -{ - job.set(InfiniDBConfiguration.INFINIDB_HOME, path); -} - -public String getInputPath() -{ - return job.get(InfiniDBConfiguration.INPUT_PATH); -} - -public void setInputPath(String path) -{ - job.set(InfiniDBConfiguration.INPUT_PATH, path); -} - -public String getOutputPath() -{ - return job.get(InfiniDBConfiguration.OUTPUT_PATH); -} - -public void setOutputPath(String path) -{ - job.set(InfiniDBConfiguration.OUTPUT_PATH, path); -} - -} - - diff --git a/utils/infinidb_hadoop/src/infinidb/hadoop/db/InfiniDBInputFormat.java b/utils/infinidb_hadoop/src/infinidb/hadoop/db/InfiniDBInputFormat.java deleted file mode 100755 index 5909a80fd..000000000 --- a/utils/infinidb_hadoop/src/infinidb/hadoop/db/InfiniDBInputFormat.java +++ /dev/null @@ -1,442 +0,0 @@ -/* - * Copyright (c) 2014 InfiniDB, Inc. - * - * InfiniDB, Inc. licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package infinidb.hadoop.db; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; - -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.io.WritableUtils; -import org.apache.hadoop.mapred.InputFormat; -import org.apache.hadoop.mapred.InputSplit; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.JobConfigurable; -import org.apache.hadoop.mapred.RecordReader; -import org.apache.hadoop.mapred.Reporter; -import org.apache.hadoop.util.ReflectionUtils; -import org.apache.hadoop.mapred.lib.db.*; - - -/** - * A InputFormat that reads input data from an SQL table. - *

- * DBInputFormat emits LongWritables containing the record number as - * key and DBWritables as value. - * - * The SQL query, and input class can be using one of the two - * setInput methods. - */ -public class InfiniDBInputFormat - implements InputFormat, JobConfigurable -{ - /** - * A RecordReader that reads records from a SQL table. - * Emits LongWritables containing the record number as - * key and DBWritables as value. - */ -protected class DBRecordReader implements RecordReader -{ - private ResultSet results; - - private Statement statement; - - private Class inputClass; - - private JobConf job; - - private InfiniDBInputSplit split; - - private long pos = 0; - - /** - * @param split The InputSplit to read data for - * @throws SQLException - */ - protected DBRecordReader(InfiniDBInputSplit split, Class inputClass, JobConf job) throws SQLException - { - this.inputClass = inputClass; - this.split = split; - this.job = job; - - statement = connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); - - //statement.setFetchSize(Integer.MIN_VALUE); - results = statement.executeQuery(getSelectQuery()); - } - - /** @InfiniDB */ - public String concat(S[] arr, String sep) - { - String ret = ""; - for(int i=0; i < arr.length; i++) - { - ret = ret + arr[i]; - if(i < arr.length-1) - { - ret = ret + sep; - } - } - return ret; - } - - /** @InfiniDB Returns the query for selecting the records, - * subclasses can override this for custom behaviour.*/ - protected String getSelectQuery() - { - InfiniDBConfiguration conf = new InfiniDBConfiguration(job); - StringBuilder query = new StringBuilder(); - query.append("SELECT "); - query.append(concat(conf.getInputFieldNames(), ",")); - query.append(" FROM "); - query.append(conf.getInputTableName()); - query.append(" WHERE "); - query.append(split.splitKey + ">=" + split.getStart()); - query.append(" AND "); - query.append(split.splitKey + "<" + split.getEnd()); - if (conditions != null && 
conditions.length() > 0) - query.append(" AND (").append(conditions).append(")"); - return query.toString(); - } - -/** {@inheritDoc} */ -public void close() throws IOException -{ - try - { - connection.commit(); - results.close(); - statement.close(); - } catch (SQLException e) - { - throw new IOException(e.getMessage()); - } -} - - /** {@inheritDoc} */ -public LongWritable createKey() -{ - return new LongWritable(); -} - -/** {@inheritDoc} */ -public T createValue() -{ - return ReflectionUtils.newInstance(inputClass, job); -} - -/** {@inheritDoc} */ -public long getPos() throws IOException -{ - return pos; -} - -/** {@inheritDoc} */ -public float getProgress() throws IOException -{ - return pos / (float)split.getLength(); -} - -/** {@inheritDoc} */ -public boolean next(LongWritable key, T value) throws IOException -{ - try - { - if (!results.next()) - return false; - - // Set the key field value as the output key value - key.set(pos + split.getStart()); - - value.readFields(results); - - pos ++; - } catch (SQLException e) - { - throw new IOException(e.getMessage()); - } - return true; -} -} - - /** - * A Class that does nothing, implementing DBWritable - */ - public static class NullDBWritable implements DBWritable, Writable { - @Override - public void readFields(DataInput in) throws IOException { } - @Override - public void readFields(ResultSet arg0) throws SQLException { } - @Override - public void write(DataOutput out) throws IOException { } - @Override - public void write(PreparedStatement arg0) throws SQLException { } - } - /** - * A InputSplit that spans a set of rows - */ - protected static class InfiniDBInputSplit implements InputSplit { - - private long end = 0; - private long start = 0; - private String splitKey; - - /** - * Default Constructor - */ - public InfiniDBInputSplit() { - } - - /** - * @InfiniDB - * Convenience Constructor - * @param start the index of the first row to select - * @param end the index of the last row to select - */ - public 
InfiniDBInputSplit(long start, long end, String key) { - this.start = start; - this.end = end; - this.splitKey = key; - } - - /** {@inheritDoc} */ - public String[] getLocations() throws IOException { - return new String[] {}; - } - - /** - * @return The index of the first row to select - */ - public long getStart() { - return start; - } - - /** - * @return The index of the last row to select - */ - public long getEnd() { - return end; - } - - /** - * @return The total row count in this split - */ - public long getLength() throws IOException { - return end - start; - } - - /** {@inheritDoc} */ - public void readFields(DataInput input) throws IOException { - start = input.readLong(); - end = input.readLong(); - splitKey = WritableUtils.readString(input); - } - - /** {@inheritDoc} */ - public void write(DataOutput output) throws IOException { - output.writeLong(start); - output.writeLong(end); - WritableUtils.writeString(output, splitKey); - } - } - - private String conditions; - - private Connection connection; - - private String tableName; - - private String[] fieldNames; - - private InfiniDBConfiguration dbConf; - - /** {@inheritDoc} */ - public void configure(JobConf job) { - - dbConf = new InfiniDBConfiguration(job); - - try { - this.connection = dbConf.getConnection(); - this.connection.setAutoCommit(false); - connection.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); - } - catch (Exception ex) { - throw new RuntimeException(ex); - } - - tableName = dbConf.getInputTableName(); - fieldNames = dbConf.getInputFieldNames(); - conditions = dbConf.getInputConditions(); - } - - /** {@inheritDoc} */ - @SuppressWarnings("unchecked") - public RecordReader getRecordReader(InputSplit split, - JobConf job, Reporter reporter) throws IOException { - - Class inputClass = dbConf.getInputClass(); - try { - return new DBRecordReader((InfiniDBInputSplit) split, inputClass, job); - } - catch (SQLException ex) { - throw new IOException(ex.getMessage()); - } - } - - /** 
@InfiniDB */ - private long getMaxVal(InfiniDBConfiguration conf, Connection conn, String tableName, String col) { - if(conf.getMaxVal()!=null) { - return conf.getMaxVal(); - } - try { - PreparedStatement s = conn.prepareStatement("SELECT MAX(" + col + ") FROM " + tableName); - ResultSet rs = s.executeQuery(); - rs.next(); - long ret = rs.getLong(1); - rs.close(); - s.close(); - return ret; - } catch(SQLException e) { - throw new RuntimeException(e); - } - } - - /** @InfiniDB */ - private long getMinVal(InfiniDBConfiguration conf, Connection conn, String tableName, String col ) { - if(conf.getMinVal()!=null) { - return conf.getMinVal(); - } - try { - PreparedStatement s = conn.prepareStatement("SELECT MIN(" + col + ") FROM " + tableName); - ResultSet rs = s.executeQuery(); - rs.next(); - long ret = rs.getLong(1); - rs.close(); - s.close(); - return ret; - } catch(SQLException e) { - throw new RuntimeException(e); - } - } - - - /** {@inheritDoc} - * @InfiniDB - */ - public InputSplit[] getSplits(JobConf job, int chunks) throws IOException { - - try { - InfiniDBConfiguration conf = new InfiniDBConfiguration(job); - Connection conn = conf.getConnection(); - String splitKey = conf.getSplitKey(); - long maxVal = getMaxVal(conf, conn, conf.getInputTableName(), conf.getSplitKey()); - long minVal = getMinVal(conf, conn, conf.getInputTableName(), conf.getSplitKey()); - System.out.println("max=" + maxVal); - System.out.println("min=" + minVal); - - InputSplit[] ret = new InputSplit[chunks]; - long chunkSize = (maxVal - minVal + 1) / chunks + 1; - long start = minVal; - for (int i = 0; i < chunks; i++){ - ret[i] = new InfiniDBInputSplit(start, start+chunkSize, splitKey); - start += chunkSize; - } - - conn.close(); - return ret; - } catch(SQLException e) { - throw new RuntimeException(e); - } - -} - - /** Returns the query for getting the total number of rows, - * subclasses can override this for custom behaviour.*/ - protected String getCountQuery() { - - 
if(dbConf.getInputCountQuery() != null) { - return dbConf.getInputCountQuery(); - } - - StringBuilder query = new StringBuilder(); - query.append("SELECT COUNT(*) FROM " + tableName); - - if (conditions != null && conditions.length() > 0) - query.append(" WHERE " + conditions); - return query.toString(); - } - - /** - * @InfiniDB - * Initializes the map-part of the job with the appropriate input settings. - * - * @param job The job - * @param inputClass the class object implementing DBWritable, which is the - * Java object holding tuple fields. - * @param tableName The table to read data from - * @param conditions The condition which to select data with, eg. '(updated > - * 20070101 AND length > 0)' - * @param key the field name used for split key. - * @param fieldNames The field names in the table - * @see #setInput(JobConf, Class, String, String) - */ - public static void setInput(JobConf job, Class inputClass, - String tableName,String conditions, String key, String... fieldNames) { - - job.setInputFormat(InfiniDBInputFormat.class); - InfiniDBConfiguration dbConf = new InfiniDBConfiguration(job); - dbConf.setInputClass(inputClass); - dbConf.setInputTableName(tableName); - dbConf.setInputFieldNames(fieldNames); - dbConf.setInputConditions(conditions); - dbConf.setSplitKey(key); - } - - /** - * @InfiniDB - * Initializes the map-part of the job with the appropriate input settings. - * - * @param job The job - * @param inputClass the class object implementing DBWritable, which is the - * Java object holding tuple fields. - * @param inputQuery the input query to select fields. Example : - * "SELECT f1, f2, f3 FROM Mytable ORDER BY f1" - * @param inputCountQuery the input query that returns the number of records in - * the table. - * Example : "SELECT COUNT(f1) FROM Mytable" - * @see #setInput(JobConf, Class, String, String, String, String...) 
- */ - public static void setInput(JobConf job, Class inputClass, - String inputQuery, String inputCountQuery) { - job.setInputFormat(InfiniDBInputFormat.class); - - InfiniDBConfiguration dbConf = new InfiniDBConfiguration(job); - dbConf.setInputClass(inputClass); - dbConf.setInputQuery(inputQuery); - dbConf.setInputCountQuery(inputCountQuery); - - } - -} diff --git a/utils/infinidb_hadoop/src/infinidb/hadoop/db/InfiniDBOutputFormat.java b/utils/infinidb_hadoop/src/infinidb/hadoop/db/InfiniDBOutputFormat.java deleted file mode 100755 index 2427df45c..000000000 --- a/utils/infinidb_hadoop/src/infinidb/hadoop/db/InfiniDBOutputFormat.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright (c) 2014 InfiniDB, Inc. - * - * InfiniDB, Inc. licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package infinidb.hadoop.db; - -import java.io.BufferedReader; -import java.io.BufferedWriter; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.OutputStreamWriter; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.SQLException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapred.FileSplit; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.OutputFormat; -import org.apache.hadoop.mapred.RecordWriter; -import org.apache.hadoop.mapred.Reporter; -import org.apache.hadoop.util.Progressable; -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.mapred.lib.db.*; - - -/** - * A OutputFormat that sends the reduce output to a SQL table. - *

- * {@link DBOutputFormat} accepts <key,value> pairs, where - * key has a type extending DBWritable. Returned {@link RecordWriter} - * writes only the key to the database with a batch SQL query. - * - */ -public class InfiniDBOutputFormat -implements OutputFormat { - - private static final Log LOG = LogFactory.getLog(InfiniDBOutputFormat.class); - - /** - * A RecordWriter that writes the reduce output to a SQL table - */ - protected class DBRecordWriter - implements RecordWriter { - - private Connection connection; - private PreparedStatement statement; - - protected DBRecordWriter() throws SQLException - {} - - /** {@inheritDoc} */ - public void close(Reporter reporter) throws IOException - {} - - /** {@inheritDoc} */ - public void write(K key, V value) throws IOException - {} - } - - /** {@inheritDoc} */ - public void checkOutputSpecs(FileSystem filesystem, JobConf job) - throws IOException - {} - - -/** {@inheritDoc} */ -public RecordWriter getRecordWriter(FileSystem filesystem, - JobConf job, String name, Progressable progress) throws IOException -{ - try { - return new DBRecordWriter(); - } - catch (Exception ex) { - throw new IOException(ex.getMessage()); - } -} - -/** - * Initializes the reduce-part of the job with the appropriate output settings - * - * @param job - * The job - * @param tableName - * The table to insert data into - * @param fieldNames - * The field names in the table. If unknown, supply the appropriate - * number of nulls. - */ -public static void setOutput(JobConf job, String schemaName, String ... 
tableNames) -{ - job.setOutputFormat(InfiniDBOutputFormat.class); - job.setReduceSpeculativeExecution(false); - - InfiniDBConfiguration dbConf = new InfiniDBConfiguration(job); - dbConf.setOutputSchemaName(schemaName); - dbConf.setOutputTableNames(tableNames); -} - -/** - * Initializes the reduce-part of the job with the appropriate output settings - * - * @param job - * The job - * @param tableName - * The table to insert data into - * @param fieldNames - * The field names in the table. If unknown, supply the appropriate - * number of nulls. - */ -public static void setOutput(JobConf job, String schemaName) -{ - job.setOutputFormat(InfiniDBOutputFormat.class); - job.setReduceSpeculativeExecution(false); - - InfiniDBConfiguration dbConf = new InfiniDBConfiguration(job); - - dbConf.setOutputSchemaName(schemaName); -} -} - diff --git a/utils/infinidb_hadoop/src/infinidb/hadoop/example/InfiniDBOutputDriver.java b/utils/infinidb_hadoop/src/infinidb/hadoop/example/InfiniDBOutputDriver.java deleted file mode 100755 index 307624995..000000000 --- a/utils/infinidb_hadoop/src/infinidb/hadoop/example/InfiniDBOutputDriver.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (c) 2014 InfiniDB, Inc. - * - * InfiniDB, Inc. licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package infinidb.hadoop.example; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.sql.*; -import java.util.Date; -import java.util.Formatter; -import java.io.IOException; - -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.*; -import org.apache.hadoop.mapred.lib.db.*; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.lib.input.*; -import org.apache.hadoop.mapreduce.lib.output.*; -import org.apache.hadoop.mapred.JobClient; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.Mapper; -import org.apache.hadoop.mapred.Reducer; -import org.apache.hadoop.mapred.TextInputFormat; -import org.apache.hadoop.mapred.SequenceFileInputFormat; -import org.apache.hadoop.conf.*; -import org.apache.hadoop.util.*; -import org.apache.hadoop.*; - -import infinidb.hadoop.db.*; - -public class InfiniDBOutputDriver extends Configured implements Tool -{ - public int run (String[] args) throws Exception - { - Configuration conf = new Configuration(); - JobConf jobconf = new JobConf(conf, InfiniDoopDriver.class); - DBConfiguration.configureDB(jobconf, - "com.mysql.jdbc.Driver", - "jdbc:mysql://srvswint4/tpch1","root", ""); - String [] fields = { "n_nationkey", "n_name" }; - String [] outFields = {"id", "name"}; - jobconf.setInputFormat(IDBFileInputFormat.class); - jobconf.setOutputFormat(InfiniDBOutputFormat.class); - jobconf.setOutputKeyClass(NullWritable.class); - jobconf.setOutputValueClass(Text.class); - InfiniDBOutputFormat.setOutput(jobconf, "db", outFields); - InfiniDBConfiguration idbconf = new InfiniDBConfiguration(jobconf); - idbconf.setInputPath("input"); - idbconf.setOutputPath("output"); - idbconf.setInfiniDBHome("/usr/local/mariadb/columnstore"); - - jobconf.setMapperClass(InfiniDoopMapper.class); - jobconf.setNumMapTasks(1); - jobconf.setNumReduceTasks(2); - JobClient client = new JobClient(); - client.setConf(jobconf); - try { - JobClient.runJob(jobconf); - 
} catch (Exception e) { - e.printStackTrace(); - } - - return 0; -} - -public static void main(String [] args) throws Exception -{ - int ret = ToolRunner.run(new InfiniDBOutputDriver(), args); - System.exit(ret); -} - -} diff --git a/utils/infinidb_hadoop/src/infinidb/hadoop/example/InfiniDoopDriver.java b/utils/infinidb_hadoop/src/infinidb/hadoop/example/InfiniDoopDriver.java deleted file mode 100755 index 74bce0ea3..000000000 --- a/utils/infinidb_hadoop/src/infinidb/hadoop/example/InfiniDoopDriver.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (c) 2014 InfiniDB, Inc. - * - * InfiniDB, Inc. licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package infinidb.hadoop.example; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.sql.*; -import java.util.Date; -import java.util.Formatter; -import java.io.IOException; - -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.mapred.lib.db.*; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.lib.input.*; -import org.apache.hadoop.mapreduce.lib.output.*; -import org.apache.hadoop.mapred.JobClient; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.Mapper; -import org.apache.hadoop.mapred.Reducer; -import org.apache.hadoop.conf.*; -import org.apache.hadoop.util.*; -import org.apache.hadoop.*; - -import infinidb.hadoop.db.*; -import infinidb.hadoop.db.InfiniDBConfiguration; - -public class InfiniDoopDriver extends Configured implements Tool -{ - public int run (String[] args) throws Exception - { - Configuration conf = new Configuration(); - JobConf jobconf = new JobConf(conf, InfiniDoopDriver.class); - DBConfiguration.configureDB(jobconf, - "com.mysql.jdbc.Driver", - "jdbc:mysql://srvswint4/tpch1","root", ""); - String [] fields = { "n_nationkey", "n_name" }; - jobconf.setInputFormat(InfiniDBInputFormat.class); - - jobconf.setOutputKeyClass(LongWritable.class); - jobconf.setOutputValueClass(Text.class); - - InfiniDBInputFormat.setInput(jobconf, InfiniDoopRecord.class, "nation", - null, "n_nationkey", fields); - - InfiniDBConfiguration idbconf = new InfiniDBConfiguration(jobconf); - idbconf.setOutputPath("output2"); - jobconf.setMapperClass(InfiniDoopInputMapper.class); - jobconf.setNumMapTasks(4); - jobconf.setNumReduceTasks(1); - jobconf.set("mapred.textoutputformat.separator", "|"); - JobClient client = new JobClient(); - - client.setConf(jobconf); - try { - JobClient.runJob(jobconf); - } catch 
(Exception e) { - e.printStackTrace(); - } - - return 0; -} - public static void main(String [] args) throws Exception - { - int ret = ToolRunner.run(new InfiniDoopDriver(), args); - System.exit(ret); - } - -} diff --git a/utils/infinidb_hadoop/src/infinidb/hadoop/example/InfiniDoopInputMapper.java b/utils/infinidb_hadoop/src/infinidb/hadoop/example/InfiniDoopInputMapper.java deleted file mode 100755 index ebc8b6e30..000000000 --- a/utils/infinidb_hadoop/src/infinidb/hadoop/example/InfiniDoopInputMapper.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2014 InfiniDB, Inc. - * - * InfiniDB, Inc. licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package infinidb.hadoop.example; - -import java.io.IOException; -import java.io.*; -import java.sql.*; - -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.mapred.lib.db.*; -import org.apache.hadoop.io.WritableComparable; -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapred.MapReduceBase; -import org.apache.hadoop.mapred.Mapper; -import org.apache.hadoop.mapred.OutputCollector; -import org.apache.hadoop.mapred.Reporter; - -public class InfiniDoopInputMapper extends MapReduceBase implements - Mapper { - - public void map(LongWritable key, InfiniDoopRecord val, - OutputCollector output, Reporter reporter) throws IOException { - output.collect(new LongWritable(val.id), new Text(val.name)); - } - -} diff --git a/utils/infinidb_hadoop/src/infinidb/hadoop/example/InfiniDoopMapper.java b/utils/infinidb_hadoop/src/infinidb/hadoop/example/InfiniDoopMapper.java deleted file mode 100755 index 5efd50631..000000000 --- a/utils/infinidb_hadoop/src/infinidb/hadoop/example/InfiniDoopMapper.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2014 InfiniDB, Inc. - * - * InfiniDB, Inc. licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package infinidb.hadoop.example; - -import java.io.IOException; -import java.io.*; -import java.sql.*; - -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.mapred.lib.db.*; -import org.apache.hadoop.io.WritableComparable; -import org.apache.hadoop.io.*; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapred.MapReduceBase; -import org.apache.hadoop.mapred.Mapper; -import org.apache.hadoop.mapred.OutputCollector; -import org.apache.hadoop.mapred.Reporter; - -/** Dummy mapper, basically doing nothing. the real job is invoked by input format */ -public class InfiniDoopMapper extends MapReduceBase implements - Mapper { - - public void map(NullWritable key, NullWritable val, - OutputCollector output, Reporter reporter) throws IOException { - NullWritable n = NullWritable.get(); - output.collect(n, n); - } -} - diff --git a/utils/infinidb_hadoop/src/infinidb/hadoop/example/InfiniDoopRecord.java b/utils/infinidb_hadoop/src/infinidb/hadoop/example/InfiniDoopRecord.java deleted file mode 100755 index 2bc370ff1..000000000 --- a/utils/infinidb_hadoop/src/infinidb/hadoop/example/InfiniDoopRecord.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) 2014 InfiniDB, Inc. - * - * InfiniDB, Inc. licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package infinidb.hadoop.example; - -import java.io.IOException; -import java.io.*; -import java.sql.*; - -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.mapred.lib.db.*; -import org.apache.hadoop.io.WritableComparable; -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapred.MapReduceBase; -import org.apache.hadoop.mapred.Mapper; -import org.apache.hadoop.mapred.OutputCollector; -import org.apache.hadoop.mapred.Reporter; - -public class InfiniDoopRecord implements Writable, DBWritable, WritableComparable { - long id; - String name; - - public void readFields(DataInput in) throws IOException { - this.id = in.readLong(); - this.name = Text.readString(in); - } - - public void readFields(ResultSet resultSet) - throws SQLException { - this.id = resultSet.getLong(1); - this.name = resultSet.getString(2); - } - - public void write(DataOutput out) throws IOException { - out.writeLong(this.id); - Text.writeString(out, this.name); - } - - public void write(PreparedStatement stmt) throws SQLException { - stmt.setLong(1, this.id); - stmt.setString(2, this.name); - } - - public int compareTo(InfiniDoopRecord w) { - return (this.id < w.id ? -1 :(this.id == w.id ? 
0 : 1)); - } - } diff --git a/utils/loggingcpp/CMakeLists.txt b/utils/loggingcpp/CMakeLists.txt index be2a058f9..b44cd8b21 100644 --- a/utils/loggingcpp/CMakeLists.txt +++ b/utils/loggingcpp/CMakeLists.txt @@ -30,5 +30,5 @@ set_target_properties(loggingcpp PROPERTIES VERSION 1.0.0 SOVERSION 1) install(TARGETS loggingcpp DESTINATION ${ENGINE_LIBDIR} COMPONENT libs) -install(FILES MessageFile.txt ErrorMessage.txt DESTINATION ${ENGINE_ETCDIR} COMPONENT platform) +install(FILES MessageFile.txt ErrorMessage.txt DESTINATION ${ENGINE_SYSCONFDIR}/columnstore COMPONENT platform) diff --git a/utils/loggingcpp/idberrorinfo.cpp b/utils/loggingcpp/idberrorinfo.cpp index 4932634c0..a2e9056d2 100644 --- a/utils/loggingcpp/idberrorinfo.cpp +++ b/utils/loggingcpp/idberrorinfo.cpp @@ -34,6 +34,7 @@ using namespace std; #include using namespace boost; +#include "config.h" #include "configcpp.h" using namespace config; #include "loggingid.h" @@ -64,7 +65,7 @@ IDBErrorInfo::IDBErrorInfo() string configFile(cf->getConfig("SystemConfig", "ErrorMessageFile")); if (configFile.length() == 0) - configFile = startup::StartUp::installDir() + "/etc/ErrorMessage.txt"; + configFile = std::string(MCSSYSCONFDIR) + "/columnstore/ErrorMessage.txt"; ifstream msgFile(configFile.c_str()); diff --git a/utils/loggingcpp/message.cpp b/utils/loggingcpp/message.cpp index 030567e4b..7f1468594 100644 --- a/utils/loggingcpp/message.cpp +++ b/utils/loggingcpp/message.cpp @@ -34,6 +34,7 @@ using namespace std; #include using namespace boost; +#include "config.h" #include "configcpp.h" using namespace config; #include "messageobj.h" @@ -56,7 +57,7 @@ void loadCatalog() string configFile(cf->getConfig("MessageLog", "MessageLogFile")); if (configFile.length() == 0) - configFile = startup::StartUp::installDir() + "/etc/MessageFile.txt"; + configFile = std::string(MCSSYSCONFDIR) + "/columnstore/MessageFile.txt"; ifstream msgFile(configFile.c_str()); diff --git a/utils/multicast/config.h b/utils/multicast/config.h 
deleted file mode 100644 index 1596ab1d9..000000000 --- a/utils/multicast/config.h +++ /dev/null @@ -1,208 +0,0 @@ -/* Copyright (C) 2014 InfiniDB, Inc. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License - as published by the Free Software Foundation; version 2 of - the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - MA 02110-1301, USA. */ - -/* config.h. Generated from config.h.in by configure. */ -/* config.h.in. Generated from configure.in by autoheader. */ - -/* Define to 1 if you have the header file. */ -#define HAVE_ARPA_INET_H 1 - -/* Define to 1 if you have the `atexit' function. */ -#define HAVE_ATEXIT 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_DLFCN_H 1 - -/* Define to 1 if you have the `dlsym' function. */ -#define HAVE_DLSYM 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_FCNTL_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_GETOPT_H 1 - -/* Define to 1 if you have the `getopt_long' function. */ -#define HAVE_GETOPT_LONG 1 - -/* Define to 1 if you have the `htons' function. */ -#define HAVE_HTONS 1 - -/* Define to 1 if you have the `inet_aton' function. */ -#define HAVE_INET_ATON 1 - -/* Define to 1 if you have the `inet_pton' function. */ -#define HAVE_INET_PTON 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_INTTYPES_H 1 - -/* Define to 1 if the system has the type `in_addr_t'. */ -#define HAVE_IN_ADDR_T 1 - -/* Define to 1 if you have the `dl' library (-ldl). 
*/ -#define HAVE_LIBDL 1 - -/* Define to 1 if you have the `pthread' library (-lpthread). */ -#define HAVE_LIBPTHREAD 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_LIMITS_H 1 - -/* Define when you have an LLSEEK prototype */ -/* #undef HAVE_LLSEEK_PROTOTYPE */ - -/* Define when the compiler supports LOFF_T type */ -#define HAVE_LOFF_T 1 - -/* Define when the compiler supports LONG_LONG type */ -#define HAVE_LONG_LONG 1 - -/* Define to 1 if you have the `lseek64' function. */ -#define HAVE_LSEEK64 1 - -/* Define when you have an LSEEK64 prototype */ -#define HAVE_LSEEK64_PROTOTYPE 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_MALLOC_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_MEMORY_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_NETDB_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_NETINET_IN_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_NET_IF_H 1 - -/* Define when the compiler supports OFFSET_T type */ -/* #undef HAVE_OFFSET_T */ - -/* Define when the system has a 64 bit off_t type */ -#define HAVE_OFF_T_64 1 - -/* Define to 1 if you have the `on_exit' function. */ -#define HAVE_ON_EXIT 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SIGNAL_H 1 - -/* Define to 1 if you have the `snprintf' function. */ -#define HAVE_SNPRINTF 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDINT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDLIB_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRINGS_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRING_H 1 - -/* Define to 1 if `imr_ifindex' is member of `struct ip_mreqn'. */ -#define HAVE_STRUCT_IP_MREQN_IMR_IFINDEX 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_IOCTL_H 1 - -/* Define to 1 if you have the header file. 
*/ -#define HAVE_SYS_PARAM_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_SELECT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_SOCKET_H 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_SYS_SOCKIO_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_STAT_H 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_SYS_TERMIOS_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TIME_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TYPES_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_UIO_H 1 - -/* Define to 1 if you have that is POSIX.1 compatible. */ -#define HAVE_SYS_WAIT_H 1 - -/* Define to 1 if you have the `tcsetattr' function. */ -#define HAVE_TCSETATTR 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_TERMIOS_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_UNISTD_H 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_WINSOCK2_H */ - -/* Define to the address where bug reports for this package should be sent. */ -#define PACKAGE_BUGREPORT "" - -/* Define to the full name of this package. */ -#define PACKAGE_NAME "" - -/* Define to the full name and version of this package. */ -#define PACKAGE_STRING "" - -/* Define to the one symbol short name of this package. */ -#define PACKAGE_TARNAME "" - -/* Define to the version of this package. */ -#define PACKAGE_VERSION "" - -/* Define as the return type of signal handlers (`int' or `void'). */ -#define RETSIGTYPE void - -/* Define to 1 if you have the ANSI C header files. */ -#define STDC_HEADERS 1 - -/* Define to 1 if you can safely include both and . */ -#define TIME_WITH_SYS_TIME 1 - -/* Define to 1 if your declares `struct tm'. */ -/* #undef TM_IN_SYS_TIME */ - -/* Define to empty if `const' does not conform to ANSI C. 
*/ -/* #undef const */ - -/* Define to `__inline__' or `__inline' if that's what the C compiler - calls it, or to nothing if 'inline' is not supported under any name. */ -#ifndef __cplusplus -/* #undef inline */ -#endif - -/* Define to `unsigned int' if does not define. */ -/* #undef size_t */ diff --git a/utils/thrift/thrift/config.h b/utils/thrift/thrift/config.h deleted file mode 100644 index e60e4c1ca..000000000 --- a/utils/thrift/thrift/config.h +++ /dev/null @@ -1,427 +0,0 @@ -/* lib/cpp/src/thrift/config.h. Generated from config.hin by configure. */ -/* config.hin. Generated from configure.ac by autoheader. */ - - -#ifndef CONFIG_H -#define CONFIG_H - - -/* Define if the AI_ADDRCONFIG symbol is unavailable */ -/* #undef AI_ADDRCONFIG */ - -/* Possible value for SIGNED_RIGHT_SHIFT_IS */ -#define ARITHMETIC_RIGHT_SHIFT 1 - -/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP - systems. This function is required for `alloca.c' support on those systems. - */ -/* #undef CRAY_STACKSEG_END */ - -/* Define to 1 if using `alloca.c'. */ -/* #undef C_ALLOCA */ - -/* Define to 1 if you have the `alarm' function. */ -#define HAVE_ALARM 1 - -/* Define to 1 if you have `alloca', as a function or macro. */ -#define HAVE_ALLOCA 1 - -/* Define to 1 if you have and it should be used (not on Ultrix). - */ -#define HAVE_ALLOCA_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_ARPA_INET_H 1 - -/* define if the Boost library is available */ -#define HAVE_BOOST /**/ - -/* Define to 1 if you have the `bzero' function. */ -#define HAVE_BZERO 1 - -/* Define to 1 if you have the `clock_gettime' function. */ -#define HAVE_CLOCK_GETTIME 1 - -/* Define to 1 if you have the declaration of `strerror_r', and to 0 if you - don't. */ -#define HAVE_DECL_STRERROR_R 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_DLFCN_H 1 - -/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' 
*/ -/* #undef HAVE_DOPRNT */ - -/* Define to 1 if you have the header file. */ -#define HAVE_FCNTL_H 1 - -/* Define to 1 if you have the `fork' function. */ -#define HAVE_FORK 1 - -/* Define to 1 if you have the `ftruncate' function. */ -#define HAVE_FTRUNCATE 1 - -/* Define to 1 if you have the `gethostbyname' function. */ -#define HAVE_GETHOSTBYNAME 1 - -/* Define to 1 if you have the `gettimeofday' function. */ -#define HAVE_GETTIMEOFDAY 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_INTTYPES_H 1 - -/* define if libevent is available */ -/* #undef HAVE_LIBEVENT */ - -/* Define to 1 if you have the header file. */ -#define HAVE_LIBINTL_H 1 - -/* Define to 1 if you have the `pthread' library (-lpthread). */ -#define HAVE_LIBPTHREAD 1 - -/* Define to 1 if you have the `rt' library (-lrt). */ -#define HAVE_LIBRT 1 - -/* Define to 1 if you have the `socket' library (-lsocket). */ -/* #undef HAVE_LIBSOCKET */ - -/* Define to 1 if you have the header file. */ -#define HAVE_LIMITS_H 1 - -/* Define to 1 if your system has a GNU libc compatible `malloc' function, and - to 0 otherwise. */ -#define HAVE_MALLOC 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_MALLOC_H 1 - -/* Define to 1 if you have the `memmove' function. */ -#define HAVE_MEMMOVE 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_MEMORY_H 1 - -/* Define to 1 if you have the `memset' function. */ -#define HAVE_MEMSET 1 - -/* Define to 1 if you have the `mkdir' function. */ -#define HAVE_MKDIR 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_NETDB_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_NETINET_IN_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_OPENSSL_RAND_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_OPENSSL_SSL_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_OPENSSL_X509V3_H 1 - -/* Define to 1 if you have the header file. 
*/ -#define HAVE_PTHREAD_H 1 - -/* Define to 1 if the system has the type `ptrdiff_t'. */ -#define HAVE_PTRDIFF_T 1 - -/* Define to 1 if your system has a GNU libc compatible `realloc' function, - and to 0 otherwise. */ -#define HAVE_REALLOC 1 - -/* Define to 1 if you have the `realpath' function. */ -#define HAVE_REALPATH 1 - -/* Define to 1 if you have the `sched_get_priority_max' function. */ -#define HAVE_SCHED_GET_PRIORITY_MAX 1 - -/* Define to 1 if you have the `sched_get_priority_min' function. */ -#define HAVE_SCHED_GET_PRIORITY_MIN 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SCHED_H 1 - -/* Define to 1 if you have the `select' function. */ -#define HAVE_SELECT 1 - -/* Define to 1 if you have the `socket' function. */ -#define HAVE_SOCKET 1 - -/* Define to 1 if you have the `sqrt' function. */ -#define HAVE_SQRT 1 - -/* Define to 1 if `stat' has the bug that it succeeds when given the - zero-length file name argument. */ -/* #undef HAVE_STAT_EMPTY_STRING_BUG */ - -/* Define to 1 if stdbool.h conforms to C99. */ -#define HAVE_STDBOOL_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDDEF_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDINT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDLIB_H 1 - -/* Define to 1 if you have the `strchr' function. */ -#define HAVE_STRCHR 1 - -/* Define to 1 if you have the `strdup' function. */ -#define HAVE_STRDUP 1 - -/* Define to 1 if you have the `strerror' function. */ -#define HAVE_STRERROR 1 - -/* Define to 1 if you have the `strerror_r' function. */ -#define HAVE_STRERROR_R 1 - -/* Define to 1 if you have the `strftime' function. */ -#define HAVE_STRFTIME 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRINGS_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRING_H 1 - -/* Define to 1 if you have the `strstr' function. 
*/ -#define HAVE_STRSTR 1 - -/* Define to 1 if you have the `strtol' function. */ -#define HAVE_STRTOL 1 - -/* Define to 1 if you have the `strtoul' function. */ -#define HAVE_STRTOUL 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_PARAM_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_POLL_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_RESOURCE_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_SELECT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_SOCKET_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_STAT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TIME_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TYPES_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_UN_H 1 - -/* Define to 1 if you have that is POSIX.1 compatible. */ -#define HAVE_SYS_WAIT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_UNISTD_H 1 - -/* Define to 1 if you have the `vfork' function. */ -#define HAVE_VFORK 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_VFORK_H */ - -/* Define to 1 if you have the `vprintf' function. */ -#define HAVE_VPRINTF 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_WCHAR_H 1 - -/* Define to 1 if `fork' works. */ -#define HAVE_WORKING_FORK 1 - -/* Define to 1 if `vfork' works. */ -#define HAVE_WORKING_VFORK 1 - -/* define if zlib is available */ -#define HAVE_ZLIB /**/ - -/* Define to 1 if the system has the type `_Bool'. */ -/* #undef HAVE__BOOL */ - -/* Possible value for SIGNED_RIGHT_SHIFT_IS */ -#define LOGICAL_RIGHT_SHIFT 2 - -/* Define to 1 if `lstat' dereferences a symlink specified with a trailing - slash. */ -#define LSTAT_FOLLOWS_SLASHED_SYMLINK 1 - -/* Define to the sub-directory in which libtool stores uninstalled libraries. 
- */ -#define LT_OBJDIR ".libs/" - -/* Name of package */ -#define PACKAGE "thrift" - -/* Define to the address where bug reports for this package should be sent. */ -#define PACKAGE_BUGREPORT "" - -/* Define to the full name of this package. */ -#define PACKAGE_NAME "thrift" - -/* Define to the full name and version of this package. */ -#define PACKAGE_STRING "thrift 0.9.1" - -/* Define to the one symbol short name of this package. */ -#define PACKAGE_TARNAME "thrift" - -/* Define to the home page for this package. */ -#define PACKAGE_URL "" - -/* Define to the version of this package. */ -#define PACKAGE_VERSION "0.9.1" - -/* Define as the return type of signal handlers (`int' or `void'). */ -#define RETSIGTYPE void - -/* Define to the type of arg 1 for `select'. */ -#define SELECT_TYPE_ARG1 int - -/* Define to the type of args 2, 3 and 4 for `select'. */ -#define SELECT_TYPE_ARG234 (fd_set *) - -/* Define to the type of arg 5 for `select'. */ -#define SELECT_TYPE_ARG5 (struct timeval *) - -/* Indicates the effect of the right shift operator on negative signed - integers */ -#define SIGNED_RIGHT_SHIFT_IS 1 - -/* If using the C implementation of alloca, define if you know the - direction of stack growth for your system; otherwise it will be - automatically deduced at runtime. - STACK_DIRECTION > 0 => grows toward higher addresses - STACK_DIRECTION < 0 => grows toward lower addresses - STACK_DIRECTION = 0 => direction of growth unknown */ -/* #undef STACK_DIRECTION */ - -/* Define to 1 if you have the ANSI C header files. */ -#define STDC_HEADERS 1 - -/* Define to 1 if strerror_r returns char *. */ -#define STRERROR_R_CHAR_P 1 - -/* Define to 1 if you can safely include both and . */ -#define TIME_WITH_SYS_TIME 1 - -/* Define to 1 if your declares `struct tm'. 
*/ -/* #undef TM_IN_SYS_TIME */ - -/* Possible value for SIGNED_RIGHT_SHIFT_IS */ -#define UNKNOWN_RIGHT_SHIFT 3 - -/* experimental --enable-boostthreads that replaces POSIX pthread by - boost::thread */ -/* #undef USE_BOOST_THREAD */ - -/* Version number of package */ -#define VERSION "0.9.1" - -/* Define to 1 if `lex' declares `yytext' as a `char *' by default, not a - `char[]'. */ -#define YYTEXT_POINTER 1 - -/* Define for Solaris 2.5.1 so the uint32_t typedef from , - , or is not used. If the typedef were allowed, the - #define below would cause a syntax error. */ -/* #undef _UINT32_T */ - -/* Define for Solaris 2.5.1 so the uint64_t typedef from , - , or is not used. If the typedef were allowed, the - #define below would cause a syntax error. */ -/* #undef _UINT64_T */ - -/* Define for Solaris 2.5.1 so the uint8_t typedef from , - , or is not used. If the typedef were allowed, the - #define below would cause a syntax error. */ -/* #undef _UINT8_T */ - -/* Define to empty if `const' does not conform to ANSI C. */ -/* #undef const */ - -/* Define to `__inline__' or `__inline' if that's what the C compiler - calls it, or to nothing if 'inline' is not supported under any name. */ -#ifndef __cplusplus -/* #undef inline */ -#endif - -/* Define to the type of a signed integer type of width exactly 16 bits if - such a type exists and the standard includes do not define it. */ -/* #undef int16_t */ - -/* Define to the type of a signed integer type of width exactly 32 bits if - such a type exists and the standard includes do not define it. */ -/* #undef int32_t */ - -/* Define to the type of a signed integer type of width exactly 64 bits if - such a type exists and the standard includes do not define it. */ -/* #undef int64_t */ - -/* Define to the type of a signed integer type of width exactly 8 bits if such - a type exists and the standard includes do not define it. */ -/* #undef int8_t */ - -/* Define to rpl_malloc if the replacement function should be used. 
*/ -/* #undef malloc */ - -/* Define to `int' if does not define. */ -/* #undef mode_t */ - -/* Define to `long int' if does not define. */ -/* #undef off_t */ - -/* Define to `int' if does not define. */ -/* #undef pid_t */ - -/* Define to rpl_realloc if the replacement function should be used. */ -/* #undef realloc */ - -/* Define to `unsigned int' if does not define. */ -/* #undef size_t */ - -/* Define to `int' if does not define. */ -/* #undef ssize_t */ - -/* Define to the type of an unsigned integer type of width exactly 16 bits if - such a type exists and the standard includes do not define it. */ -/* #undef uint16_t */ - -/* Define to the type of an unsigned integer type of width exactly 32 bits if - such a type exists and the standard includes do not define it. */ -/* #undef uint32_t */ - -/* Define to the type of an unsigned integer type of width exactly 64 bits if - such a type exists and the standard includes do not define it. */ -/* #undef uint64_t */ - -/* Define to the type of an unsigned integer type of width exactly 8 bits if - such a type exists and the standard includes do not define it. */ -/* #undef uint8_t */ - -/* Define as `fork' if `vfork' does not work. */ -/* #undef vfork */ - -/* Define to empty if the keyword `volatile' does not work. Warning: valid - code using `volatile' can become incorrect without. Disable with care. */ -/* #undef volatile */ - - -#endif - diff --git a/utils/thrift/thrift/thrift-config.h b/utils/thrift/thrift/thrift-config.h index b1bcccba3..e60e4c1ca 100644 --- a/utils/thrift/thrift/thrift-config.h +++ b/utils/thrift/thrift/thrift-config.h @@ -1,24 +1,427 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ +/* lib/cpp/src/thrift/config.h. Generated from config.hin by configure. */ +/* config.hin. Generated from configure.ac by autoheader. */ -#ifdef _WIN32 -# include -#else -# include + +#ifndef CONFIG_H +#define CONFIG_H + + +/* Define if the AI_ADDRCONFIG symbol is unavailable */ +/* #undef AI_ADDRCONFIG */ + +/* Possible value for SIGNED_RIGHT_SHIFT_IS */ +#define ARITHMETIC_RIGHT_SHIFT 1 + +/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP + systems. This function is required for `alloca.c' support on those systems. + */ +/* #undef CRAY_STACKSEG_END */ + +/* Define to 1 if using `alloca.c'. */ +/* #undef C_ALLOCA */ + +/* Define to 1 if you have the `alarm' function. */ +#define HAVE_ALARM 1 + +/* Define to 1 if you have `alloca', as a function or macro. */ +#define HAVE_ALLOCA 1 + +/* Define to 1 if you have and it should be used (not on Ultrix). + */ +#define HAVE_ALLOCA_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_INET_H 1 + +/* define if the Boost library is available */ +#define HAVE_BOOST /**/ + +/* Define to 1 if you have the `bzero' function. */ +#define HAVE_BZERO 1 + +/* Define to 1 if you have the `clock_gettime' function. */ +#define HAVE_CLOCK_GETTIME 1 + +/* Define to 1 if you have the declaration of `strerror_r', and to 0 if you + don't. 
*/ +#define HAVE_DECL_STRERROR_R 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */ +/* #undef HAVE_DOPRNT */ + +/* Define to 1 if you have the header file. */ +#define HAVE_FCNTL_H 1 + +/* Define to 1 if you have the `fork' function. */ +#define HAVE_FORK 1 + +/* Define to 1 if you have the `ftruncate' function. */ +#define HAVE_FTRUNCATE 1 + +/* Define to 1 if you have the `gethostbyname' function. */ +#define HAVE_GETHOSTBYNAME 1 + +/* Define to 1 if you have the `gettimeofday' function. */ +#define HAVE_GETTIMEOFDAY 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* define if libevent is available */ +/* #undef HAVE_LIBEVENT */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LIBINTL_H 1 + +/* Define to 1 if you have the `pthread' library (-lpthread). */ +#define HAVE_LIBPTHREAD 1 + +/* Define to 1 if you have the `rt' library (-lrt). */ +#define HAVE_LIBRT 1 + +/* Define to 1 if you have the `socket' library (-lsocket). */ +/* #undef HAVE_LIBSOCKET */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LIMITS_H 1 + +/* Define to 1 if your system has a GNU libc compatible `malloc' function, and + to 0 otherwise. */ +#define HAVE_MALLOC 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MALLOC_H 1 + +/* Define to 1 if you have the `memmove' function. */ +#define HAVE_MEMMOVE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `memset' function. */ +#define HAVE_MEMSET 1 + +/* Define to 1 if you have the `mkdir' function. */ +#define HAVE_MKDIR 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_NETDB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_NETINET_IN_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_RAND_H 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_OPENSSL_SSL_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_X509V3_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_PTHREAD_H 1 + +/* Define to 1 if the system has the type `ptrdiff_t'. */ +#define HAVE_PTRDIFF_T 1 + +/* Define to 1 if your system has a GNU libc compatible `realloc' function, + and to 0 otherwise. */ +#define HAVE_REALLOC 1 + +/* Define to 1 if you have the `realpath' function. */ +#define HAVE_REALPATH 1 + +/* Define to 1 if you have the `sched_get_priority_max' function. */ +#define HAVE_SCHED_GET_PRIORITY_MAX 1 + +/* Define to 1 if you have the `sched_get_priority_min' function. */ +#define HAVE_SCHED_GET_PRIORITY_MIN 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SCHED_H 1 + +/* Define to 1 if you have the `select' function. */ +#define HAVE_SELECT 1 + +/* Define to 1 if you have the `socket' function. */ +#define HAVE_SOCKET 1 + +/* Define to 1 if you have the `sqrt' function. */ +#define HAVE_SQRT 1 + +/* Define to 1 if `stat' has the bug that it succeeds when given the + zero-length file name argument. */ +/* #undef HAVE_STAT_EMPTY_STRING_BUG */ + +/* Define to 1 if stdbool.h conforms to C99. */ +#define HAVE_STDBOOL_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDDEF_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the `strchr' function. */ +#define HAVE_STRCHR 1 + +/* Define to 1 if you have the `strdup' function. */ +#define HAVE_STRDUP 1 + +/* Define to 1 if you have the `strerror' function. */ +#define HAVE_STRERROR 1 + +/* Define to 1 if you have the `strerror_r' function. */ +#define HAVE_STRERROR_R 1 + +/* Define to 1 if you have the `strftime' function. */ +#define HAVE_STRFTIME 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the `strstr' function. */ +#define HAVE_STRSTR 1 + +/* Define to 1 if you have the `strtol' function. */ +#define HAVE_STRTOL 1 + +/* Define to 1 if you have the `strtoul' function. */ +#define HAVE_STRTOUL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_PARAM_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_POLL_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_RESOURCE_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SELECT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SOCKET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UN_H 1 + +/* Define to 1 if you have that is POSIX.1 compatible. */ +#define HAVE_SYS_WAIT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if you have the `vfork' function. */ +#define HAVE_VFORK 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_VFORK_H */ + +/* Define to 1 if you have the `vprintf' function. */ +#define HAVE_VPRINTF 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_WCHAR_H 1 + +/* Define to 1 if `fork' works. */ +#define HAVE_WORKING_FORK 1 + +/* Define to 1 if `vfork' works. */ +#define HAVE_WORKING_VFORK 1 + +/* define if zlib is available */ +#define HAVE_ZLIB /**/ + +/* Define to 1 if the system has the type `_Bool'. */ +/* #undef HAVE__BOOL */ + +/* Possible value for SIGNED_RIGHT_SHIFT_IS */ +#define LOGICAL_RIGHT_SHIFT 2 + +/* Define to 1 if `lstat' dereferences a symlink specified with a trailing + slash. 
*/ +#define LSTAT_FOLLOWS_SLASHED_SYMLINK 1 + +/* Define to the sub-directory in which libtool stores uninstalled libraries. + */ +#define LT_OBJDIR ".libs/" + +/* Name of package */ +#define PACKAGE "thrift" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "thrift" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "thrift 0.9.1" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "thrift" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "0.9.1" + +/* Define as the return type of signal handlers (`int' or `void'). */ +#define RETSIGTYPE void + +/* Define to the type of arg 1 for `select'. */ +#define SELECT_TYPE_ARG1 int + +/* Define to the type of args 2, 3 and 4 for `select'. */ +#define SELECT_TYPE_ARG234 (fd_set *) + +/* Define to the type of arg 5 for `select'. */ +#define SELECT_TYPE_ARG5 (struct timeval *) + +/* Indicates the effect of the right shift operator on negative signed + integers */ +#define SIGNED_RIGHT_SHIFT_IS 1 + +/* If using the C implementation of alloca, define if you know the + direction of stack growth for your system; otherwise it will be + automatically deduced at runtime. + STACK_DIRECTION > 0 => grows toward higher addresses + STACK_DIRECTION < 0 => grows toward lower addresses + STACK_DIRECTION = 0 => direction of growth unknown */ +/* #undef STACK_DIRECTION */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define to 1 if strerror_r returns char *. */ +#define STRERROR_R_CHAR_P 1 + +/* Define to 1 if you can safely include both and . */ +#define TIME_WITH_SYS_TIME 1 + +/* Define to 1 if your declares `struct tm'. 
*/ +/* #undef TM_IN_SYS_TIME */ + +/* Possible value for SIGNED_RIGHT_SHIFT_IS */ +#define UNKNOWN_RIGHT_SHIFT 3 + +/* experimental --enable-boostthreads that replaces POSIX pthread by + boost::thread */ +/* #undef USE_BOOST_THREAD */ + +/* Version number of package */ +#define VERSION "0.9.1" + +/* Define to 1 if `lex' declares `yytext' as a `char *' by default, not a + `char[]'. */ +#define YYTEXT_POINTER 1 + +/* Define for Solaris 2.5.1 so the uint32_t typedef from , + , or is not used. If the typedef were allowed, the + #define below would cause a syntax error. */ +/* #undef _UINT32_T */ + +/* Define for Solaris 2.5.1 so the uint64_t typedef from , + , or is not used. If the typedef were allowed, the + #define below would cause a syntax error. */ +/* #undef _UINT64_T */ + +/* Define for Solaris 2.5.1 so the uint8_t typedef from , + , or is not used. If the typedef were allowed, the + #define below would cause a syntax error. */ +/* #undef _UINT8_T */ + +/* Define to empty if `const' does not conform to ANSI C. */ +/* #undef const */ + +/* Define to `__inline__' or `__inline' if that's what the C compiler + calls it, or to nothing if 'inline' is not supported under any name. */ +#ifndef __cplusplus +/* #undef inline */ #endif + +/* Define to the type of a signed integer type of width exactly 16 bits if + such a type exists and the standard includes do not define it. */ +/* #undef int16_t */ + +/* Define to the type of a signed integer type of width exactly 32 bits if + such a type exists and the standard includes do not define it. */ +/* #undef int32_t */ + +/* Define to the type of a signed integer type of width exactly 64 bits if + such a type exists and the standard includes do not define it. */ +/* #undef int64_t */ + +/* Define to the type of a signed integer type of width exactly 8 bits if such + a type exists and the standard includes do not define it. */ +/* #undef int8_t */ + +/* Define to rpl_malloc if the replacement function should be used. 
*/ +/* #undef malloc */ + +/* Define to `int' if does not define. */ +/* #undef mode_t */ + +/* Define to `long int' if does not define. */ +/* #undef off_t */ + +/* Define to `int' if does not define. */ +/* #undef pid_t */ + +/* Define to rpl_realloc if the replacement function should be used. */ +/* #undef realloc */ + +/* Define to `unsigned int' if does not define. */ +/* #undef size_t */ + +/* Define to `int' if does not define. */ +/* #undef ssize_t */ + +/* Define to the type of an unsigned integer type of width exactly 16 bits if + such a type exists and the standard includes do not define it. */ +/* #undef uint16_t */ + +/* Define to the type of an unsigned integer type of width exactly 32 bits if + such a type exists and the standard includes do not define it. */ +/* #undef uint32_t */ + +/* Define to the type of an unsigned integer type of width exactly 64 bits if + such a type exists and the standard includes do not define it. */ +/* #undef uint64_t */ + +/* Define to the type of an unsigned integer type of width exactly 8 bits if + such a type exists and the standard includes do not define it. */ +/* #undef uint8_t */ + +/* Define as `fork' if `vfork' does not work. */ +/* #undef vfork */ + +/* Define to empty if the keyword `volatile' does not work. Warning: valid + code using `volatile' can become incorrect without. Disable with care. */ +/* #undef volatile */ + + +#endif + diff --git a/utils/thrift/thrift/windows/config.h b/utils/thrift/thrift/windows/config.h deleted file mode 100644 index 0555e079f..000000000 --- a/utils/thrift/thrift/windows/config.h +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#ifndef _THRIFT_WINDOWS_CONFIG_H_ -#define _THRIFT_WINDOWS_CONFIG_H_ 1 - -#if defined(_MSC_VER) && (_MSC_VER > 1200) -#pragma once -#endif // _MSC_VER - -#ifndef _WIN32 -#error This is a MSVC header only. -#endif - -// use std::thread in MSVC11 (2012) or newer -#if _MSC_VER >= 1700 -# define USE_STD_THREAD 1 -// otherwise use boost threads -#else -# define USE_BOOST_THREAD 1 -#endif - -#ifndef TARGET_WIN_XP -# define TARGET_WIN_XP 1 -#endif - -#if TARGET_WIN_XP -# ifndef WINVER -# define WINVER 0x0501 -# endif -# ifndef _WIN32_WINNT -# define _WIN32_WINNT 0x0501 -# endif -#endif - -#ifndef _WIN32_WINNT -# define _WIN32_WINNT 0x0601 -#endif - -#pragma warning(disable: 4996) // Deprecated posix name. 
- -#define VERSION "1.0.0-dev" -#define HAVE_GETTIMEOFDAY 1 -#define HAVE_SYS_STAT_H 1 - -#ifdef HAVE_STDINT_H -# include -#else -# include - -typedef boost::int64_t int64_t; -typedef boost::uint64_t uint64_t; -typedef boost::int32_t int32_t; -typedef boost::uint32_t uint32_t; -typedef boost::int16_t int16_t; -typedef boost::uint16_t uint16_t; -typedef boost::int8_t int8_t; -typedef boost::uint8_t uint8_t; -#endif - -#include -#include -#include -#include -#include -#include - -// windows -#include -#include -#pragma comment(lib, "Ws2_32.lib") -#pragma comment(lib, "advapi32.lib") //For security APIs in TPipeServer - -#endif // _THRIFT_WINDOWS_CONFIG_H_ diff --git a/writeengine/bulk/bulkload.py b/writeengine/bulk/bulkload.py deleted file mode 100644 index 9a6ab0e06..000000000 --- a/writeengine/bulk/bulkload.py +++ /dev/null @@ -1,299 +0,0 @@ -#!/usr/bin/python -## -## Bulkloader script by Martin Thomas -## - -import os, sys, glob, shutil, xml.dom.minidom -import getopt -import logging -import time - -logger = logging.getLogger() -shdlr = logging.StreamHandler() -fhdlr = logging.FileHandler(filename='bulkload.log' ) -formatter = logging.Formatter('%(asctime)s:%(levelname)s: %(message)s') -shdlr.setFormatter(formatter) -fhdlr.setFormatter(formatter) -logger.addHandler(shdlr) -logger.addHandler(fhdlr) - -## only report INFO or higher - change to WARNING to silence all logging -logger.setLevel(logging.INFO) - - -def usage(): - print """ - - Bulkload.py is intended to automate the manual steps required to load the database and build indexes from scratch. 
- - - ipcs-pat will be built if missing - - cpimport will be removed and rebuilt - - PrimProc will be stopped and started - - shared memory sgements wil be removed using ipcs-pat - - database files will be removed - - dbgen will be run with option 5 - - oid files and job files will be copied to correct locations - - column data will be parsed and loaded using Job 299 - - index data will be exported, sorted and loaded using Job 300 - - Options: - -w or --wedir= : Specify the write engine branch to use instead of the default trunk - -n or --nocache= : Specify either col or idx and the -c flag will NOT be sent to cpimport - -u or --usage : Usage message - - Example: - bulkload.py -w/home/adevelop/genii/we1.1 --nocache=idx - Load the database using the we1.1 branch for writeengine and do not use cache when building indexes - - THIS SPACE LEFT INTENTIONALLY BLANK - """ - -def find_paths(): - - """Find DBRoot and BulkRoot.""" - try: - config_file = os.environ['COLUMNSTORE_CONFIG_FILE'] - except KeyError: - try: - logger.info("Environment variable COLUMNSTORE_CONFIG_FILE not set, looking for system Columnstore.xml") - config_file = '/usr/local/mariadb/columnstore/etc/Columnstore.xml' - os.lstat(config_file) - except: - logger.error('No config file available') - sys.exit('No config file available') - try: - xmldoc = xml.dom.minidom.parse(config_file) - bulk_node = xmldoc.getElementsByTagName('BulkRoot')[0] - db_node = xmldoc.getElementsByTagName('DBRoot1')[0] - bulk_dir = bulk_node.childNodes[0].nodeValue - data_dir = db_node.childNodes[0].nodeValue - - except Exception, e: - logger.error('Error parsing config file') - logger.error(e) - sys.exit('Error parsing config file') - - return (bulk_dir, data_dir) - -def check_dirs(bulkroot, dbroot): - - problem = 0 - res = 0 - reqd_dirs = { - os.getenv('HOME')+'/genii' : "No genii directory found (contains tools required to continue) (%s)", - bulkroot: "Bulkroot specified as %s but not found", - bulkroot+'/job': "No job directory 
found - needed to store Job xml files (looked in %s)", - bulkroot+'/data/import': "No data/import directory found - expected %s to hold data to be loaded", - bulkroot+'/log': "No data/log directory found - expected %s to log into", - dbroot : "DBroot specified as %s but not found" - } - for dir in reqd_dirs.keys(): - try: - res = os.lstat(dir) - except: - problem = 1 - logger.error(reqd_dirs[dir]%dir) - - if problem: - sys.exit(1) - -def fix_hwm(job_file): - - """Find hwm in xml file and change to 0""" - - import re - - src_file = open(job_file, 'r') - dst_file = open(job_file+'.tmp', 'w') - - rep = re.compile('hwm="1"') - - for line in src_file: - line = rep.sub('hwm="0"', line) - dst_file.write(line) - # use os.rename instead of shutil.move to avoid problems traversing devices - os.rename(job_file+'.tmp', job_file) - -def find_indexes(job_file): - - """Find index definitions in job_file and return list of files to sort""" - - index_files = [] - try: # try because we may have an old version of python - xmldoc = xml.dom.minidom.parse(job_file) - - for index_node in xmldoc.getElementsByTagName('Index'): - index_files.append(index_node.getAttribute('mapName')) - except: - import re - f = open(job_file) - for line in f.read(): - b =re.search('mapName="(CPL_[0-9A-Z_]+)"', line) - try: # try because not every line will match - index_files.append(b.group(1)) - except: pass - - return index_files - -def exec_cmd(cmd, args): - """Execute command using subprocess module or if that fails, - use os.system - """ - - try: - import subprocess - - try: - retcode = call(cmd + " "+args, shell=True) - if retcode < 0: - print >>sys.stderr, "Child was terminated by signal", -retcode - sys.exit(-1) - - else: - print >>sys.stderr, "Child returned", retcode - - except OSError, e: - - print >>sys.stderr, "Execution failed:", e - sys.exit(-1) - except: - logger.info ('Old version of Python - subprocess not available, falling back to os.system') - logger.info ('Executing: '+cmd+' '+args) - 
res = os.system(cmd+' '+args) - if res: - logger.error('Bad return code %i from %s'%(res, cmd)) - sys.exit( res ) - - -def build_tool(tool): - """ - Use the tool dictionary to determine if required tool exists - and build if not - """ - - if not os.path.exists(tool['path']+tool['tool']): - logger.warn ("Building %s before continuing"%tool['tool']) - curdir=os.getcwd() - os.chdir(tool['path']) - exec_cmd(tool['builder'], tool['args']) - os.chdir(curdir) - -def main(): - """ - Bulk load the database.. - Check that we can write OIDfiles, that all required tools exist, - clean up old files, sort the index inserts and generally rock and roll - """ - start_dir = curdir=os.getcwd() # remember where we started - - if not os.environ.has_key('LD_LIBRARY_PATH'): - logger.info('No environment variable LD_LIBRARY_PATH') - else: - if len(os.getenv('LD_LIBRARY_PATH'))<5: - logger.info('Suspicous LD_LIBRARY_PATH: %s'%os.getenv('LD_LIBRARY_PATH')) - - #-- figure out paths - home = os.getenv('HOME') - genii = home+'/genii' - cache = {} - cache['idx'] = '-c' - cache['col'] = '-c' - -#-- allow us to specify a write engine branch - opts, args = getopt.getopt(sys.argv[1:], 'w:n:u', ['wedir=', 'nocache=', 'usage']) - wedir = genii+'/writeengine' - for opt, arg in opts: - if opt =='-w' or opt =='--wedir': - wedir = arg - - if opt == '-n' or opt == '--nocache': - if (arg=='idx' or arg=='col'): - cache[arg] = '' - logger.info("No cache for %s"% arg) - - if opt == '-u' or opt == '--usage': - usage() - sys.exit() - - logger.info("Using writengine at %s"%wedir) - - (bulkroot, dbroot) = find_paths() - - logger.info ("Bulkroot: %s \tDBRoot: %s\n"%(bulkroot, dbroot)) - - check_dirs(bulkroot, dbroot) - - if len(glob.glob(bulkroot+'/data/import/*tbl')) == 0: - sys.exit("No files for import found in BulkRoot: %s"%(bulkroot)) - - if len(glob.glob(dbroot+'/000.dir'))==0: - logger.info("No files found in DBRoot: %s (not fatal)"%dbroot) - -## force rebuild cpimport and build ipcs-pat if required - - 
build_tool({'path':genii+'/versioning/BRM/', - 'tool':'ipcs-pat', - 'builder':'make', 'args':'tools'}) - - build_tool({'path':wedir+'/bulk/', - 'tool':'cpimport', - 'builder':'make', 'args':'clean'}) - try: - exec_cmd('rm -f', wedir+'/bulk/cpimport') - except: - pass - - try: - os.lstat(start_dir+'/cpimport') # look in local directory first - except: - build_tool({'path':wedir+'/bulk/', - 'tool':'cpimport', - 'builder':'make', 'args':'cpimport'}) - - -## clean up before starting -## remove old db files, removed old temp files, remove shared memory segments, -## kill old PrimProc and start new one - - logger.info ("Removing old DB files") - exec_cmd('rm -fr ', dbroot+'/000.dir') - - logger.info ("Removing old temp files") - exec_cmd('rm -fr ', bulkroot+'/data/import/*.idx.txt') - - logger.info ("Removing old process files") - exec_cmd('rm -fr ', bulkroot+'/process/*.*') - - logger.info("Killing primProc") - os.system('killall -q -u $USER PrimProc') - - logger.info ("kill controllernode and workernode") - exec_cmd(genii+'/export/bin/dbrm', "stop ") - - time.sleep(2); - logger.info ("Removing shared memory segments") - exec_cmd(genii+'/versioning/BRM/ipcs-pat', '-d') - - logger.info("Starting controllernode workernode") - exec_cmd(genii+'/export/bin/dbrm', "start ") - - logger.info("Starting primProc") - exec_cmd(genii+'/export/bin/PrimProc', "> primproc.log &") - -## run dbbuilder - add yes command at front to automatically answer questions - logger.info ("Building db and indexes (no data inserted)") - exec_cmd('yes | '+genii+'/tools/dbbuilder/dbbuilder', ' 5') - - logger.info ("Relocating OID files") - - for xmlfile in glob.glob('./Job*xml'): - logger.info ("Copying %s to %s\n"%(xmlfile, bulkroot+'/job')) - # use os.rename instead of shutil.move to avoid problems traversing devices - os.rename(xmlfile, bulkroot+'/job/'+xmlfile) - - logger.info("Using cpimport at %s"%(wedir+'/bulk/cpimport')) - exec_cmd('time '+wedir+'/bulk/cpimport', '-j 299 ') - 
exec_cmd(wedir+'/bulk/cpimport', '-c -j 300 ' ) - -## the following line allows either interactive use or module import -if __name__=="__main__": main() diff --git a/writeengine/bulk/bulkloadp.sh b/writeengine/bulk/bulkloadp.sh deleted file mode 100755 index 80015e12b..000000000 --- a/writeengine/bulk/bulkloadp.sh +++ /dev/null @@ -1,121 +0,0 @@ -#!/bin/bash - -#This is the procedure for running bulkload using cpimport program -#Usage of this program : -#The necessary input parameter is the schema name -#For example: bulkload.sh TPCH - -#A table name and a Job ID can be entered by user when it is prompted or they can be skipped by hitting enter key -#When the table name is skipped, ALL of the columns and index in ALL of the tables in the schema will be loaded - -#When table name is entered, All of the columns and indexes in the entered table will be loaded -#Job ID will determine the names of the two xml files. For example, job id 100 will generate Job_100.xml for columns and Job_101 for index xml file. Job id for index xml file is the entered job id +1 -#if the job id is skipped, the default job ids are 299 and 300 for column and index files -#There are two xml files will be generated which reside in bulkroot directory under subdirectory job -#For example, the job directory may look like /usr/local/mariadb/columnstore/test/bulk/job - -# Set up a default search path. -PROG_NAME=$(basename $0) -SUFFIX=.tbl -TABLENAME="" -while getopts 't:j:e:s:d:p:n:u:h' OPTION -do - case ${OPTION} in - s) Schema=${OPTARG};; - t) TABLENAME=${OPTARG};; - j) JOBID=${OPTARG};; - e) MAXERROR=${OPTARG};; - p) DESC=${OPTARG};; - d) DELIMITER=${OPTARG};; - n) NAME=${OPTARG};; - u) USER=${OPTARG};; - h) echo "Options: ${PROG_NAME} -s schema -j jobid [-t TableName -e max_error_row -p description -d delimiter -n name -u user]" - exit 2;; - \?) 
echo "Options: ${PROG_NAME} -s schema -j jobid [-t TableName -e max_error_row -s description -d delimiter -n name -u user]" - exit 2;; - esac -done - -#generate column xml file -echo "MAXERROR in $PROG_NAME =" $MAXERROR -echo "JOBID in $PROG_NAME =" $JOBID -echo "Schema is " $Schema -echo "DESC is " $DESC -echo "DELIMITER =" $DELIMITER -echo "TABLENAME is " $TABLENAME -echo "NAME is " $NAME - -if [ -n "$TABLENAME" ]; then - ./colxml $Schema -t $TABLENAME -j $JOBID -d $DELIMITER -s "$DESC" -e $MAXERROR -n "$NAME" -u $USER -if [ "$?" <> "0" ]; then - echo "Error in colxml !" 1>&2 - exit 1 -fi -command="colxml $Schema -t $TABLENAME -j $JOBID -d $DELIMITER -s \"$DESC\" -e $MAXERROR -n \"$NAME\" -u \"$USER\" " - echo $command -else - ./colxml $Schema -j $JOBID -d $DELIMITER -s "$DESC" -e $MAXERROR -n "$NAME" -u $USER -if [ "$?" <> "0" ]; then - echo "Error in colxml !" 1>&2 - exit 1 -fi - command="colxml $Schema -j $JOBID -d "$DELIMITER" -s \"$DESC\" -e $MAXERROR -n \"$NAME\" -u \"$USER\" " - echo $command -fi - -#generate index xml file -DESC="table index definition" -NAME="index definitions for tables in $Schema" -let "JOBID2 = JOBID+1" -echo "DEFAULT INDEX JOB ID is " $JOBID2 -if [ -n "$TABLENAME" ]; then - ./indxml $Schema -t $TABLENAME -j $JOBID2 -s "$DESC" -e $MAXERROR -n "$NAME" -u $USER -if [ "$?" <> "0" ]; then - echo "Error in indxml !" 1>&2 - exit 1 -fi - - command="indxml $Schema -t $TABLENAME -j $JOBID2 -s \"$DESC\" -e $MAXERROR -n \"$NAME\" -u \"$USER\" " - echo $command - -else - ./indxml $Schema -j $JOBID2 -s "$DESC" -e $MAXERROR -n "$NAME" -u $USER -if [ "$?" <> "0" ]; then - echo "Error in colxml !" 
1>&2 - exit 1 -fi - - command="indxml $Schema -j $JOBID2 -s \"$DESC\" -e $MAXERROR -n \"$NAME\" -u \"$USER\" " - echo $command -fi -#get bulkroot -if [ -n "$CALPONT_CONFIG_FILE" ]; then - echo "CALPONT_CONFIG_FILE=" $CALPONT_CONFIG_FILE -elif [ -z "$CALPONT_CONFIG_FILE"]; then - CALPONT_CONFIG_FILE="/usr/local/mariadb/columnstore/etc/Columnstore.xml" - echo "CALPONT_CONFIG_FILE=" $CALPONT_CONFIG_FILE -else - CALPONT_CONFIG_FILE="/usr/local/mariadb/columnstore/etc/Columnstore.xml" - echo "CALPONT_CONFIG_FILE=" $CALPONT_CONFIG_FILE -fi - -awk '/BulkRoot/ { sub(//,"",$0); sub(/<\/BulkRoot>/,"",$0); sub(/" "/,"",$0);print $0 > "tmp.txt"}' $CALPONT_CONFIG_FILE -sed -e 's/ *//g' tmp.txt > out.txt - -BulkRoot=$(cat out.txt) -echo "BulkRoot=" $BulkRoot -rm -rf out.txt tmp.txt - -#bulk load column files -./cpimport -j $JOBID -command="cpimport -j $JOBID" -echo $command -#bulk load parallel index files -#./splitidx -j $JOBID2 -#IDX_SHELL_SCRIPT="$BulkRoot/process/Job_$JOBID2.sh" -#chmod +x $IDX_SHELL_SCRIPT -#echo " run parallel loading $IDX_SHELL_SCRIPT" -#$IDX_SHELL_SCRIPT - - - diff --git a/writeengine/bulk/checkidx.py b/writeengine/bulk/checkidx.py deleted file mode 100755 index be22a674b..000000000 --- a/writeengine/bulk/checkidx.py +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/python - -import os, sys, glob, shutil, xml.dom.minidom - -def find_paths(): - - """Find DBRoot and BulkRoot.""" - try: - config_file = os.environ['COLUMNSTORE_CONFIG_FILE'] - except KeyError: - try: - config_file = '/usr/local/mariadb/columnstore/etc/Columnstore.xml' - os.lstat(config_file) - except: - sys.exit('No config file available') - - - xmldoc = xml.dom.minidom.parse(config_file) - bulk_node = xmldoc.getElementsByTagName('BulkRoot')[0] - db_node = xmldoc.getElementsByTagName('DBRoot')[0] - - bulk_dir = bulk_node.childNodes[0].nodeValue - data_dir = db_node.childNodes[0].nodeValue - - return (bulk_dir, data_dir) - - -def validate_indexes(job_file): - index_files = [] - xmldoc = 
xml.dom.minidom.parse(job_file) - - for index_node in xmldoc.getElementsByTagName('Index'): - curTreeOid = index_node.getAttribute('iTreeOid') - curListOid = index_node.getAttribute('iListOid') - curMapOid = index_node.getAttribute('mapOid') - #curIdxCmdArg = ' -t ' + curTreeOid + ' -l ' + curListOid + ' -v -c ' + curMapOid + ' > idxCol_' + curMapOid+'.out' - curIdxCmdArg = ' -t %s -l %s -v -c %s > idxCol_%s.out' % (curTreeOid, curListOid, curMapOid, curMapOid) - index_files.append( curIdxCmdArg ) - - return index_files - -def exec_cmd(cmd, args): - """Execute command using subprocess module or if that fails, - use os.system - """ - - try: - import subprocess - - try: - retcode = call(cmd + " "+args, shell=True) - if retcode < 0: - print >>sys.stderr, "Child was terminated by signal", -retcode - sys.exit(-1) - - else: - print >>sys.stderr, "Child returned", retcode - - except OSError, e: - - print >>sys.stderr, "Execution failed:", e - sys.exit(-1) - except: - res = os.system(cmd+' '+args) - if res: - sys.exit( res ) - - - -def main(): - """ - Validate indexes.. 
- """ - - if len(os.getenv('LD_LIBRARY_PATH'))<5: - print 'Suspicous LD_LIBRARY_PATH: %s'%os.getenv('LD_LIBRARY_PATH') - - home = os.getenv('HOME') - genii = home+'/genii' - - (bulkroot, dbroot) = find_paths() - - if len(glob.glob(bulkroot+'/job/Job_300.xml')) == 0: - sys.exit("No Job_300.xml exist ") - - indexes = validate_indexes(bulkroot+'/job/Job_300.xml') - for idxCmdArg in indexes: - print idxCmdArg - exec_cmd( genii + '/tools/evalidx/evalidx', idxCmdArg ) - - -## the following line allows either interactive use or module import -if __name__=="__main__": main() diff --git a/writeengine/bulk/cpimport.sh b/writeengine/bulk/cpimport.sh deleted file mode 100755 index 2050fc6cf..000000000 --- a/writeengine/bulk/cpimport.sh +++ /dev/null @@ -1,93 +0,0 @@ -#!/bin/bash - -#This is the procedure for running bulkload using cpimport program -#Usage of this program : -#The necessary input parameter is the schema name -#For example: bulkload.sh TPCH - -#A table name and a Job ID can be entered by user when it is prompted or they can be skipped by hitting enter key -#When the table name is skipped, ALL of the columns and index in ALL of the tables in the schema will be loaded - -#When table name is entered, All of the columns and indexes in the entered table will be loaded -#Job ID will determine the names of the two xml files. For example, job id 100 will generate Job_100.xml for columns and Job_101 for index xml file. Job id for index xml file is the entered job id +1 -#if the job id is skipped, the default job ids are 299 and 300 for column and index files -#There are two xml files will be generated which reside in bulkroot directory under subdirectory job -#For example, the job directory may look like /usr/local/mariadb/columnstore/test/bulk/job - -# Set up a default search path. 
- -#echo "This is Script name " $0 -PROG_NAME=$(basename $0) - -USERNAME=`grep "^${USER}:" /etc/passwd | cut -d: -f5` -JOBID="" -TABLENAME="" -Schema="" -DELIMITER="|" -MAXERROR=10 -FORMAT=CSV -DESC="table columns definition" -NAME="table columns definition" - - -while getopts 't:j:e:s:d:p:n:hu' OPTION -do - case ${OPTION} in - s) Schema=${OPTARG};; - t) TABLENAME=${OPTARG};; - j) JOBID=${OPTARG};; - e) MAXERROR=${OPTARG};; - p) DESC=${OPTARG};; - d) DELIMITER=${OPTARG};; - n) NAME=${OPTARG};; - h) echo "Usage: ${PROG_NAME} -s schema -j jobid [-t TableName -e max_error_row -p description -d delimiter -n name ]" - exit 2;; - u) echo "Usage: ${PROG_NAME} -s schema -j jobid [-t TableName -e max_error_row -p description -d delimiter -n name ]" - exit 2;; - \?) echo "Usage: ${PROG_NAME} -s schema -j jobid [-t TableName -e max_error_row -p description -d delimiter -n name ]" - exit 2;; - esac -done - -if [ -n "$Schema" ]; then - echo "Schema is " $Schema -else - echo "Error using the script, a schema is needed! " - echo "usage as follows: " - echo "Usage: ${PROG_NAME} -s schema -j jobid [-t TableName -p description -d delimiter -e max_error_rows -n name ]" - echo "PLEASE ONLY INPUT SCHEMA NAME:" - read Schema - if [ -n "$Schema" ]; then - echo "Schema is " $Schema - else - echo "Error using the script, a schema is needed! " - echo "Usage: ${PROG_NAME} -s schema -j jobid [-t TableName -p description -d delimiter -e max_error_rows -n name ]" - echo "Try again! Goodbye!" - exit 2; - fi -fi -NAME="column definitions for tables in $Schema" - -if [ -n "$JOBID" ]; then - echo "INPUT JOB ID is " $JOBID -else - echo "Error using the script, a jobid is needed! " - echo "PLEASE INPUT jobid:" - read JOBID - if [ -n "$JOBID" ]; then - echo "JOBID is " $JOBID - else - echo "Error using the script, a jobid is needed! " - echo "Usage: ${PROG_NAME} -s schema -j jobid [-t TableName -s description -d delimiter -e max_error_rows -n name ]" - echo "Try again! Goodbye!" 
- exit 2; - fi -fi -################################################################################ - -if [ -n "$TABLENAME" ]; then - ./bulkloadp.sh -e $MAXERROR -s $Schema -t "$TABLENAME" -j $JOBID -p "$DESC" -d "$DELIMITER" -n "$NAME" -u $USER - -else - ./bulkloadp.sh -e $MAXERROR -s $Schema -j $JOBID -d "$DELIMITER" -p "$DESC" -n "$NAME" -u $USER -fi diff --git a/writeengine/bulk/dbload_tmplate.sh b/writeengine/bulk/dbload_tmplate.sh deleted file mode 100755 index 9f050a469..000000000 --- a/writeengine/bulk/dbload_tmplate.sh +++ /dev/null @@ -1,95 +0,0 @@ -#!/bin/bash - -#This is the procedure for running bulkload using cpimport program -#Usage of this program : -#The necessary input parameter is the schema name -#For example: bulkload.sh TPCH - -#A table name and a Job ID can be entered by user when it is prompted or they can be skipped by hitting enter key -#When the table name is skipped, ALL of the columns and index in ALL of the tables in the schema will be loaded - -#When table name is entered, All of the columns and indexes in the entered table will be loaded -#Job ID will determine the names of the two xml files. For example, job id 100 will generate Job_100.xml for columns and Job_101 for index xml file. Job id for index xml file is the entered job id +1 -#if the job id is skipped, the default job ids are 299 and 300 for column and index files -#There are two xml files will be generated which reside in bulkroot directory under subdirectory job -#For example, the job directory may look like /usr/local/mariadb/columnstore/test/bulk/job - -# Set up a default search path. 
-PATH="$HOME/genii/export/bin:.:/sbin:/usr/sbin:/bin:/usr/bin:/usr/X11R6/bin" -export PATH - -#echo "This is Script name " $0 -PROG_NAME=$(basename $0) - -USERNAME=`grep "^${USER}:" /etc/passwd | cut -d: -f5` -JOBID="" -TABLENAME="" -Schema="" -DELIMITER="|" -MAXERROR=10 -FORMAT=CSV -DESC="table columns definition" -NAME="table columns definition" - - -while getopts 't:j:e:s:d:p:n:hu' OPTION -do - case ${OPTION} in - s) Schema=${OPTARG};; - t) TABLENAME=${OPTARG};; - j) JOBID=${OPTARG};; - e) MAXERROR=${OPTARG};; - p) DESC=${OPTARG};; - d) DELIMITER=${OPTARG};; - n) NAME=${OPTARG};; - h) echo "Usage: ${PROG_NAME} -s schema -j jobid [-t TableName -e max_error_row -p description -d delimiter -n name ]" - exit 2;; - u) echo "Usage: ${PROG_NAME} -s schema -j jobid [-t TableName -e max_error_row -p description -d delimiter -n name ]" - exit 2;; - \?) echo "Usage: ${PROG_NAME} -s schema -j jobid [-t TableName -e max_error_row -p description -d delimiter -n name ]" - exit 2;; - esac -done - -if [ -n "$Schema" ]; then - echo "Schema is " $Schema -else - echo "Error using the script, a schema is needed! " - echo "usage as follows: " - echo "Usage: ${PROG_NAME} Schema -j jobid [-t TableName -p description -d delimiter -e max_error_rows -n name ]" - echo "PLEASE ONLY INPUT SCHEMA NAME:" - read Schema - if [ -n "$Schema" ]; then - echo "Schema is " $Schema - else - echo "Error using the script, a schema is needed! " - echo "Usage: ${PROG_NAME} Schema -j jobid [-t TableName -p description -d delimiter -e max_error_rows -n name ]" - echo "Try again! Goodbye!" - exit 2; - fi -fi -NAME="column definitions for tables in $Schema" - -if [ -n "$JOBID" ]; then - echo "INPUT JOB ID is " $JOBID -else - echo "Error using the script, a jobid is needed! " - echo "PLEASE INPUT jobid:" - read JOBID - if [ -n "$JOBID" ]; then - echo "JOBID is " $JOBID - else - echo "Error using the script, a jobid is needed! 
" - echo "Usage: ${PROG_NAME} Schema -j jobid [-t TableName -s description -d delimiter -e max_error_rows -n name ]" - echo "Try again! Goodbye!" - exit 2; - fi -fi -################################################################################ - -if [ -n "$TABLENAME" ]; then - bulkloadp.sh -e $MAXERROR -s $Schema -t "$TABLENAME" -j $JOBID -p "$DESC" -d "$DELIMITER" -n "$NAME" -u $USER - -else - bulkloadp.sh -e $MAXERROR -s $Schema -j $JOBID -d "$DELIMITER" -p "$DESC" -n "$NAME" -u $USER -fi diff --git a/writeengine/bulk/dbloadp.sh b/writeengine/bulk/dbloadp.sh deleted file mode 100755 index 550f70a30..000000000 --- a/writeengine/bulk/dbloadp.sh +++ /dev/null @@ -1,3 +0,0 @@ -cleanup.sh -dbbuilder.sh -bulkloadp.sh diff --git a/writeengine/bulk/qa-bulkload.py b/writeengine/bulk/qa-bulkload.py deleted file mode 100644 index ea5436fd8..000000000 --- a/writeengine/bulk/qa-bulkload.py +++ /dev/null @@ -1,299 +0,0 @@ -#!/usr/bin/python -## -## Bulkloader script by Martin Thomas -## - -import os, sys, glob, shutil, xml.dom.minidom -import getopt -import logging - -logger = logging.getLogger() -shdlr = logging.StreamHandler() -fhdlr = logging.FileHandler(filename='bulkload.log' ) -formatter = logging.Formatter('%(asctime)s:%(levelname)s: %(message)s') -shdlr.setFormatter(formatter) -fhdlr.setFormatter(formatter) -logger.addHandler(shdlr) -logger.addHandler(fhdlr) - -## only report INFO or higher - change to WARNING to silence all logging -logger.setLevel(logging.INFO) - - -def usage(): - print """ - - qa-bulkload.py is intended to automate the manual steps required to load the - database and build indexes from scratch. 
- - - PrimProc will be stopped and started - - shared memory sgements wil be removed using ipcs-pat - - database files will be removed - - dbgen will be run with option 5 - - oid files and job files will be copied to correct locations - - column data will be parsed and loaded using Job 299 - - index data will be exported, sorted and loaded using Job 300 - - Options: - -n or --nocache= : Specify either col or idx and the -c flag will NOT be sent to cpimport - -u or --usage : Usage message - - Example: - bulkload.py --nocache=idx - Load the database, do not use cache when building indexes - - THIS SPACE LEFT INTENTIONALLY BLANK - """ - -def find_paths(): - - """Find DBRoot and BulkRoot.""" - try: - config_file = os.environ['COLUMNSTORE_CONFIG_FILE'] - except KeyError: - try: - logger.info("Environment variable COLUMNSTORE_CONFIG_FILE not set, looking for system Columnstore.xml") - config_file = '/usr/local/mariadb/columnstore/etc/Columnstore.xml' - os.lstat(config_file) - except: - logger.error('No config file available') - sys.exit('No config file available') - try: - xmldoc = xml.dom.minidom.parse(config_file) - bulk_node = xmldoc.getElementsByTagName('BulkRoot')[0] - db_node = xmldoc.getElementsByTagName('DBRoot')[0] - bulk_dir = bulk_node.childNodes[0].nodeValue - data_dir = db_node.childNodes[0].nodeValue - - except Exception, e: - logger.error('Error parsing config file') - logger.error(e) - sys.exit('Error parsing config file') - - return (bulk_dir, data_dir) - -def check_dirs(bulkroot, dbroot): - - problem = 0 - res = 0 - reqd_dirs = { - os.getenv('HOME')+'/genii' : "No genii directory found (contains tools required to continue) (%s)", - bulkroot: "Bulkroot specified as %s but not found", - bulkroot+'/job': "No job directory found - needed to store Job xml files (looked in %s)", - bulkroot+'/data/import': "No data/import directory found - expected %s to hold data to be loaded", - bulkroot+'/log': "No data/log directory found - expected %s to log into", - 
dbroot : "DBroot specified as %s but not found" - } - for dir in reqd_dirs.keys(): - try: - res = os.lstat(dir) - except: - problem = 1 - logger.error(reqd_dirs[dir]%dir) - - if problem: - sys.exit(1) - -def fix_hwm(job_file): - - """Find hwm in xml file and change to 0""" - - import re - - src_file = open(job_file, 'r') - dst_file = open(job_file+'.tmp', 'w') - - rep = re.compile('hwm="1"') - - for line in src_file: - line = rep.sub('hwm="0"', line) - dst_file.write(line) - # use os.rename instead of shutil.move to avoid problems traversing devices - os.rename(job_file+'.tmp', job_file) - -def find_indexes(job_file): - - """Find index definitions in job_file and return list of files to sort""" - - index_files = [] - try: # try because we may have an old version of python - xmldoc = xml.dom.minidom.parse(job_file) - - for index_node in xmldoc.getElementsByTagName('Index'): - index_files.append(index_node.getAttribute('mapName')) - except: - import re - f = open(job_file) - for line in f.read(): - b =re.search('mapName="(CPL_[0-9A-Z_]+)"', line) - try: # try because not every line will match - index_files.append(b.group(1)) - except: pass - - return index_files - -def exec_cmd(cmd, args): - """Execute command using subprocess module or if that fails, - use os.system - """ - - try: - import subprocess - - try: - retcode = call(cmd + " "+args, shell=True) - if retcode < 0: - print >>sys.stderr, "Child was terminated by signal", -retcode - sys.exit(-1) - - else: - print >>sys.stderr, "Child returned", retcode - - except OSError, e: - - print >>sys.stderr, "Execution failed:", e - sys.exit(-1) - except: - logger.info ('Old version of Python - subprocess not available, falling back to os.system') - logger.info ('Executing: '+cmd+' '+args) - res = os.system(cmd+' '+args) - if res: - logger.error('Bad return code %i from %s'%(res, cmd)) - sys.exit( res ) - - -def build_tool(tool): - """ - Use the tool dictionary to determine if required tool exists - and build if not - """ 
- - if not os.path.exists(tool['path']+tool['tool']): - logger.warn ("Building %s before continuing"%tool['tool']) - curdir=os.getcwd() - os.chdir(tool['path']) - exec_cmd(tool['builder'], tool['args']) - os.chdir(curdir) - -def main(): - """ - Bulk load the database.. - Check that we can write OIDfiles, that all required tools exist, - clean up old files, sort the index inserts and generally rock and roll - """ - start_dir = curdir=os.getcwd() # remember where we started - - if not os.environ.has_key('LD_LIBRARY_PATH'): - logger.info('No environment variable LD_LIBRARY_PATH') - else: - if len(os.getenv('LD_LIBRARY_PATH'))<5: - logger.info('Suspicous LD_LIBRARY_PATH: %s'%os.getenv('LD_LIBRARY_PATH')) - - #-- figure out paths - home = os.getenv('HOME') - cache = {} - cache['idx'] = '-c' - cache['col'] = '-c' - -#-- allow us to specify a write engine branch - opts, args = getopt.getopt(sys.argv[1:], 'n:u', ['nocache=', 'usage']) - for opt, arg in opts: - - if opt == '-n' or opt == '--nocache': - if (arg=='idx' or arg=='col'): - cache[arg] = '' - logger.info("No cache for %s"% arg) - - if opt == '-u' or opt == '--usage': - usage() - sys.exit() - - (bulkroot, dbroot) = find_paths() - - logger.info ("Bulkroot: %s \tDBRoot: %s\n"%(bulkroot, dbroot)) - - check_dirs(bulkroot, dbroot) - - if len(glob.glob(bulkroot+'/data/import/*tbl')) == 0: - sys.exit("No files for import found in BulkRoot: %s"%(bulkroot)) - - if len(glob.glob(dbroot+'/000.dir'))==0: - logger.info("No files found in DBRoot: %s (not fatal)"%dbroot) - -## qa version does not build any tools. 
Cease and desist if any tools missing - - toolset = ['dbbuilder', 'cpimport', 'ipcs-pat', 'PrimProc'] - for tool in toolset: - try: - res = os.system('which %s'%tool) - finally: - if res: - logger.error("Fatal error: %s not found"%tool) - sys.exit(-1) - - - -## clean up before starting -## remove old db files, removed old temp files, remove shared memory segments, -## kill old PrimProc and start new one - - logger.info ("Removing old DB files") - exec_cmd('rm -fr ', dbroot+'/000.dir') - - logger.info ("Removing old temp files") - exec_cmd('rm -fr ', bulkroot+'/data/import/*.idx.txt') - - logger.info ("Removing shared memory segments") - exec_cmd('ipcs-pat', '-d') - - logger.info("Killing primProc") - os.system('killall -q -u $USER PrimProc') - - logger.info("Starting primProc") - exec_cmd('PrimProc', "> primproc.log &") - -## run dbbuilder - logger.info ("Building db and indexes (no data inserted)") - exec_cmd('yes | dbbuilder', ' 5') - - logger.info ("Relocating OID files") - for file in ['colOIDFile.dat', 'dicOIDFile.dat', 'indexOIDFile.dat']: - # use os.rename instead of shutil.move to avoid problems traversing devices - os.rename(file, dbroot+'/'+file) - - for xmlfile in glob.glob('./Job*xml'): - logger.info ("Copying %s to %s\n"%(xmlfile, bulkroot+'/job')) - # use os.rename instead of shutil.move to avoid problems traversing devices - os.rename(xmlfile, bulkroot+'/job/'+xmlfile) - - exec_cmd('time cpimport', '-j 299 -b %s'%cache['col']) - exec_cmd('time cpimport', '-j 299 -l %s'%cache['col']) - - exec_cmd('time cpimport', '-j 300 -i -o %s'%cache['idx']) - - logger.info("Over-riding HWM in job file - setting to 0") - fix_hwm(bulkroot+'/job/Job_300.xml') - - ## sort the files after scanning index job file for mapName(s) - logger.info ("Sorting indexes before insertion") - indexes = find_indexes(bulkroot+'/job/Job_300.xml') - for index in indexes: - data_file='%s/data/import/%s.dat.idx.txt'%(bulkroot, index) - sort_file 
='%s/data/import/%s.dat.idx.sort'%(bulkroot, index) - exec_cmd('time sort',' -k1 -n %s > %s'%(data_file, sort_file)) - # use os.rename instead of shutil.move to avoid problems traversing devices - os.rename( sort_file, data_file) - - logger.info("Inserting indexes") - try: - logger.info("Trying with -m option") - exec_cmd('cpimport', '-j 300 -m -i -s %s'%cache['idx']) - except: - try: - logger.warn("cpimport with -m option failed, fall back to regular options") - exec_cmd('cpimport', '-j 300 -i -s %s'%cache['idx']) - except: - logger.error("Index load failed") - -## the following line allows either interactive use or module import -if __name__=="__main__": main()