From f32eb9b94ecf01d12f53a1bd6addeba81e811d7f Mon Sep 17 00:00:00 2001 From: Cynthia Shang Date: Thu, 21 Jan 2021 15:21:50 -0500 Subject: [PATCH] Partial multi-repository implementation. Multi-repository implementations for the archive-push, check, info, stanza-create, stanza-upgrade, and stanza-delete commands. Multi-repo configuration is disabled so there should be no behavioral changes between these commands and their current single-repo implementations. Multi-repo documentation and integration tests are still in the multi-repo development branch. All unit tests work as multi-repo since they are able to bypass the configuration restrictions. --- build/lib/pgBackRestBuild/Config/Data.pm | 300 ++- doc/lib/pgBackRestDoc/Common/DocConfig.pm | 2 + doc/xml/dtd/doc.dtd | 1 + doc/xml/release.xml | 9 + src/command/archive/push/file.c | 152 +- src/command/archive/push/file.h | 15 +- src/command/archive/push/protocol.c | 25 +- src/command/archive/push/push.c | 121 +- src/command/check/check.c | 93 +- src/command/help/help.auto.c | 4 + src/command/help/help.c | 8 +- src/command/info/info.c | 1561 ++++++++----- src/command/stanza/create.c | 196 +- src/command/stanza/delete.c | 6 +- src/command/stanza/upgrade.c | 124 +- src/config/config.auto.c | 1 + src/config/config.auto.h | 5 +- src/config/config.c | 3 + src/config/config.intern.h | 1 + src/config/load.c | 72 +- src/config/parse.auto.c | 1349 ++++++++++- src/config/parse.c | 22 +- src/protocol/helper.c | 30 +- src/protocol/helper.h | 1 + test/define.yaml | 2 +- test/expect/mock-all-001.log | 143 +- test/expect/mock-all-002.log | 66 +- test/expect/mock-archive-001.log | 11 +- test/expect/mock-archive-002.log | 11 +- test/expect/mock-archive-stop-001.log | 5 +- test/expect/mock-archive-stop-002.log | 1 + test/expect/mock-stanza-001.log | 37 +- test/expect/mock-stanza-002.log | 35 +- test/expect/real-all-001.log | 68 +- test/lib/pgBackRestTest/Common/StorageRepo.pm | 24 +- .../pgBackRestTest/Env/Host/HostBackupTest.pm | 238 +- test/lib/pgBackRestTest/Env/HostEnvTest.pm | 9 +- .../pgBackRestTest/Module/Real/RealAllTest.pm | 52 +- test/src/module/command/archiveGetTest.c | 19 +- test/src/module/command/archivePushTest.c | 244 +- test/src/module/command/backupTest.c | 10 +- test/src/module/command/checkTest.c | 159 +- test/src/module/command/expireTest.c | 63 +- test/src/module/command/infoTest.c | 2001 ++++++++++++++--- test/src/module/command/remoteTest.c | 1 + test/src/module/command/restoreTest.c | 4 +- test/src/module/command/stanzaTest.c | 496 ++-- test/src/module/command/verifyTest.c | 22 +- test/src/module/config/loadTest.c | 138 +- test/src/module/config/parseTest.c | 1 + test/src/module/protocol/protocolTest.c | 38 +- test/src/module/storage/remoteTest.c | 7 +- 52 files changed, 6103 insertions(+), 1903 deletions(-) diff --git a/build/lib/pgBackRestBuild/Config/Data.pm b/build/lib/pgBackRestBuild/Config/Data.pm index d177a14b3..db827a6df 100644 --- a/build/lib/pgBackRestBuild/Config/Data.pm +++ b/build/lib/pgBackRestBuild/Config/Data.pm @@ -219,11 +219,14 @@ use constant CFGOPT_LOG_TIMESTAMP => 'log-time # Repository options #----------------------------------------------------------------------------------------------------------------------------------- # Determines how many repositories can be configured -use constant CFGDEF_INDEX_REPO => 1; +use constant CFGDEF_INDEX_REPO => 4; # Prefix that must be used by all repo options that allow multiple configurations use constant CFGDEF_PREFIX_REPO => 'repo'; +# Set default repository +use constant 
CFGOPT_REPO => CFGDEF_PREFIX_REPO; + # Repository General use constant CFGOPT_REPO_CIPHER_TYPE => CFGDEF_PREFIX_REPO . '-cipher-type'; use constant CFGOPT_REPO_CIPHER_PASS => CFGDEF_PREFIX_REPO . '-cipher-pass'; @@ -1686,6 +1689,147 @@ my %hConfigDefine = } }, + # Repository selector + #------------------------------------------------------------------------------------------------------------------------------- + &CFGOPT_REPO => + { + &CFGDEF_TYPE => CFGDEF_TYPE_INTEGER, + &CFGDEF_INTERNAL => true, + &CFGDEF_REQUIRED => false, + &CFGDEF_ALLOW_RANGE => [1, CFGDEF_INDEX_REPO], + &CFGDEF_COMMAND => + { + &CFGCMD_ARCHIVE_GET => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_ASYNC => {}, + &CFGCMD_ROLE_LOCAL => {}, + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + &CFGCMD_ARCHIVE_PUSH => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + &CFGCMD_BACKUP => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_LOCAL => {}, + }, + }, + &CFGCMD_CHECK => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + &CFGCMD_EXPIRE => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + }, + }, + &CFGCMD_INFO => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + &CFGCMD_REPO_CREATE => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + &CFGCMD_REPO_GET => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + &CFGCMD_REPO_LS => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + &CFGCMD_REPO_PUT => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + &CFGCMD_REPO_RM => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + &CFGCMD_RESTORE => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_LOCAL => {}, + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + &CFGCMD_STANZA_CREATE => + { + &CFGDEF_COMMAND_ROLE => {}, + }, + &CFGCMD_STANZA_DELETE => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + }, + }, + &CFGCMD_STANZA_UPGRADE => + { + &CFGDEF_COMMAND_ROLE => {}, + }, + &CFGCMD_START => + { + &CFGDEF_COMMAND_ROLE => {}, + }, + &CFGCMD_STOP => + { + &CFGDEF_COMMAND_ROLE => {}, + }, + &CFGCMD_VERIFY => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_LOCAL => {}, + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + }, + }, + # Repository options #------------------------------------------------------------------------------------------------------------------------------- &CFGOPT_REPO_CIPHER_PASS => @@ -2336,22 +2480,144 @@ my %hConfigDefine = }, &CFGDEF_COMMAND => { - &CFGCMD_ARCHIVE_GET => {}, - &CFGCMD_ARCHIVE_PUSH => {}, - &CFGCMD_BACKUP => {}, - &CFGCMD_CHECK => {}, - &CFGCMD_EXPIRE => {}, - &CFGCMD_INFO => {}, - &CFGCMD_REPO_CREATE => {}, - &CFGCMD_REPO_GET => {}, - &CFGCMD_REPO_LS => {}, - &CFGCMD_REPO_PUT => {}, - &CFGCMD_REPO_RM => {}, - &CFGCMD_RESTORE => {}, - &CFGCMD_STANZA_CREATE => {}, - &CFGCMD_STANZA_DELETE => {}, - &CFGCMD_STANZA_UPGRADE => {}, - &CFGCMD_VERIFY => {}, + &CFGCMD_ARCHIVE_GET => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_ASYNC => {}, + &CFGCMD_ROLE_LOCAL => {}, + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + &CFGCMD_ARCHIVE_PUSH => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => 
{}, + &CFGCMD_ROLE_ASYNC => {}, + &CFGCMD_ROLE_LOCAL => {}, + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + &CFGCMD_BACKUP => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_LOCAL => {}, + }, + }, + &CFGCMD_CHECK => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + &CFGCMD_EXPIRE => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + }, + }, + &CFGCMD_INFO => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + &CFGCMD_REPO_CREATE => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + &CFGCMD_REPO_GET => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + &CFGCMD_REPO_LS => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + &CFGCMD_REPO_PUT => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + &CFGCMD_REPO_RM => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + &CFGCMD_RESTORE => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_LOCAL => {}, + &CFGCMD_ROLE_REMOTE => {}, + }, + }, + &CFGCMD_STANZA_CREATE => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + }, + }, + &CFGCMD_STANZA_DELETE => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + }, + }, + &CFGCMD_STANZA_UPGRADE => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + }, + }, + &CFGCMD_START => + { + &CFGDEF_COMMAND_ROLE => {}, + }, + &CFGCMD_STOP => + { + &CFGDEF_COMMAND_ROLE => {}, + }, + &CFGCMD_VERIFY => + { + &CFGDEF_COMMAND_ROLE => + { + &CFGCMD_ROLE_DEFAULT => {}, + &CFGCMD_ROLE_LOCAL => {}, + &CFGCMD_ROLE_REMOTE => {}, + }, + }, }, }, diff --git a/doc/lib/pgBackRestDoc/Common/DocConfig.pm b/doc/lib/pgBackRestDoc/Common/DocConfig.pm index 3de4e41a4..eda83c28c 100644 --- a/doc/lib/pgBackRestDoc/Common/DocConfig.pm +++ b/doc/lib/pgBackRestDoc/Common/DocConfig.pm @@ -282,6 +282,8 @@ sub process $oOptionDoc = $oDoc->nodeGet('operation')->nodeGet('operation-general')->nodeGet('option-list') ->nodeGetById('option', $strOption, false); + $strSection = $oOptionDoc->paramGet('section', false); + $strOptionSource = CONFIG_HELP_SOURCE_DEFAULT if (defined($oOptionDoc)); } } diff --git a/doc/xml/dtd/doc.dtd b/doc/xml/dtd/doc.dtd index 88de9f34b..1f7d6d920 100644 --- a/doc/xml/dtd/doc.dtd +++ b/doc/xml/dtd/doc.dtd @@ -33,6 +33,7 @@ + diff --git a/doc/xml/release.xml b/doc/xml/release.xml index 785b1fe5d..42488d7e5 100644 --- a/doc/xml/release.xml +++ b/doc/xml/release.xml @@ -36,6 +36,15 @@ + + + + + + +

+                <p>Partial multi-repository implementation.</p>
+ diff --git a/src/command/archive/push/file.c b/src/command/archive/push/file.c index e9de5d942..680caf304 100644 --- a/src/command/archive/push/file.c +++ b/src/command/archive/push/file.c @@ -19,29 +19,30 @@ Archive Push File /**********************************************************************************************************************************/ String * archivePushFile( - const String *walSource, const String *archiveId, unsigned int pgVersion, uint64_t pgSystemId, const String *archiveFile, - CipherType cipherType, const String *cipherPass, CompressType compressType, int compressLevel) + const String *walSource, unsigned int pgVersion, uint64_t pgSystemId, const String *archiveFile, CompressType compressType, + int compressLevel, const ArchivePushFileRepoData *repoData) { FUNCTION_LOG_BEGIN(logLevelDebug); FUNCTION_LOG_PARAM(STRING, walSource); - FUNCTION_LOG_PARAM(STRING, archiveId); FUNCTION_LOG_PARAM(UINT, pgVersion); FUNCTION_LOG_PARAM(UINT64, pgSystemId); FUNCTION_LOG_PARAM(STRING, archiveFile); - FUNCTION_LOG_PARAM(ENUM, cipherType); - FUNCTION_TEST_PARAM(STRING, cipherPass); FUNCTION_LOG_PARAM(ENUM, compressType); FUNCTION_LOG_PARAM(INT, compressLevel); + FUNCTION_LOG_PARAM_P(VOID, repoData); FUNCTION_LOG_END(); ASSERT(walSource != NULL); - ASSERT(archiveId != NULL); ASSERT(archiveFile != NULL); + ASSERT(repoData != NULL); String *result = NULL; MEM_CONTEXT_TEMP_BEGIN() { + // Total repos to push files to + unsigned int repoTotal = cfgOptionGroupIdxTotal(cfgOptGrpRepo); + // Is this a WAL segment? bool isSegment = walIsSegment(archiveFile); @@ -63,11 +64,19 @@ archivePushFile( // Set archive destination initially to the archive file, this will be updated later for wal segments String *archiveDestination = strDup(archiveFile); - // Get wal segment checksum and compare it to what exists in the repo, if any - String *walSegmentFile = NULL; + // Assume that all repos need a copy of the archive file + bool destinationCopyAny = true; + bool *destinationCopy = memNew(sizeof(bool) * repoTotal); + for (unsigned int repoIdx = 0; repoIdx < repoTotal; repoIdx++) + destinationCopy[repoIdx] = true; + + // Get wal segment checksum and compare it to what exists in the repo, if any if (isSegment) { + // Assume that no repos need a copy of the WAL segment and update when a repo needing a copy is found + destinationCopyAny = false; + // Generate a sha1 checksum for the wal segment IoRead *read = storageReadIo(storageNewReadP(storageLocal(), walSource)); ioFilterGroupAdd(ioReadFilterGroup(read), cryptoHashNew(HASH_TYPE_SHA1_STR)); @@ -75,35 +84,60 @@ archivePushFile( const String *walSegmentChecksum = varStr(ioFilterGroupResult(ioReadFilterGroup(read), CRYPTO_HASH_FILTER_TYPE_STR)); - // If the wal segment already exists in the repo then compare checksums - walSegmentFile = walSegmentFind(storageRepo(), archiveId, archiveFile, 0); - - if (walSegmentFile != NULL) + // Check each repo for the WAL segment + for (unsigned int repoIdx = 0; repoIdx < repoTotal; repoIdx++) { - String *walSegmentRepoChecksum = strSubN(walSegmentFile, strSize(archiveFile) + 1, HASH_TYPE_SHA1_SIZE_HEX); + // If the wal segment already exists in the repo then compare checksums + const String *walSegmentFile = walSegmentFind(storageRepoIdx(repoIdx), repoData[repoIdx].archiveId, archiveFile, 0); - if (strEq(walSegmentChecksum, walSegmentRepoChecksum)) + if (walSegmentFile != NULL) { - MEM_CONTEXT_PRIOR_BEGIN() + String *walSegmentRepoChecksum = strSubN(walSegmentFile, strSize(archiveFile) + 1, 
HASH_TYPE_SHA1_SIZE_HEX); + + // If the checksums are the same then succeed but warn in case this is a symptom of some other issue + if (strEq(walSegmentChecksum, walSegmentRepoChecksum)) { - result = strNewFmt( - "WAL file '%s' already exists in the archive with the same checksum" - "\nHINT: this is valid in some recovery scenarios but may also indicate a problem.", - strZ(archiveFile)); + MEM_CONTEXT_PRIOR_BEGIN() + { + // Add LF if there has already been a warning + if (result == NULL) + result = strNew(""); + else + strCatZ(result, "\n"); + + // Add warning to the result that will be returned to the main process + strCatFmt( + result, + "WAL file '%s' already exists in the repo%u archive with the same checksum" + "\nHINT: this is valid in some recovery scenarios but may also indicate a problem.", + strZ(archiveFile), cfgOptionGroupIdxToKey(cfgOptGrpRepo, repoIdx)); + } + MEM_CONTEXT_PRIOR_END(); + + // No need to copy to this repo + destinationCopy[repoIdx] = false; + } + // Else error so we don't overwrite the existing segment + else + { + THROW_FMT( + ArchiveDuplicateError, "WAL file '%s' already exists in the repo%u archive with a different checksum", + strZ(archiveFile), cfgOptionGroupIdxToKey(cfgOptGrpRepo, repoIdx)); } - MEM_CONTEXT_PRIOR_END(); } + // Else the repo needs a copy else - THROW_FMT(ArchiveDuplicateError, "WAL file '%s' already exists in the archive", strZ(archiveFile)); + destinationCopyAny = true; } // Append the checksum to the archive destination strCatFmt(archiveDestination, "-%s", strZ(walSegmentChecksum)); } - // Only copy if the file was not found in the archive - if (walSegmentFile == NULL) + // Copy the file if one or more repos require it + if (destinationCopyAny) { + // Source file is read once and copied to all repos StorageRead *source = storageNewReadP(storageLocal(), walSource); // Is the file compressible during the copy? @@ -117,21 +151,69 @@ archivePushFile( compressible = false; } - // If there is a cipher then add the encrypt filter - if (cipherType != cipherTypeNone) + // Initialize per-repo destination files + StorageWrite **destination = memNew(sizeof(StorageWrite *) * repoTotal); + + for (unsigned int repoIdx = 0; repoIdx < repoTotal; repoIdx++) { - ioFilterGroupAdd( - ioReadFilterGroup(storageReadIo(source)), - cipherBlockNew(cipherModeEncrypt, cipherType, BUFSTR(cipherPass), NULL)); - compressible = false; + // Does this repo need a copy? 
+ if (destinationCopy[repoIdx]) + { + // Create destination file + destination[repoIdx] = storageNewWriteP( + storageRepoIdxWrite(repoIdx), + strNewFmt(STORAGE_REPO_ARCHIVE "/%s/%s", strZ(repoData[repoIdx].archiveId), strZ(archiveDestination)), + .compressible = compressible); + + // If there is a cipher then add the encrypt filter + if (repoData[repoIdx].cipherType != cipherTypeNone) + { + ioFilterGroupAdd( + ioWriteFilterGroup(storageWriteIo(destination[repoIdx])), + cipherBlockNew( + cipherModeEncrypt, repoData[repoIdx].cipherType, BUFSTR(repoData[repoIdx].cipherPass), NULL)); + } + } } - // Copy the file - storageCopyP( - source, - storageNewWriteP( - storageRepoWrite(), strNewFmt(STORAGE_REPO_ARCHIVE "/%s/%s", strZ(archiveId), strZ(archiveDestination)), - .compressible = compressible)); + // Open source file + ioReadOpen(storageReadIo(source)); + + // Open the destination files now that we know the source file exists and is readable + for (unsigned int repoIdx = 0; repoIdx < repoTotal; repoIdx++) + { + if (destinationCopy[repoIdx]) + ioWriteOpen(storageWriteIo(destination[repoIdx])); + } + + // Copy data from source to destination + Buffer *read = bufNew(ioBufferSize()); + + do + { + // Read from source + ioRead(storageReadIo(source), read); + + // Write to each destination + for (unsigned int repoIdx = 0; repoIdx < repoTotal; repoIdx++) + { + if (destinationCopy[repoIdx]) + ioWrite(storageWriteIo(destination[repoIdx]), read); + } + + // Clear buffer + bufUsedZero(read); + } + while (!ioReadEof(storageReadIo(source))); + + // Close the source and destination files + ioReadClose(storageReadIo(source)); + + for (unsigned int repoIdx = 0; repoIdx < repoTotal; repoIdx++) + { + if (destinationCopy[repoIdx]) + ioWriteClose(storageWriteIo(destination[repoIdx])); + } } } MEM_CONTEXT_TEMP_END(); diff --git a/src/command/archive/push/file.h b/src/command/archive/push/file.h index 5222881e0..1b0245be3 100644 --- a/src/command/archive/push/file.h +++ b/src/command/archive/push/file.h @@ -9,12 +9,23 @@ Archive Push File #include "common/type/string.h" #include "storage/storage.h" +/*********************************************************************************************************************************** +Structure to hold information for each repository the archive file will be pushed to. An array of these must be passed to +archivePushFile() with size equal to cfgOptionGroupIdxTotal(cfgOptGrpRepo). 
+***********************************************************************************************************************************/ +typedef struct ArchivePushFileRepoData +{ + const String *archiveId; + CipherType cipherType; + const String *cipherPass; +} ArchivePushFileRepoData; + /*********************************************************************************************************************************** Functions ***********************************************************************************************************************************/ // Copy a file from the source to the archive String *archivePushFile( - const String *walSource, const String *archiveId, unsigned int pgVersion, uint64_t pgSystemId, const String *archiveFile, - CipherType cipherType, const String *cipherPass, CompressType compressType, int compressLevel); + const String *walSource, unsigned int pgVersion, uint64_t pgSystemId, const String *archiveFile, CompressType compressType, + int compressLevel, const ArchivePushFileRepoData *repoData); #endif diff --git a/src/command/archive/push/protocol.c b/src/command/archive/push/protocol.c index cda32af84..52764b8ff 100644 --- a/src/command/archive/push/protocol.c +++ b/src/command/archive/push/protocol.c @@ -36,14 +36,31 @@ archivePushProtocol(const String *command, const VariantList *paramList, Protoco { if (strEq(command, PROTOCOL_COMMAND_ARCHIVE_PUSH_STR)) { + const unsigned int paramFixed = 6; // Fixed params before the repo param array + const unsigned int paramRepo = 3; // Parameters in each index of the repo array + + // Check that the correct number of repo parameters were passed + CHECK(varLstSize(paramList) - paramFixed == cfgOptionGroupIdxTotal(cfgOptGrpRepo) * paramRepo); + + // Build the repo data array + ArchivePushFileRepoData *repoData = memNew(cfgOptionGroupIdxTotal(cfgOptGrpRepo) * sizeof(ArchivePushFileRepoData)); + + for (unsigned int repoIdx = 0; repoIdx < cfgOptionGroupIdxTotal(cfgOptGrpRepo); repoIdx++) + { + repoData[repoIdx].archiveId = varStr(varLstGet(paramList, paramFixed + (repoIdx * paramRepo))); + repoData[repoIdx].cipherType = (CipherType)varUIntForce( + varLstGet(paramList, paramFixed + (repoIdx * paramRepo) + 1)); + repoData[repoIdx].cipherPass = varStr(varLstGet(paramList, paramFixed + (repoIdx * paramRepo) + 2)); + } + + // Push the file protocolServerResponse( server, VARSTR( archivePushFile( - varStr(varLstGet(paramList, 0)), varStr(varLstGet(paramList, 1)), - varUIntForce(varLstGet(paramList, 2)), varUInt64(varLstGet(paramList, 3)), varStr(varLstGet(paramList, 4)), - (CipherType)varUIntForce(varLstGet(paramList, 5)), varStr(varLstGet(paramList, 6)), - (CompressType)varUIntForce(varLstGet(paramList, 7)), varIntForce(varLstGet(paramList, 8))))); + varStr(varLstGet(paramList, 0)), varUIntForce(varLstGet(paramList, 1)), varUInt64(varLstGet(paramList, 2)), + varStr(varLstGet(paramList, 3)), (CompressType)varUIntForce(varLstGet(paramList, 4)), + varIntForce(varLstGet(paramList, 5)), repoData))); } else found = false; diff --git a/src/command/archive/push/push.c b/src/command/archive/push/push.c index c41ce2c3c..af6ec2b29 100644 --- a/src/command/archive/push/push.c +++ b/src/command/archive/push/push.c @@ -20,6 +20,7 @@ Archive Push Command #include "config/exec.h" #include "info/infoArchive.h" #include "postgres/interface.h" +#include "postgres/version.h" #include "protocol/helper.h" #include "protocol/parallel.h" #include "storage/helper.h" @@ -190,55 +191,73 @@ typedef struct ArchivePushCheckResult { unsigned int 
pgVersion; // PostgreSQL version uint64_t pgSystemId; // PostgreSQL system id - String *archiveId; // Archive id for current pg version - String *archiveCipherPass; // Archive cipher passphrase + ArchivePushFileRepoData *repoData; // Data for each repo } ArchivePushCheckResult; static ArchivePushCheckResult -archivePushCheck(bool pgPathSet, CipherType cipherType, const String *cipherPass) +archivePushCheck(bool pgPathSet) { FUNCTION_LOG_BEGIN(logLevelDebug); FUNCTION_LOG_PARAM(BOOL, pgPathSet); - FUNCTION_LOG_PARAM(ENUM, cipherType); - FUNCTION_TEST_PARAM(STRING, cipherPass); FUNCTION_LOG_END(); - ArchivePushCheckResult result = {0}; + ArchivePushCheckResult result = {.repoData = memNew(cfgOptionGroupIdxTotal(cfgOptGrpRepo) * sizeof(ArchivePushFileRepoData))}; MEM_CONTEXT_TEMP_BEGIN() { - // Attempt to load the archive info file - InfoArchive *info = infoArchiveLoadFile(storageRepo(), INFO_ARCHIVE_PATH_FILE_STR, cipherType, cipherPass); - - // Get archive id for the most recent version -- archive-push will only operate against the most recent version - String *archiveId = infoPgArchiveId(infoArchivePg(info), infoPgDataCurrentId(infoArchivePg(info))); - InfoPgData archiveInfo = infoPgData(infoArchivePg(info), infoPgDataCurrentId(infoArchivePg(info))); - - // Ensure that stanza version and system identifier match pg_control when available + // If we have access to pg_control then load it to get the pg version and system id. If we can't load pg_control then we'll + // still compare the pg info stored in the repo to the WAL segment and also all the repos against each other. if (pgPathSet) { // Get info from pg_control - PgControl controlInfo = pgControlFromFile(storagePg()); - - if (controlInfo.version != archiveInfo.version || controlInfo.systemId != archiveInfo.systemId) - { - THROW_FMT( - ArchiveMismatchError, - "PostgreSQL version %s, system-id %" PRIu64 " do not match stanza version %s, system-id %" PRIu64 - "\nHINT: are you archiving to the correct stanza?", - strZ(pgVersionToStr(controlInfo.version)), controlInfo.systemId, strZ(pgVersionToStr(archiveInfo.version)), - archiveInfo.systemId); - } + PgControl pgControl = pgControlFromFile(storagePg()); + result.pgVersion = pgControl.version; + result.pgSystemId = pgControl.systemId; } - MEM_CONTEXT_PRIOR_BEGIN() + for (unsigned int repoIdx = 0; repoIdx < cfgOptionGroupIdxTotal(cfgOptGrpRepo); repoIdx++) { - result.pgVersion = archiveInfo.version; - result.pgSystemId = archiveInfo.systemId; - result.archiveId = strDup(archiveId); - result.archiveCipherPass = strDup(infoArchiveCipherPass(info)); + // Get the repo storage in case it is remote and encryption settings need to be pulled down + storageRepoIdx(repoIdx); + + // Set cipher type in repo data + result.repoData[repoIdx].cipherType = cipherType(cfgOptionIdxStr(cfgOptRepoCipherType, repoIdx)); + + // Attempt to load the archive info file + InfoArchive *info = infoArchiveLoadFile( + storageRepoIdx(repoIdx), INFO_ARCHIVE_PATH_FILE_STR, result.repoData[repoIdx].cipherType, + cfgOptionIdxStrNull(cfgOptRepoCipherPass, repoIdx)); + + // Get archive id for the most recent version -- archive-push will only operate against the most recent version + String *archiveId = infoPgArchiveId(infoArchivePg(info), infoPgDataCurrentId(infoArchivePg(info))); + InfoPgData archiveInfo = infoPgData(infoArchivePg(info), infoPgDataCurrentId(infoArchivePg(info))); + + // Ensure that stanza version and system identifier match pg_control when available or the other repos when pg_control + // is not available + if 
(pgPathSet || repoIdx > 0) + { + if (result.pgVersion != archiveInfo.version || result.pgSystemId != archiveInfo.systemId) + { + THROW_FMT( + ArchiveMismatchError, + "%s version %s, system-id %" PRIu64 " do not match %s stanza version %s, system-id %" PRIu64 + "\nHINT: are you archiving to the correct stanza?", + pgPathSet ? PG_NAME : strZ(strNewFmt("repo%u stanza", cfgOptionGroupIdxToKey(cfgOptGrpRepo, 0))), + strZ(pgVersionToStr(result.pgVersion)), result.pgSystemId, + strZ(strNewFmt("repo%u", cfgOptionGroupIdxToKey(cfgOptGrpRepo, repoIdx))), + strZ(pgVersionToStr(archiveInfo.version)), archiveInfo.systemId); + } + } + + MEM_CONTEXT_PRIOR_BEGIN() + { + result.pgVersion = archiveInfo.version; + result.pgSystemId = archiveInfo.systemId; + result.repoData[repoIdx].archiveId = strDup(archiveId); + result.repoData[repoIdx].cipherPass = strDup(infoArchiveCipherPass(info)); + } + MEM_CONTEXT_PRIOR_END(); } - MEM_CONTEXT_PRIOR_END(); } MEM_CONTEXT_TEMP_END(); @@ -354,19 +373,13 @@ cmdArchivePush(void) // Else push the file else { - // Get the repo storage in case it is remote and encryption settings need to be pulled down - storageRepo(); - - // Get archive info - ArchivePushCheckResult archiveInfo = archivePushCheck( - cfgOptionTest(cfgOptPgPath), cipherType(cfgOptionStr(cfgOptRepoCipherType)), - cfgOptionStrNull(cfgOptRepoCipherPass)); + // Check archive info for each repo + ArchivePushCheckResult archiveInfo = archivePushCheck(cfgOptionTest(cfgOptPgPath)); // Push the file to the archive String *warning = archivePushFile( - walFile, archiveInfo.archiveId, archiveInfo.pgVersion, archiveInfo.pgSystemId, archiveFile, - cipherType(cfgOptionStr(cfgOptRepoCipherType)), archiveInfo.archiveCipherPass, - compressTypeEnum(cfgOptionStr(cfgOptCompressType)), cfgOptionInt(cfgOptCompressLevel)); + walFile, archiveInfo.pgVersion, archiveInfo.pgSystemId, archiveFile, + compressTypeEnum(cfgOptionStr(cfgOptCompressType)), cfgOptionInt(cfgOptCompressLevel), archiveInfo.repoData); // If a warning was returned then log it if (warning != NULL) @@ -388,13 +401,13 @@ typedef struct ArchivePushAsyncData const String *walPath; // Path to pg_wal/pg_xlog const StringList *walFileList; // List of wal files to process unsigned int walFileIdx; // Current index in the list to be processed - CipherType cipherType; // Cipher type CompressType compressType; // Type of compression for WAL segments int compressLevel; // Compression level for wal files ArchivePushCheckResult archiveInfo; // Archive info } ArchivePushAsyncData; -static ProtocolParallelJob *archivePushAsyncCallback(void *data, unsigned int clientIdx) +static ProtocolParallelJob * +archivePushAsyncCallback(void *data, unsigned int clientIdx) { FUNCTION_TEST_BEGIN(); FUNCTION_TEST_PARAM_P(VOID, data); @@ -414,15 +427,20 @@ static ProtocolParallelJob *archivePushAsyncCallback(void *data, unsigned int cl ProtocolCommand *command = protocolCommandNew(PROTOCOL_COMMAND_ARCHIVE_PUSH_STR); protocolCommandParamAdd(command, VARSTR(strNewFmt("%s/%s", strZ(jobData->walPath), strZ(walFile)))); - protocolCommandParamAdd(command, VARSTR(jobData->archiveInfo.archiveId)); protocolCommandParamAdd(command, VARUINT(jobData->archiveInfo.pgVersion)); protocolCommandParamAdd(command, VARUINT64(jobData->archiveInfo.pgSystemId)); protocolCommandParamAdd(command, VARSTR(walFile)); - protocolCommandParamAdd(command, VARUINT(jobData->cipherType)); - protocolCommandParamAdd(command, VARSTR(jobData->archiveInfo.archiveCipherPass)); protocolCommandParamAdd(command, 
VARUINT(jobData->compressType)); protocolCommandParamAdd(command, VARINT(jobData->compressLevel)); + // Add data for each repo to push to + for (unsigned int repoIdx = 0; repoIdx < cfgOptionGroupIdxTotal(cfgOptGrpRepo); repoIdx++) + { + protocolCommandParamAdd(command, VARSTR(jobData->archiveInfo.repoData[repoIdx].archiveId)); + protocolCommandParamAdd(command, VARUINT(jobData->archiveInfo.repoData[repoIdx].cipherType)); + protocolCommandParamAdd(command, VARSTR(jobData->archiveInfo.repoData[repoIdx].cipherPass)); + } + FUNCTION_TEST_RETURN(protocolParallelJobNew(VARSTR(walFile), command)); } @@ -486,15 +504,8 @@ cmdArchivePushAsync(void) // Else continue processing else { - // Get the repo storage in case it is remote and encryption settings need to be pulled down - storageRepo(); - - // Get cipher type - jobData.cipherType = cipherType(cfgOptionStr(cfgOptRepoCipherType)); - - // Get archive info - jobData.archiveInfo = archivePushCheck( - true, cipherType(cfgOptionStr(cfgOptRepoCipherType)), cfgOptionStrNull(cfgOptRepoCipherPass)); + // Check archive info for each repo + jobData.archiveInfo = archivePushCheck(true); // Create the parallel executor ProtocolParallel *parallelExec = protocolParallelNew( diff --git a/src/command/check/check.c b/src/command/check/check.c index fcc186be4..1338a0df9 100644 --- a/src/command/check/check.c +++ b/src/command/check/check.c @@ -53,17 +53,23 @@ checkStandby(const DbGetResult dbGroup, unsigned int pgPathDefinedTotal) // If a standby is defined, check the configuration if (dbGroup.standby != NULL) { - // If primary was not found + // If primary was not found (only have 1 pg configured locally, and we want to still run because this is a standby) if (dbGroup.primary == NULL) { - // If the repo is local or more than one pg-path is found then a master should have been found so error - if (repoIsLocal(cfgOptionGroupIdxDefault(cfgOptGrpRepo)) || pgPathDefinedTotal > 1) + // If any repo is local or more than one pg-path is found then a master should have been found so error + bool error = pgPathDefinedTotal > 1; + unsigned int repoIdx = 0; + + while (!error && repoIdx < cfgOptionGroupIdxTotal(cfgOptGrpRepo)) { - THROW( - ConfigError, - "primary database not found\n" - "HINT: check indexed pg-path/pg-host configurations"); + if (repoIsLocal(repoIdx)) + error = true; + + repoIdx++; } + + if (error) + THROW(ConfigError, "primary database not found\nHINT: check indexed pg-path/pg-host configurations"); } // Validate the standby database config @@ -72,13 +78,19 @@ checkStandby(const DbGetResult dbGroup, unsigned int pgPathDefinedTotal) // Check the user configured path and version against the database checkDbConfig(pgControl.version, dbGroup.standbyIdx, dbGroup.standby, true); - // Get the repo storage in case it is remote and encryption settings need to be pulled down (performed here for testing) - storageRepo(); + // Check each repository configured + for (unsigned int repoIdx = 0; repoIdx < cfgOptionGroupIdxTotal(cfgOptGrpRepo); repoIdx++) + { + LOG_INFO_FMT(CFGCMD_CHECK " repo%u (standby)", cfgOptionGroupIdxToKey(cfgOptGrpRepo, repoIdx)); - // Check that the backup and archive info files exist and are valid for the current database of the stanza - checkStanzaInfoPg( - storageRepo(), pgControl.version, pgControl.systemId, cipherType(cfgOptionStr(cfgOptRepoCipherType)), - cfgOptionStrNull(cfgOptRepoCipherPass)); + // Get the repo storage in case it is remote and encryption settings need to be pulled down (performed here for testing) + const Storage *storageRepo 
= storageRepoIdx(repoIdx); + + // Check that the backup and archive info files exist and are valid for the current database of the stanza + checkStanzaInfoPg( + storageRepo, pgControl.version, pgControl.systemId, cipherType(cfgOptionIdxStr(cfgOptRepoCipherType, repoIdx)), + cfgOptionIdxStrNull(cfgOptRepoCipherPass, repoIdx)); + } LOG_INFO("switch wal not performed because this is a standby"); @@ -110,31 +122,48 @@ checkPrimary(const DbGetResult dbGroup) // Check the user configured path and version against the database checkDbConfig(pgControl.version, dbGroup.primaryIdx, dbGroup.primary, false); - // Get the repo storage in case it is remote and encryption settings need to be pulled down (performed here for testing) - storageRepo(); + // Check configuration of each repo + const String **repoArchiveId = memNew(sizeof(String *) * cfgOptionGroupIdxTotal(cfgOptGrpRepo)); - // Check that the backup and archive info files exist and are valid for the current database of the stanza - checkStanzaInfoPg( - storageRepo(), pgControl.version, pgControl.systemId, cipherType(cfgOptionStr(cfgOptRepoCipherType)), - cfgOptionStrNull(cfgOptRepoCipherPass)); + for (unsigned int repoIdx = 0; repoIdx < cfgOptionGroupIdxTotal(cfgOptGrpRepo); repoIdx++) + { + LOG_INFO_FMT(CFGCMD_CHECK " repo%u configuration (primary)", cfgOptionGroupIdxToKey(cfgOptGrpRepo, repoIdx)); - // Attempt to load the archive info file and retrieve the archiveId - InfoArchive *archiveInfo = infoArchiveLoadFile( - storageRepo(), INFO_ARCHIVE_PATH_FILE_STR, cipherType(cfgOptionStr(cfgOptRepoCipherType)), - cfgOptionStrNull(cfgOptRepoCipherPass)); - const String *archiveId = infoArchiveId(archiveInfo); + // Get the repo storage in case it is remote and encryption settings need to be pulled down (performed here for testing) + const Storage *storageRepo = storageRepoIdx(repoIdx); + + // Check that the backup and archive info files exist and are valid for the current database of the stanza + checkStanzaInfoPg( + storageRepo, pgControl.version, pgControl.systemId, cipherType(cfgOptionIdxStr(cfgOptRepoCipherType, repoIdx)), + cfgOptionIdxStrNull(cfgOptRepoCipherPass, repoIdx)); + + // Attempt to load the archive info file and retrieve the archiveId + InfoArchive *archiveInfo = infoArchiveLoadFile( + storageRepo, INFO_ARCHIVE_PATH_FILE_STR, cipherType(cfgOptionIdxStr(cfgOptRepoCipherType, repoIdx)), + cfgOptionIdxStrNull(cfgOptRepoCipherPass, repoIdx)); + + repoArchiveId[repoIdx] = infoArchiveId(archiveInfo); + } // Perform a WAL switch const String *walSegment = dbWalSwitch(dbGroup.primary); + + // Wait for the WAL to appear in each repo + for (unsigned int repoIdx = 0; repoIdx < cfgOptionGroupIdxTotal(cfgOptGrpRepo); repoIdx++) + { + LOG_INFO_FMT(CFGCMD_CHECK " repo%u archive for WAL (primary)", cfgOptionGroupIdxToKey(cfgOptGrpRepo, repoIdx)); + + const Storage *storageRepo = storageRepoIdx(repoIdx); + const String *walSegmentFile = walSegmentFind( + storageRepo, repoArchiveId[repoIdx], walSegment, cfgOptionUInt64(cfgOptArchiveTimeout)); + + LOG_INFO_FMT( + "WAL segment %s successfully archived to '%s' on repo%u", strZ(walSegment), + strZ(storagePathP(storageRepo, strNewFmt(STORAGE_REPO_ARCHIVE "/%s/%s", strZ(repoArchiveId[repoIdx]), + strZ(walSegmentFile)))), cfgOptionGroupIdxToKey(cfgOptGrpRepo, repoIdx)); + } + dbFree(dbGroup.primary); - - // Wait for the WAL to appear in the repo - TimeMSec archiveTimeout = cfgOptionUInt64(cfgOptArchiveTimeout); - const String *walSegmentFile = walSegmentFind(storageRepo(), archiveId, walSegment, 
archiveTimeout); - - LOG_INFO_FMT( - "WAL segment %s successfully archived to '%s'", strZ(walSegment), - strZ(storagePathP(storageRepo(), strNewFmt(STORAGE_REPO_ARCHIVE "/%s/%s", strZ(archiveId), strZ(walSegmentFile))))); } FUNCTION_LOG_RETURN_VOID(); diff --git a/src/command/help/help.auto.c b/src/command/help/help.auto.c index 146d81690..3f0407418 100644 --- a/src/command/help/help.auto.c +++ b/src/command/help/help.auto.c @@ -2166,6 +2166,10 @@ static const unsigned char helpDataPack[] = // ------------------------------------------------------------------------------------------------------------------------- pckTypeBool << 4 | 0x08, // Internal + // repo option + // ------------------------------------------------------------------------------------------------------------------------- + pckTypeBool << 4 | 0x0D, 0x01, // Internal + // repo-azure-account option // ------------------------------------------------------------------------------------------------------------------------- pckTypeStr << 4 | 0x0E, 0x01, 0x0A, // Section diff --git a/src/command/help/help.c b/src/command/help/help.c index afdb0a263..5089df10b 100644 --- a/src/command/help/help.c +++ b/src/command/help/help.c @@ -367,8 +367,8 @@ helpRender(void) const String *defaultValue = helpRenderValue(cfgOptionDefault(optionId), cfgParseOptionType(optionId)); const String *value = NULL; - if (cfgOptionSource(optionId) != cfgSourceDefault) - value = helpRenderValue(cfgOption(optionId), cfgParseOptionType(optionId)); + if (cfgOptionIdxSource(optionId, 0) != cfgSourceDefault) + value = helpRenderValue(cfgOptionIdx(optionId, 0), cfgParseOptionType(optionId)); if (value != NULL || defaultValue != NULL) { @@ -437,8 +437,8 @@ helpRender(void) const String *defaultValue = helpRenderValue(cfgOptionDefault(option.id), cfgParseOptionType(option.id)); const String *value = NULL; - if (cfgOptionSource(option.id) != cfgSourceDefault) - value = helpRenderValue(cfgOption(option.id), cfgParseOptionType(option.id)); + if (cfgOptionIdxSource(option.id, 0) != cfgSourceDefault) + value = helpRenderValue(cfgOptionIdx(option.id, 0), cfgParseOptionType(option.id)); if (value != NULL || defaultValue != NULL) { diff --git a/src/command/info/info.c b/src/command/info/info.c index 1802b942c..0d1e91125 100644 --- a/src/command/info/info.c +++ b/src/command/info/info.c @@ -52,16 +52,19 @@ VARIANT_STRDEF_STATIC(DB_KEY_SYSTEM_ID_VAR, "system-id") VARIANT_STRDEF_STATIC(DB_KEY_VERSION_VAR, "version"); VARIANT_STRDEF_STATIC(INFO_KEY_REPOSITORY_VAR, "repository"); VARIANT_STRDEF_STATIC(KEY_ARCHIVE_VAR, "archive"); +VARIANT_STRDEF_STATIC(KEY_CIPHER_VAR, "cipher"); VARIANT_STRDEF_STATIC(KEY_DATABASE_VAR, "database"); VARIANT_STRDEF_STATIC(KEY_DELTA_VAR, "delta"); VARIANT_STRDEF_STATIC(KEY_DESTINATION_VAR, "destination"); VARIANT_STRDEF_STATIC(KEY_NAME_VAR, "name"); VARIANT_STRDEF_STATIC(KEY_OID_VAR, "oid"); +VARIANT_STRDEF_STATIC(KEY_REPO_KEY_VAR, "repo-key"); VARIANT_STRDEF_STATIC(KEY_SIZE_VAR, "size"); VARIANT_STRDEF_STATIC(KEY_START_VAR, "start"); VARIANT_STRDEF_STATIC(KEY_STOP_VAR, "stop"); +VARIANT_STRDEF_STATIC(REPO_KEY_KEY_VAR, "key"); VARIANT_STRDEF_STATIC(STANZA_KEY_BACKUP_VAR, "backup"); -VARIANT_STRDEF_STATIC(STANZA_KEY_CIPHER_VAR, "cipher"); +VARIANT_STRDEF_STATIC(STANZA_KEY_REPO_VAR, "repo"); VARIANT_STRDEF_STATIC(STANZA_KEY_STATUS_VAR, "status"); VARIANT_STRDEF_STATIC(STANZA_KEY_DB_VAR, "db"); VARIANT_STRDEF_STATIC(STATUS_KEY_CODE_VAR, "code"); @@ -72,6 +75,7 @@ VARIANT_STRDEF_STATIC(STATUS_KEY_MESSAGE_VAR, "message"); #define 
INFO_STANZA_STATUS_OK "ok" #define INFO_STANZA_STATUS_ERROR "error" +#define INFO_STANZA_MIXED "mixed" #define INFO_STANZA_STATUS_CODE_OK 0 STRING_STATIC(INFO_STANZA_STATUS_MESSAGE_OK_STR, "ok"); @@ -81,30 +85,121 @@ STRING_STATIC(INFO_STANZA_STATUS_MESSAGE_MISSING_STANZA_PATH_STR, "missing sta STRING_STATIC(INFO_STANZA_STATUS_MESSAGE_NO_BACKUP_STR, "no valid backups"); #define INFO_STANZA_STATUS_CODE_MISSING_STANZA_DATA 3 STRING_STATIC(INFO_STANZA_STATUS_MESSAGE_MISSING_STANZA_DATA_STR, "missing stanza data"); +#define INFO_STANZA_STATUS_CODE_MIXED 4 +// If the cipher or status of the stanza is different across repos, then the overall cipher or status message is mixed +STRING_STATIC(INFO_STANZA_MESSAGE_MIXED_STR, "different across repos"); +#define INFO_STANZA_STATUS_CODE_PG_MISMATCH 5 +STRING_STATIC(INFO_STANZA_STATUS_MESSAGE_PG_MISMATCH_STR, "database mismatch across repos"); -STRING_STATIC(INFO_STANZA_STATUS_MESSAGE_LOCK_BACKUP_STR, "backup/expire running"); +#define INFO_STANZA_STATUS_MESSAGE_LOCK_BACKUP "backup/expire running" /*********************************************************************************************************************************** -Set error status code and message for the stanza to the code and message passed +Data types and structures +***********************************************************************************************************************************/ +// Repository information for a stanza +typedef struct InfoRepoData +{ + unsigned int key; // User-defined repo key + CipherType cipher; // Encryption type (0 = none) + const String *cipherPass; // Passphrase if the repo is encrypted (else NULL) + int stanzaStatus; // Status code of the the stanza on this repo + unsigned int backupIdx; // Index of the next backup that may be a candidate for sorting + InfoBackup *backupInfo; // Contents of the backup.info file of the stanza on this repo + InfoArchive *archiveInfo; // Contents of the archive.info file of the stanza on this repo +} InfoRepoData; + +#define FUNCTION_LOG_INFO_REPO_DATA_TYPE \ + InfoRepoData * +#define FUNCTION_LOG_INFO_REPO_DATA_FORMAT(value, buffer, bufferSize) \ + objToLog(value, "InfoRepoData", buffer, bufferSize) + +// Stanza with repository list of information for each repository +typedef struct InfoStanzaRepo +{ + const String *name; // Name of the stanza + uint64_t currentPgSystemId; // Current postgres system id for the stanza + unsigned int currentPgVersion; // Current postgres version for the stanza + InfoRepoData *repoList; // List of configured repositories +} InfoStanzaRepo; + +#define FUNCTION_LOG_INFO_STANZA_REPO_TYPE \ + InfoStanzaRepo * +#define FUNCTION_LOG_INFO_STANZA_REPO_FORMAT(value, buffer, bufferSize) \ + objToLog(value, "InfoStanzaRepo", buffer, bufferSize) + +// Group all databases with the same system-id and version together regardless of db-id or repo +typedef struct DbGroup +{ + uint64_t systemId; // Postgres database system id + const String *version; // Postgres database version + bool current; // Is this the current postgres database? 
+ String *archiveMin; // Minimum WAL found for this database over all repositories + String *archiveMax; // Maximum WAL found for this database over all repositories + VariantList *backupList; // List of backups found for this database over all repositories +} DbGroup; + +#define FUNCTION_LOG_DB_GROUP_TYPE \ + DbGroup * +#define FUNCTION_LOG_DB_GROUP_FORMAT(value, buffer, bufferSize) \ + objToLog(value, "DbGroup", buffer, bufferSize) + +/*********************************************************************************************************************************** +Set the overall error status code and message for the stanza to the code and message passed ***********************************************************************************************************************************/ static void -stanzaStatus(const int code, const String *message, bool backupLockHeld, Variant *stanzaInfo) +stanzaStatus(const int code, bool backupLockHeld, Variant *stanzaInfo) { FUNCTION_TEST_BEGIN(); FUNCTION_TEST_PARAM(INT, code); - FUNCTION_TEST_PARAM(STRING, message); FUNCTION_TEST_PARAM(BOOL, backupLockHeld); FUNCTION_TEST_PARAM(VARIANT, stanzaInfo); FUNCTION_TEST_END(); - ASSERT(code >= 0 && code <= 3); - ASSERT(message != NULL); + ASSERT(code >= 0 && code <= 5); ASSERT(stanzaInfo != NULL); KeyValue *statusKv = kvPutKv(varKv(stanzaInfo), STANZA_KEY_STATUS_VAR); kvAdd(statusKv, STATUS_KEY_CODE_VAR, VARINT(code)); - kvAdd(statusKv, STATUS_KEY_MESSAGE_VAR, VARSTR(message)); + + switch (code) + { + case INFO_STANZA_STATUS_CODE_OK: + { + kvAdd(statusKv, STATUS_KEY_MESSAGE_VAR, VARSTR(INFO_STANZA_STATUS_MESSAGE_OK_STR)); + break; + } + + case INFO_STANZA_STATUS_CODE_MISSING_STANZA_PATH: + { + kvAdd(statusKv, STATUS_KEY_MESSAGE_VAR, VARSTR(INFO_STANZA_STATUS_MESSAGE_MISSING_STANZA_PATH_STR)); + break; + } + + case INFO_STANZA_STATUS_CODE_MISSING_STANZA_DATA: + { + kvAdd(statusKv, STATUS_KEY_MESSAGE_VAR, VARSTR(INFO_STANZA_STATUS_MESSAGE_MISSING_STANZA_DATA_STR)); + break; + } + + case INFO_STANZA_STATUS_CODE_NO_BACKUP: + { + kvAdd(statusKv, STATUS_KEY_MESSAGE_VAR, VARSTR(INFO_STANZA_STATUS_MESSAGE_NO_BACKUP_STR)); + break; + } + + case INFO_STANZA_STATUS_CODE_MIXED: + { + kvAdd(statusKv, STATUS_KEY_MESSAGE_VAR, VARSTR(INFO_STANZA_MESSAGE_MIXED_STR)); + break; + } + + case INFO_STANZA_STATUS_CODE_PG_MISMATCH: + { + kvAdd(statusKv, STATUS_KEY_MESSAGE_VAR, VARSTR(INFO_STANZA_STATUS_MESSAGE_PG_MISMATCH_STR)); + break; + } + } // Construct a specific lock part KeyValue *lockKv = kvPutKv(statusKv, STATUS_KEY_LOCK_VAR); @@ -114,17 +209,69 @@ stanzaStatus(const int code, const String *message, bool backupLockHeld, Variant FUNCTION_TEST_RETURN_VOID(); } +/*********************************************************************************************************************************** +Set the error status code and message for the stanza on the repo to the code and message passed +***********************************************************************************************************************************/ +static void +repoStanzaStatus(const int code, Variant *repoStanzaInfo) +{ + FUNCTION_TEST_BEGIN(); + FUNCTION_TEST_PARAM(INT, code); + FUNCTION_TEST_PARAM(VARIANT, repoStanzaInfo); + FUNCTION_TEST_END(); + + ASSERT(code >= 0 && code <= 3); + ASSERT(repoStanzaInfo != NULL); + + KeyValue *statusKv = kvPutKv(varKv(repoStanzaInfo), STANZA_KEY_STATUS_VAR); + + kvAdd(statusKv, STATUS_KEY_CODE_VAR, VARINT(code)); + + switch (code) + { + case INFO_STANZA_STATUS_CODE_OK: + { + kvAdd(statusKv, 
STATUS_KEY_MESSAGE_VAR, VARSTR(INFO_STANZA_STATUS_MESSAGE_OK_STR)); + break; + } + + case INFO_STANZA_STATUS_CODE_MISSING_STANZA_PATH: + { + kvAdd(statusKv, STATUS_KEY_MESSAGE_VAR, VARSTR(INFO_STANZA_STATUS_MESSAGE_MISSING_STANZA_PATH_STR)); + break; + } + + case INFO_STANZA_STATUS_CODE_MISSING_STANZA_DATA: + { + kvAdd(statusKv, STATUS_KEY_MESSAGE_VAR, VARSTR(INFO_STANZA_STATUS_MESSAGE_MISSING_STANZA_DATA_STR)); + break; + } + + case INFO_STANZA_STATUS_CODE_NO_BACKUP: + { + kvAdd(statusKv, STATUS_KEY_MESSAGE_VAR, VARSTR(INFO_STANZA_STATUS_MESSAGE_NO_BACKUP_STR)); + break; + } + } + + FUNCTION_TEST_RETURN_VOID(); +} + /*********************************************************************************************************************************** Set the data for the archive section of the stanza for the database info from the backup.info file ***********************************************************************************************************************************/ static void -archiveDbList(const String *stanza, const InfoPgData *pgData, VariantList *archiveSection, const InfoArchive *info, bool currentDb) +archiveDbList( + const String *stanza, const InfoPgData *pgData, VariantList *archiveSection, const InfoArchive *info, bool currentDb, + unsigned int repoIdx, unsigned int repoKey) { FUNCTION_TEST_BEGIN(); FUNCTION_TEST_PARAM(STRING, stanza); FUNCTION_TEST_PARAM_P(INFO_PG_DATA, pgData); FUNCTION_TEST_PARAM(VARIANT_LIST, archiveSection); FUNCTION_TEST_PARAM(BOOL, currentDb); + FUNCTION_TEST_PARAM(UINT, repoIdx); + FUNCTION_TEST_PARAM(UINT, repoKey); FUNCTION_TEST_END(); ASSERT(stanza != NULL); @@ -140,10 +287,11 @@ archiveDbList(const String *stanza, const InfoPgData *pgData, VariantList *archi String *archiveStart = NULL; String *archiveStop = NULL; Variant *archiveInfo = varNewKv(kvNew()); + const Storage *storageRepo = storageRepoIdx(repoIdx); // Get a list of WAL directories in the archive repo from oldest to newest, if any exist StringList *walDir = strLstSort( - storageListP(storageRepo(), archivePath, .expression = WAL_SEGMENT_DIR_REGEXP_STR), sortOrderAsc); + storageListP(storageRepo, archivePath, .expression = WAL_SEGMENT_DIR_REGEXP_STR), sortOrderAsc); if (strLstSize(walDir) > 0) { @@ -152,7 +300,7 @@ archiveDbList(const String *stanza, const InfoPgData *pgData, VariantList *archi { // Get a list of all WAL in this WAL dir StringList *list = storageListP( - storageRepo(), strNewFmt("%s/%s", strZ(archivePath), strZ(strLstGet(walDir, idx))), + storageRepo, strNewFmt("%s/%s", strZ(archivePath), strZ(strLstGet(walDir, idx))), .expression = WAL_SEGMENT_FILE_REGEXP_STR); // If wal segments are found, get the oldest one as the archive start @@ -170,7 +318,7 @@ archiveDbList(const String *stanza, const InfoPgData *pgData, VariantList *archi { // Get a list of all WAL in this WAL dir StringList *list = storageListP( - storageRepo(), strNewFmt("%s/%s", strZ(archivePath), strZ(strLstGet(walDir, idx))), + storageRepo, strNewFmt("%s/%s", strZ(archivePath), strZ(strLstGet(walDir, idx))), .expression = WAL_SEGMENT_FILE_REGEXP_STR); // If wal segments are found, get the newest one as the archive stop @@ -191,6 +339,7 @@ archiveDbList(const String *stanza, const InfoPgData *pgData, VariantList *archi KeyValue *databaseInfo = kvPutKv(varKv(archiveInfo), KEY_DATABASE_VAR); kvAdd(databaseInfo, DB_KEY_ID_VAR, VARUINT(pgData->id)); + kvAdd(databaseInfo, KEY_REPO_KEY_VAR, VARUINT(repoKey)); kvPut(varKv(archiveInfo), DB_KEY_ID_VAR, VARSTR(archiveId)); kvPut(varKv(archiveInfo), 
ARCHIVE_KEY_MIN_VAR, (archiveStart != NULL ? VARSTR(archiveStart) : (Variant *)NULL)); @@ -203,164 +352,236 @@ archiveDbList(const String *stanza, const InfoPgData *pgData, VariantList *archi } /*********************************************************************************************************************************** -For each current backup in the backup.info file of the stanza, set the data for the backup section +Add the backup data to the backup section ***********************************************************************************************************************************/ static void -backupList(VariantList *backupSection, InfoBackup *info, const String *backupLabel) +backupListAdd( + VariantList *backupSection, InfoBackupData *backupData, const String *backupLabel, InfoRepoData *repoData, unsigned int repoIdx) { FUNCTION_TEST_BEGIN(); - FUNCTION_TEST_PARAM(VARIANT_LIST, backupSection); - FUNCTION_TEST_PARAM(INFO_BACKUP, info); - FUNCTION_TEST_PARAM(STRING, backupLabel); + FUNCTION_TEST_PARAM(VARIANT_LIST, backupSection); // The section to add the backup data to + FUNCTION_TEST_PARAM_P(INFO_BACKUP_DATA, backupData); // The data for the backup + FUNCTION_TEST_PARAM(STRING, backupLabel); // Backup label to filter if requested by the user + FUNCTION_TEST_PARAM(INFO_REPO_DATA, repoData); // The repo data where this backup is located + FUNCTION_TEST_PARAM(UINT, repoIdx); // Internal index for the repo FUNCTION_TEST_END(); ASSERT(backupSection != NULL); - ASSERT(info != NULL); + ASSERT(backupData != NULL); + ASSERT(repoData != NULL); - // For each current backup, get the label and corresponding data and build the backup section - for (unsigned int keyIdx = 0; keyIdx < infoBackupDataTotal(info); keyIdx++) + Variant *backupInfo = varNewKv(kvNew()); + + // main keys + kvPut(varKv(backupInfo), BACKUP_KEY_LABEL_VAR, VARSTR(backupData->backupLabel)); + kvPut(varKv(backupInfo), BACKUP_KEY_TYPE_VAR, VARSTR(backupData->backupType)); + kvPut( + varKv(backupInfo), BACKUP_KEY_PRIOR_VAR, + (backupData->backupPrior != NULL ? VARSTR(backupData->backupPrior) : NULL)); + kvPut( + varKv(backupInfo), BACKUP_KEY_REFERENCE_VAR, + (backupData->backupReference != NULL ? varNewVarLst(varLstNewStrLst(backupData->backupReference)) : NULL)); + + // archive section + KeyValue *archiveInfo = kvPutKv(varKv(backupInfo), KEY_ARCHIVE_VAR); + + kvAdd( + archiveInfo, KEY_START_VAR, + (backupData->backupArchiveStart != NULL ? VARSTR(backupData->backupArchiveStart) : NULL)); + kvAdd( + archiveInfo, KEY_STOP_VAR, + (backupData->backupArchiveStop != NULL ? 
VARSTR(backupData->backupArchiveStop) : NULL)); + + // backrest section + KeyValue *backrestInfo = kvPutKv(varKv(backupInfo), BACKUP_KEY_BACKREST_VAR); + + kvAdd(backrestInfo, BACKREST_KEY_FORMAT_VAR, VARUINT(backupData->backrestFormat)); + kvAdd(backrestInfo, BACKREST_KEY_VERSION_VAR, VARSTR(backupData->backrestVersion)); + + // database section + KeyValue *dbInfo = kvPutKv(varKv(backupInfo), KEY_DATABASE_VAR); + + kvAdd(dbInfo, DB_KEY_ID_VAR, VARUINT(backupData->backupPgId)); + kvAdd(dbInfo, KEY_REPO_KEY_VAR, VARUINT(repoData->key)); + + // info section + KeyValue *infoInfo = kvPutKv(varKv(backupInfo), BACKUP_KEY_INFO_VAR); + + kvAdd(infoInfo, KEY_SIZE_VAR, VARUINT64(backupData->backupInfoSize)); + kvAdd(infoInfo, KEY_DELTA_VAR, VARUINT64(backupData->backupInfoSizeDelta)); + + // info:repository section + KeyValue *repoInfo = kvPutKv(infoInfo, INFO_KEY_REPOSITORY_VAR); + + kvAdd(repoInfo, KEY_SIZE_VAR, VARUINT64(backupData->backupInfoRepoSize)); + kvAdd(repoInfo, KEY_DELTA_VAR, VARUINT64(backupData->backupInfoRepoSizeDelta)); + + // timestamp section + KeyValue *timeInfo = kvPutKv(varKv(backupInfo), BACKUP_KEY_TIMESTAMP_VAR); + + // time_t is generally a signed int so cast it to uint64 since it can never be negative (before 1970) in our system + kvAdd(timeInfo, KEY_START_VAR, VARUINT64((uint64_t)backupData->backupTimestampStart)); + kvAdd(timeInfo, KEY_STOP_VAR, VARUINT64((uint64_t)backupData->backupTimestampStop)); + + // If a backup label was specified and this is that label, then get the manifest + if (backupLabel != NULL && strEq(backupData->backupLabel, backupLabel)) { - // Get the backup data - InfoBackupData backupData = infoBackupData(info, keyIdx); + // Load the manifest file + Manifest *manifest = manifestLoadFile( + storageRepoIdx(repoIdx), strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE, strZ(backupLabel)), + repoData->cipher, infoPgCipherPass(infoBackupPg(repoData->backupInfo))); - Variant *backupInfo = varNewKv(kvNew()); + // Get the list of databases in this backup + VariantList *databaseSection = varLstNew(); - // main keys - kvPut(varKv(backupInfo), BACKUP_KEY_LABEL_VAR, VARSTR(backupData.backupLabel)); - kvPut(varKv(backupInfo), BACKUP_KEY_TYPE_VAR, VARSTR(backupData.backupType)); - kvPut( - varKv(backupInfo), BACKUP_KEY_PRIOR_VAR, - (backupData.backupPrior != NULL ? VARSTR(backupData.backupPrior) : NULL)); - kvPut( - varKv(backupInfo), BACKUP_KEY_REFERENCE_VAR, - (backupData.backupReference != NULL ? varNewVarLst(varLstNewStrLst(backupData.backupReference)) : NULL)); - - // archive section - KeyValue *archiveInfo = kvPutKv(varKv(backupInfo), KEY_ARCHIVE_VAR); - - kvAdd( - archiveInfo, KEY_START_VAR, - (backupData.backupArchiveStart != NULL ? VARSTR(backupData.backupArchiveStart) : NULL)); - kvAdd( - archiveInfo, KEY_STOP_VAR, - (backupData.backupArchiveStop != NULL ? 
VARSTR(backupData.backupArchiveStop) : NULL)); - - // backrest section - KeyValue *backrestInfo = kvPutKv(varKv(backupInfo), BACKUP_KEY_BACKREST_VAR); - - kvAdd(backrestInfo, BACKREST_KEY_FORMAT_VAR, VARUINT(backupData.backrestFormat)); - kvAdd(backrestInfo, BACKREST_KEY_VERSION_VAR, VARSTR(backupData.backrestVersion)); - - // database section - KeyValue *dbInfo = kvPutKv(varKv(backupInfo), KEY_DATABASE_VAR); - - kvAdd(dbInfo, DB_KEY_ID_VAR, VARUINT(backupData.backupPgId)); - - // info section - KeyValue *infoInfo = kvPutKv(varKv(backupInfo), BACKUP_KEY_INFO_VAR); - - kvAdd(infoInfo, KEY_SIZE_VAR, VARUINT64(backupData.backupInfoSize)); - kvAdd(infoInfo, KEY_DELTA_VAR, VARUINT64(backupData.backupInfoSizeDelta)); - - // info:repository section - KeyValue *repoInfo = kvPutKv(infoInfo, INFO_KEY_REPOSITORY_VAR); - - kvAdd(repoInfo, KEY_SIZE_VAR, VARUINT64(backupData.backupInfoRepoSize)); - kvAdd(repoInfo, KEY_DELTA_VAR, VARUINT64(backupData.backupInfoRepoSizeDelta)); - - // timestamp section - KeyValue *timeInfo = kvPutKv(varKv(backupInfo), BACKUP_KEY_TIMESTAMP_VAR); - - // time_t is generally a signed int so cast it to uint64 since it can never be negative (before 1970) in our system - kvAdd(timeInfo, KEY_START_VAR, VARUINT64((uint64_t)backupData.backupTimestampStart)); - kvAdd(timeInfo, KEY_STOP_VAR, VARUINT64((uint64_t)backupData.backupTimestampStop)); - - // If a backup label was specified and this is that label, then get the manifest - if (backupLabel != NULL && strEq(backupData.backupLabel, backupLabel)) + for (unsigned int dbIdx = 0; dbIdx < manifestDbTotal(manifest); dbIdx++) { - // Load the manifest file - const Manifest *manifest = manifestLoadFile( - storageRepo(), strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE, strZ(backupLabel)), - cipherType(cfgOptionStr(cfgOptRepoCipherType)), infoPgCipherPass(infoBackupPg(info))); + const ManifestDb *db = manifestDb(manifest, dbIdx); - // Get the list of databases in this backup - VariantList *databaseSection = varLstNew(); - - for (unsigned int dbIdx = 0; dbIdx < manifestDbTotal(manifest); dbIdx++) + // Do not display template databases + if (db->id > db->lastSystemId) { - const ManifestDb *db = manifestDb(manifest, dbIdx); + Variant *database = varNewKv(kvNew()); - // Do not display template databases - if (db->id > db->lastSystemId) - { - Variant *database = varNewKv(kvNew()); - kvPut(varKv(database), KEY_NAME_VAR, VARSTR(db->name)); - kvPut(varKv(database), KEY_OID_VAR, VARUINT64(db->id)); - varLstAdd(databaseSection, database); - } + kvPut(varKv(database), KEY_NAME_VAR, VARSTR(db->name)); + kvPut(varKv(database), KEY_OID_VAR, VARUINT64(db->id)); + varLstAdd(databaseSection, database); } - - // Add the database section even if none found - kvPut(varKv(backupInfo), BACKUP_KEY_DATABASE_REF_VAR, varNewVarLst(databaseSection)); - - // Get symlinks and tablespaces - VariantList *linkSection = varLstNew(); - VariantList *tablespaceSection = varLstNew(); - - for (unsigned int targetIdx = 0; targetIdx < manifestTargetTotal(manifest); targetIdx++) - { - const ManifestTarget *target = manifestTarget(manifest, targetIdx); - Variant *link = varNewKv(kvNew()); - Variant *tablespace = varNewKv(kvNew()); - - if (target->type == manifestTargetTypeLink) - { - if (target->tablespaceName != NULL) - { - kvPut(varKv(tablespace), KEY_NAME_VAR, VARSTR(target->tablespaceName)); - kvPut(varKv(tablespace), KEY_DESTINATION_VAR, VARSTR(target->path)); - kvPut(varKv(tablespace), KEY_OID_VAR, VARUINT64(target->tablespaceId)); - varLstAdd(tablespaceSection, 
tablespace); - } - else if (target->file != NULL) - { - kvPut(varKv(link), KEY_NAME_VAR, varNewStr(target->file)); - kvPut( - varKv(link), KEY_DESTINATION_VAR, varNewStr(strNewFmt("%s/%s", strZ(target->path), - strZ(target->file)))); - varLstAdd(linkSection, link); - } - else - { - kvPut(varKv(link), KEY_NAME_VAR, VARSTR(manifestPathPg(target->name))); - kvPut(varKv(link), KEY_DESTINATION_VAR, VARSTR(target->path)); - varLstAdd(linkSection, link); - } - } - } - - kvPut(varKv(backupInfo), BACKUP_KEY_LINK_VAR, (varLstSize(linkSection) > 0 ? varNewVarLst(linkSection) : NULL)); - kvPut( - varKv(backupInfo), BACKUP_KEY_TABLESPACE_VAR, - (varLstSize(tablespaceSection) > 0 ? varNewVarLst(tablespaceSection) : NULL)); - - // Get the list of files with an error in the page checksum - VariantList *checksumPageErrorList = varLstNew(); - - for (unsigned int fileIdx = 0; fileIdx < manifestFileTotal(manifest); fileIdx++) - { - const ManifestFile *file = manifestFile(manifest, fileIdx); - - if (file->checksumPageError) - varLstAdd(checksumPageErrorList, varNewStr(manifestPathPg(file->name))); - } - - kvPut( - varKv(backupInfo), BACKUP_KEY_CHECKSUM_PAGE_ERROR_VAR, - (varLstSize(checksumPageErrorList) > 0 ? varNewVarLst(checksumPageErrorList) : NULL)); } - varLstAdd(backupSection, backupInfo); + // Add the database section even if none found + kvPut(varKv(backupInfo), BACKUP_KEY_DATABASE_REF_VAR, varNewVarLst(databaseSection)); + + // Get symlinks and tablespaces + VariantList *linkSection = varLstNew(); + VariantList *tablespaceSection = varLstNew(); + + for (unsigned int targetIdx = 0; targetIdx < manifestTargetTotal(manifest); targetIdx++) + { + const ManifestTarget *target = manifestTarget(manifest, targetIdx); + Variant *link = varNewKv(kvNew()); + Variant *tablespace = varNewKv(kvNew()); + + if (target->type == manifestTargetTypeLink) + { + if (target->tablespaceName != NULL) + { + kvPut(varKv(tablespace), KEY_NAME_VAR, VARSTR(target->tablespaceName)); + kvPut(varKv(tablespace), KEY_DESTINATION_VAR, VARSTR(target->path)); + kvPut(varKv(tablespace), KEY_OID_VAR, VARUINT64(target->tablespaceId)); + varLstAdd(tablespaceSection, tablespace); + } + else if (target->file != NULL) + { + kvPut(varKv(link), KEY_NAME_VAR, varNewStr(target->file)); + kvPut( + varKv(link), KEY_DESTINATION_VAR, varNewStr(strNewFmt("%s/%s", strZ(target->path), + strZ(target->file)))); + varLstAdd(linkSection, link); + } + else + { + kvPut(varKv(link), KEY_NAME_VAR, VARSTR(manifestPathPg(target->name))); + kvPut(varKv(link), KEY_DESTINATION_VAR, VARSTR(target->path)); + varLstAdd(linkSection, link); + } + } + } + + kvPut(varKv(backupInfo), BACKUP_KEY_LINK_VAR, (varLstSize(linkSection) > 0 ? varNewVarLst(linkSection) : NULL)); + kvPut( + varKv(backupInfo), BACKUP_KEY_TABLESPACE_VAR, + (varLstSize(tablespaceSection) > 0 ? varNewVarLst(tablespaceSection) : NULL)); + + // Get the list of files with an error in the page checksum + VariantList *checksumPageErrorList = varLstNew(); + + for (unsigned int fileIdx = 0; fileIdx < manifestFileTotal(manifest); fileIdx++) + { + const ManifestFile *file = manifestFile(manifest, fileIdx); + + if (file->checksumPageError) + varLstAdd(checksumPageErrorList, varNewStr(manifestPathPg(file->name))); + } + + kvPut( + varKv(backupInfo), BACKUP_KEY_CHECKSUM_PAGE_ERROR_VAR, + (varLstSize(checksumPageErrorList) > 0 ? 
varNewVarLst(checksumPageErrorList) : NULL)); + + manifestFree(manifest); + } + + varLstAdd(backupSection, backupInfo); + + FUNCTION_TEST_RETURN_VOID(); +} + +/*********************************************************************************************************************************** +For each current backup in the backup.info file of the stanza, set the data for the backup section +***********************************************************************************************************************************/ +static void +backupList( + VariantList *backupSection, InfoStanzaRepo *stanzaData, const String *backupLabel, unsigned int repoIdxStart, + unsigned int repoIdxMax) +{ + FUNCTION_TEST_BEGIN(); + FUNCTION_TEST_PARAM(VARIANT_LIST, backupSection); // The section to add the backup data to + FUNCTION_TEST_PARAM(INFO_STANZA_REPO, stanzaData); // The data for the stanza + FUNCTION_TEST_PARAM(STRING, backupLabel); // Backup label to filter if requested by the user + FUNCTION_TEST_PARAM(UINT, repoIdxStart); // The start index of the repo array to begin checking + FUNCTION_TEST_PARAM(UINT, repoIdxMax); // The index beyond the last repo index to check + FUNCTION_TEST_END(); + + ASSERT(backupSection != NULL); + ASSERT(stanzaData != NULL); + + unsigned int backupNextRepoIdx = 0; + unsigned int backupTotal = 0; + unsigned int backupTotalProcessed = 0; + + // Get the number of backups to be processed + for (unsigned int repoIdx = repoIdxStart; repoIdx < repoIdxMax; repoIdx++) + { + InfoRepoData *repoData = &stanzaData->repoList[repoIdx]; + + if (repoData->backupInfo != NULL && infoBackupDataTotal(repoData->backupInfo) > 0) + backupTotal += infoBackupDataTotal(repoData->backupInfo); + } + + // Process any backups + while (backupTotalProcessed < backupTotal) + { + time_t backupNextTime = 0; + + // Backups are sorted for each repo, so iterate over the lists to create a single list ordered by backup-timestamp-stop + for (unsigned int repoIdx = repoIdxStart; repoIdx < repoIdxMax; repoIdx++) + { + InfoRepoData *repoData = &stanzaData->repoList[repoIdx]; + + // If there are current backups on this repo for this stanza and the end of this backup list has not been reached + // determine the next backup for display + if (repoData->backupInfo != NULL && infoBackupDataTotal(repoData->backupInfo) > 0 && + repoData->backupIdx < infoBackupDataTotal(repoData->backupInfo)) + { + InfoBackupData backupData = infoBackupData(repoData->backupInfo, repoData->backupIdx); + + // See if this backup should be next in the list, ordering from oldest to newest + if (backupNextTime == 0 || backupData.backupTimestampStop < backupNextTime) + { + backupNextTime = backupData.backupTimestampStop; + backupNextRepoIdx = repoIdx; + } + } + } + + InfoRepoData *repoData = &stanzaData->repoList[backupNextRepoIdx]; + InfoBackupData backupData = infoBackupData(repoData->backupInfo, repoData->backupIdx); + repoData->backupIdx++; + + // Add the backup data to the backup section + backupListAdd(backupSection, &backupData, backupLabel, repoData, backupNextRepoIdx); + + backupTotalProcessed++; } FUNCTION_TEST_RETURN_VOID(); @@ -370,55 +591,519 @@ backupList(VariantList *backupSection, InfoBackup *info, const String *backupLab Set the stanza data for each stanza found in the repo ***********************************************************************************************************************************/ static VariantList * -stanzaInfoList(const String *stanza, StringList *stanzaList, const String *backupLabel) 
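/* Sketch for orientation (illustrative only, not part of the patch): the reworked stanzaInfoList() below takes a list of
   per-stanza/per-repo data instead of raw stanza names. Inferred purely from how the fields are used in this hunk, the
   structures look roughly like the following -- the real definitions live earlier in info.c and the exact types here are
   assumptions:

       typedef struct InfoRepoData
       {
           unsigned int key;                // repoN key from the configuration
           CipherType cipher;               // repository cipher type
           const String *cipherPass;        // repository passphrase, NULL when unencrypted
           int stanzaStatus;                // INFO_STANZA_STATUS_CODE_* for the stanza on this repo
           unsigned int backupIdx;          // cursor used while merging backup lists across repos
           InfoBackup *backupInfo;          // backup.info contents, NULL if not loadable
           InfoArchive *archiveInfo;        // archive.info contents, NULL if not loadable
       } InfoRepoData;

       typedef struct InfoStanzaRepo
       {
           const String *name;              // stanza name
           unsigned int currentPgVersion;   // current PG version expected to match across repos
           uint64_t currentPgSystemId;      // current PG system id expected to match across repos
           InfoRepoData *repoList;          // one entry per configured repository
       } InfoStanzaRepo;
*/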
+stanzaInfoList(List *stanzaRepoList, const String *backupLabel, unsigned int repoIdxStart, unsigned int repoIdxMax) { FUNCTION_TEST_BEGIN(); - FUNCTION_TEST_PARAM(STRING, stanza); - FUNCTION_TEST_PARAM(STRING_LIST, stanzaList); + FUNCTION_TEST_PARAM(LIST, stanzaRepoList); FUNCTION_TEST_PARAM(STRING, backupLabel); + FUNCTION_TEST_PARAM(UINT, repoIdxStart); + FUNCTION_TEST_PARAM(UINT, repoIdxMax); FUNCTION_TEST_END(); - ASSERT(stanzaList != NULL); + ASSERT(stanzaRepoList != NULL); VariantList *result = varLstNew(); - bool stanzaFound = false; - // Sort the list - stanzaList = strLstSort(stanzaList, sortOrderAsc); + // Sort the list of stanzas + stanzaRepoList = lstSort(stanzaRepoList, sortOrderAsc); - for (unsigned int idx = 0; idx < strLstSize(stanzaList); idx++) + // Process each stanza requested + for (unsigned int idx = 0; idx < lstSize(stanzaRepoList); idx++) { - String *stanzaListName = strLstGet(stanzaList, idx); - - // If a specific stanza has been requested and this is not it, then continue to the next in the list else indicate found - if (stanza != NULL) - { - if (!strEq(stanza, stanzaListName)) - continue; - else - stanzaFound = true; - } + InfoStanzaRepo *stanzaData = lstGet(stanzaRepoList, idx); // Create the stanzaInfo and section variables Variant *stanzaInfo = varNewKv(kvNew()); VariantList *dbSection = varLstNew(); VariantList *backupSection = varLstNew(); VariantList *archiveSection = varLstNew(); - InfoBackup *info = NULL; + VariantList *repoSection = varLstNew(); + + int stanzaStatusCode = -1; + unsigned int stanzaCipherType = 0; + bool checkBackupLock = false; + + // Set the stanza name and initialize the overall stanza variables + kvPut(varKv(stanzaInfo), KEY_NAME_VAR, VARSTR(stanzaData->name)); + + // Get the stanza for each requested repo + for (unsigned int repoIdx = repoIdxStart; repoIdx < repoIdxMax; repoIdx++) + { + InfoRepoData *repoData = &stanzaData->repoList[repoIdx]; + + Variant *repoInfo = varNewKv(kvNew()); + kvPut(varKv(repoInfo), REPO_KEY_KEY_VAR, VARUINT(repoData->key)); + kvPut(varKv(repoInfo), KEY_CIPHER_VAR, VARSTR(cipherTypeName(repoData->cipher))); + + // If the stanza on this repo has the default status of ok but the backupInfo was not read, then the stanza exists on + // other repos but not this one + if (repoData->stanzaStatus == INFO_STANZA_STATUS_CODE_OK && repoData->backupInfo == NULL) + repoData->stanzaStatus = INFO_STANZA_STATUS_CODE_MISSING_STANZA_PATH; + + // If the backup.info file has been read, then get the backup and archive information on this repo + if (repoData->backupInfo != NULL) + { + // If the backup.info file exists, get the database history information (oldest to newest) and corresponding archive + for (unsigned int pgIdx = infoPgDataTotal(infoBackupPg(repoData->backupInfo)) - 1; (int)pgIdx >= 0; pgIdx--) + { + InfoPgData pgData = infoPgData(infoBackupPg(repoData->backupInfo), pgIdx); + Variant *pgInfo = varNewKv(kvNew()); + + kvPut(varKv(pgInfo), DB_KEY_ID_VAR, VARUINT(pgData.id)); + kvPut(varKv(pgInfo), DB_KEY_SYSTEM_ID_VAR, VARUINT64(pgData.systemId)); + kvPut(varKv(pgInfo), DB_KEY_VERSION_VAR, VARSTR(pgVersionToStr(pgData.version))); + kvPut(varKv(pgInfo), KEY_REPO_KEY_VAR, VARUINT(repoData->key)); + + varLstAdd(dbSection, pgInfo); + + // Get the archive info for the DB from the archive.info file + archiveDbList( + stanzaData->name, &pgData, archiveSection, repoData->archiveInfo, (pgIdx == 0 ? 
true : false), repoIdx, + repoData->key); + } + + // Set stanza status if the current db sections do not match across repos + InfoPgData backupInfoCurrentPg = infoPgData( + infoBackupPg(repoData->backupInfo), infoPgDataCurrentId(infoBackupPg(repoData->backupInfo))); + + // The current PG system and version must match across repos for the stanza, if not, a failure may have occurred + // during an upgrade or the repo may have been disabled during the stanza upgrade to protect from error propagation + if (stanzaData->currentPgVersion != backupInfoCurrentPg.version || + stanzaData->currentPgSystemId != backupInfoCurrentPg.systemId) + { + stanzaStatusCode = INFO_STANZA_STATUS_CODE_PG_MISMATCH; + } + } + + // If the stanza has been created successfully on at least one repo, then check for a lock on the PG server + if (repoData->stanzaStatus == INFO_STANZA_STATUS_CODE_OK) + { + checkBackupLock = true; + + // If there are no current backups on this repo then set status to no backup + if (infoBackupDataTotal(repoData->backupInfo) == 0) + repoData->stanzaStatus = INFO_STANZA_STATUS_CODE_NO_BACKUP; + } + + // Track the status over all repos if the status for the stanza has not already been determined + if (stanzaStatusCode != INFO_STANZA_STATUS_CODE_PG_MISMATCH) + { + if (repoIdx == repoIdxStart) + stanzaStatusCode = repoData->stanzaStatus; + else + { + stanzaStatusCode = + stanzaStatusCode != repoData->stanzaStatus ? INFO_STANZA_STATUS_CODE_MIXED : repoData->stanzaStatus; + } + } + + // Track cipher type over all repos + if (repoIdx == repoIdxStart) + stanzaCipherType = repoData->cipher; + else + stanzaCipherType = stanzaCipherType != repoData->cipher ? INFO_STANZA_STATUS_CODE_MIXED : repoData->cipher; + + // Add the status of the stanza on the repo to the repo section, and the repo to the repo array + repoStanzaStatus(repoData->stanzaStatus, repoInfo); + varLstAdd(repoSection, repoInfo); + + // Add the database history, backup, archive and repo arrays to the stanza info + kvPut(varKv(stanzaInfo), STANZA_KEY_DB_VAR, varNewVarLst(dbSection)); + kvPut(varKv(stanzaInfo), KEY_ARCHIVE_VAR, varNewVarLst(archiveSection)); + kvPut(varKv(stanzaInfo), STANZA_KEY_REPO_VAR, varNewVarLst(repoSection)); + } + + // Get a sorted list of the data for all existing backups for this stanza over all repos + backupList(backupSection, stanzaData, backupLabel, repoIdxStart, repoIdxMax); + kvPut(varKv(stanzaInfo), STANZA_KEY_BACKUP_VAR, varNewVarLst(backupSection)); + + static bool backupLockHeld = false; + + // If the stanza is OK on at least one repo, then check if there's a local backup running + if (checkBackupLock) + { + // Try to acquire a lock. If not possible, assume another backup or expire is already running. 
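            // (Illustrative aside, not part of the patch: the probe below relies on lockAcquire(), as used here, returning
            // false rather than throwing when called with a zero timeout and failOnNoLock = false. With names shortened for
            // illustration, the pattern reduces to:
            //
            //     bool heldElsewhere = !lockAcquire(lockPath, stanza, execId, lockTypeBackup, 0, false);
            //     lockRelease(!heldElsewhere);   // release only if this probe actually took the lock
            //
            // so a running backup/expire shows up as heldElsewhere == true without blocking the info command.)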
+ backupLockHeld = !lockAcquire( + cfgOptionStr(cfgOptLockPath), stanzaData->name, cfgOptionStr(cfgOptExecId), lockTypeBackup, 0, false); + + // Immediately release the lock acquired + lockRelease(!backupLockHeld); + } + + // Set the overall stanza status + stanzaStatus(stanzaStatusCode, backupLockHeld, stanzaInfo); + + // Set the overall cipher type + if (stanzaCipherType != INFO_STANZA_STATUS_CODE_MIXED) + kvPut(varKv(stanzaInfo), KEY_CIPHER_VAR, VARSTR(cipherTypeName(stanzaCipherType))); + else + kvPut(varKv(stanzaInfo), KEY_CIPHER_VAR, VARSTRDEF(INFO_STANZA_MIXED)); + + varLstAdd(result, stanzaInfo); + } + + FUNCTION_TEST_RETURN(result); +} + +/*********************************************************************************************************************************** +Format the text output for archive and backups for a database group of a stanza +***********************************************************************************************************************************/ +static void +formatTextBackup(const DbGroup *dbGroup, String *resultStr) +{ + FUNCTION_TEST_BEGIN(); + FUNCTION_TEST_PARAM(DB_GROUP, dbGroup); + FUNCTION_TEST_PARAM(STRING, resultStr); + FUNCTION_TEST_END(); + + ASSERT(dbGroup != NULL); + + strCatFmt(resultStr, "\n wal archive min/max (%s): ", strZ(dbGroup->version)); + + // Get the archive min/max if there are any archives for the database + if (dbGroup->archiveMin != NULL) + strCatFmt(resultStr, "%s/%s\n", strZ(dbGroup->archiveMin), strZ(dbGroup->archiveMax)); + else + strCatZ(resultStr, "none present\n"); + + for (unsigned int backupIdx = 0; backupIdx < varLstSize(dbGroup->backupList); backupIdx++) + { + KeyValue *backupInfo = varKv(varLstGet(dbGroup->backupList, backupIdx)); + + strCatFmt( + resultStr, "\n %s backup: %s\n", strZ(varStr(kvGet(backupInfo, BACKUP_KEY_TYPE_VAR))), + strZ(varStr(kvGet(backupInfo, BACKUP_KEY_LABEL_VAR)))); + + // Get and format the backup start/stop time + KeyValue *timestampInfo = varKv(kvGet(backupInfo, BACKUP_KEY_TIMESTAMP_VAR)); + + char timeBufferStart[20]; + char timeBufferStop[20]; + time_t timeStart = (time_t)varUInt64(kvGet(timestampInfo, KEY_START_VAR)); + time_t timeStop = (time_t)varUInt64(kvGet(timestampInfo, KEY_STOP_VAR)); + + strftime(timeBufferStart, sizeof(timeBufferStart), "%Y-%m-%d %H:%M:%S", localtime(&timeStart)); + strftime(timeBufferStop, sizeof(timeBufferStop), "%Y-%m-%d %H:%M:%S", localtime(&timeStop)); + + strCatFmt(resultStr, " timestamp start/stop: %s / %s\n", timeBufferStart, timeBufferStop); + strCatZ(resultStr, " wal start/stop: "); + + KeyValue *archiveBackupInfo = varKv(kvGet(backupInfo, KEY_ARCHIVE_VAR)); + + if (kvGet(archiveBackupInfo, KEY_START_VAR) != NULL && kvGet(archiveBackupInfo, KEY_STOP_VAR) != NULL) + { + strCatFmt( + resultStr, "%s / %s\n", strZ(varStr(kvGet(archiveBackupInfo, KEY_START_VAR))), + strZ(varStr(kvGet(archiveBackupInfo, KEY_STOP_VAR)))); + } + else + strCatZ(resultStr, "n/a\n"); + + KeyValue *info = varKv(kvGet(backupInfo, BACKUP_KEY_INFO_VAR)); + + strCatFmt( + resultStr, " database size: %s, backup size: %s\n", + strZ(strSizeFormat(varUInt64Force(kvGet(info, KEY_SIZE_VAR)))), + strZ(strSizeFormat(varUInt64Force(kvGet(info, KEY_DELTA_VAR))))); + + KeyValue *repoInfo = varKv(kvGet(info, INFO_KEY_REPOSITORY_VAR)); + + strCatFmt( + resultStr, " repo%u: size: %s, backup size: %s\n", + varUInt(kvGet(varKv(kvGet(backupInfo, KEY_DATABASE_VAR)), KEY_REPO_KEY_VAR)), + strZ(strSizeFormat(varUInt64Force(kvGet(repoInfo, KEY_SIZE_VAR)))), + 
strZ(strSizeFormat(varUInt64Force(kvGet(repoInfo, KEY_DELTA_VAR))))); + + if (kvGet(backupInfo, BACKUP_KEY_REFERENCE_VAR) != NULL) + { + StringList *referenceList = strLstNewVarLst(varVarLst(kvGet(backupInfo, BACKUP_KEY_REFERENCE_VAR))); + strCatFmt(resultStr, " backup reference list: %s\n", strZ(strLstJoin(referenceList, ", "))); + } + + if (kvGet(backupInfo, BACKUP_KEY_DATABASE_REF_VAR) != NULL) + { + VariantList *dbSection = kvGetList(backupInfo, BACKUP_KEY_DATABASE_REF_VAR); + + strCatZ(resultStr, " database list:"); + + if (varLstSize(dbSection) == 0) + strCatZ(resultStr, " none\n"); + else + { + for (unsigned int dbIdx = 0; dbIdx < varLstSize(dbSection); dbIdx++) + { + KeyValue *db = varKv(varLstGet(dbSection, dbIdx)); + + strCatFmt( + resultStr, " %s (%s)", strZ(varStr(kvGet(db, KEY_NAME_VAR))), + strZ(varStrForce(kvGet(db, KEY_OID_VAR)))); + + if (dbIdx != varLstSize(dbSection) - 1) + strCatZ(resultStr, ","); + } + + strCat(resultStr, LF_STR); + } + } + + if (kvGet(backupInfo, BACKUP_KEY_LINK_VAR) != NULL) + { + VariantList *linkSection = kvGetList(backupInfo, BACKUP_KEY_LINK_VAR); + + strCatZ(resultStr, " symlinks:\n"); + + for (unsigned int linkIdx = 0; linkIdx < varLstSize(linkSection); linkIdx++) + { + KeyValue *link = varKv(varLstGet(linkSection, linkIdx)); + + strCatFmt( + resultStr, " %s => %s", strZ(varStr(kvGet(link, KEY_NAME_VAR))), + strZ(varStr(kvGet(link, KEY_DESTINATION_VAR)))); + + if (linkIdx != varLstSize(linkSection) - 1) + strCat(resultStr, LF_STR); + } + + strCat(resultStr, LF_STR); + } + + if (kvGet(backupInfo, BACKUP_KEY_TABLESPACE_VAR) != NULL) + { + VariantList *tablespaceSection = kvGetList(backupInfo, BACKUP_KEY_TABLESPACE_VAR); + + strCatZ(resultStr, " tablespaces:\n"); + + for (unsigned int tblIdx = 0; tblIdx < varLstSize(tablespaceSection); tblIdx++) + { + KeyValue *tablespace = varKv(varLstGet(tablespaceSection, tblIdx)); + + strCatFmt( + resultStr, " %s (%s) => %s", strZ(varStr(kvGet(tablespace, KEY_NAME_VAR))), + strZ(varStrForce(kvGet(tablespace, KEY_OID_VAR))), + strZ(varStr(kvGet(tablespace, KEY_DESTINATION_VAR)))); + + if (tblIdx != varLstSize(tablespaceSection) - 1) + strCat(resultStr, LF_STR); + } + + strCat(resultStr, LF_STR); + } + + if (kvGet(backupInfo, BACKUP_KEY_CHECKSUM_PAGE_ERROR_VAR) != NULL) + { + StringList *checksumPageErrorList = strLstNewVarLst( + varVarLst(kvGet(backupInfo, BACKUP_KEY_CHECKSUM_PAGE_ERROR_VAR))); + + strCatFmt( + resultStr, " page checksum error: %s\n", + strZ(strLstJoin(checksumPageErrorList, ", "))); + } + } + + FUNCTION_TEST_RETURN_VOID(); +} + +/*********************************************************************************************************************************** +Format the text output for each database of the stanza +***********************************************************************************************************************************/ +static void +formatTextDb( + const KeyValue *stanzaInfo, String *resultStr, const String *currentPgVersion, uint64_t currentPgSystemId, + const String *backupLabel) +{ + FUNCTION_TEST_BEGIN(); + FUNCTION_TEST_PARAM(KEY_VALUE, stanzaInfo); + FUNCTION_TEST_PARAM(STRING, resultStr); + FUNCTION_TEST_PARAM(STRING, backupLabel); + FUNCTION_TEST_PARAM(STRING, currentPgVersion); + FUNCTION_TEST_PARAM(UINT64, currentPgSystemId); + FUNCTION_TEST_END(); + + ASSERT(stanzaInfo != NULL); + ASSERT(currentPgVersion != NULL); + + VariantList *dbSection = kvGetList(stanzaInfo, STANZA_KEY_DB_VAR); + VariantList *archiveSection = kvGetList(stanzaInfo, KEY_ARCHIVE_VAR); + 
VariantList *backupSection = kvGetList(stanzaInfo, STANZA_KEY_BACKUP_VAR); + + List *dbGroupList = lstNewP(sizeof(DbGroup)); + + // For each database update the corresponding archive info + for (unsigned int dbIdx = 0; dbIdx < varLstSize(dbSection); dbIdx++) + { + KeyValue *pgInfo = varKv(varLstGet(dbSection, dbIdx)); + uint64_t dbSysId = varUInt64(kvGet(pgInfo, DB_KEY_SYSTEM_ID_VAR)); + const String *dbVersion = varStr(kvGet(pgInfo, DB_KEY_VERSION_VAR)); + unsigned int dbId = varUInt(kvGet(pgInfo, DB_KEY_ID_VAR)); + unsigned int dbRepoKey = varUInt(kvGet(pgInfo, KEY_REPO_KEY_VAR)); + + DbGroup *dbGroup = NULL; + + for (unsigned int dbGrpIdx = 0; dbGrpIdx < lstSize(dbGroupList); dbGrpIdx++) + { + DbGroup *dbGroupInfo = lstGet(dbGroupList, dbGrpIdx); + + if (dbGroupInfo->systemId == dbSysId && strEq(dbGroupInfo->version, dbVersion)) + { + dbGroup = dbGroupInfo; + break; + } + } + + // If the group was not found, then add it + if (dbGroup == NULL) + { + DbGroup dbGroupInfo = + { + .systemId = dbSysId, + .version = dbVersion, + .current = (currentPgSystemId == dbSysId && strEq(currentPgVersion, dbVersion)), + .archiveMin = NULL, + .archiveMax = NULL, + .backupList = varLstNew(), + }; + + lstAdd(dbGroupList, &dbGroupInfo); + + dbGroup = lstGetLast(dbGroupList); + } + + // For each archive of this stanza, update the archive min/max for this database group if necessary + for (unsigned int archiveIdx = 0; archiveIdx < varLstSize(archiveSection); archiveIdx++) + { + KeyValue *archiveInfo = varKv(varLstGet(archiveSection, archiveIdx)); + KeyValue *archiveDbInfo = varKv(kvGet(archiveInfo, KEY_DATABASE_VAR)); + unsigned int archiveDbId = varUInt(kvGet(archiveDbInfo, DB_KEY_ID_VAR)); + unsigned int archiveRepoKey = varUInt(kvGet(archiveDbInfo, KEY_REPO_KEY_VAR)); + + // If there are archives and the min is less than that for this database group, then update the group + if (archiveDbId == dbId && archiveRepoKey == dbRepoKey && varStr(kvGet(archiveInfo, ARCHIVE_KEY_MIN_VAR)) != NULL) + { + // Although archives should continue to increment over system-id/version with different db-ids, there may be cases + // where an archived WAL may exist on both, and if the archive id on a later db is less than a prior instance of + // the same PG, then ensure it is updated as the min. Any need to error should not be handled in the info command. 
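                // (Illustrative aside, not part of the patch: the min/max tracking below can use plain string comparison
                // because WAL archive ids are fixed-width uppercase hex, so lexicographic order matches chronological
                // order, e.g.:
                //
                //     strCmp(STRDEF("000000010000000000000002"), STRDEF("00000001000000000000000A")) < 0   // older first
                // )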
+ if (dbGroup->archiveMin == NULL || strCmp(dbGroup->archiveMin, varStr(kvGet(archiveInfo, ARCHIVE_KEY_MIN_VAR))) > 0) + dbGroup->archiveMin = varStrForce(kvGet(archiveInfo, ARCHIVE_KEY_MIN_VAR)); + + if (dbGroup->archiveMax == NULL || strCmp(dbGroup->archiveMax, varStr(kvGet(archiveInfo, ARCHIVE_KEY_MAX_VAR))) < 0) + dbGroup->archiveMax = varStrForce(kvGet(archiveInfo, ARCHIVE_KEY_MAX_VAR)); + } + } + } + + unsigned int backupDbGrpIdxMin = 0; + unsigned int backupDbGrpIdxMax = lstSize(dbGroupList); + + // For every backup (oldest to newest) for the stanza, add it to the database group based on system-id and version + for (unsigned int backupIdx = 0; backupIdx < varLstSize(backupSection); backupIdx++) + { + KeyValue *backupInfo = varKv(varLstGet(backupSection, backupIdx)); + + // If a backup label was specified but this is not it then continue + if (backupLabel != NULL && !strEq(varStr(kvGet(backupInfo, BACKUP_KEY_LABEL_VAR)), backupLabel)) + continue; + + KeyValue *backupDbInfo = varKv(kvGet(backupInfo, KEY_DATABASE_VAR)); + unsigned int backupDbId = varUInt(kvGet(backupDbInfo, DB_KEY_ID_VAR)); + unsigned int backupRepoKey = varUInt(kvGet(backupDbInfo, KEY_REPO_KEY_VAR)); + + // Find the database group this backup belongs to and add it + for (unsigned int dbIdx = 0; dbIdx < varLstSize(dbSection); dbIdx++) + { + KeyValue *pgInfo = varKv(varLstGet(dbSection, dbIdx)); + + unsigned int dbId = varUInt(kvGet(pgInfo, DB_KEY_ID_VAR)); + unsigned int dbRepoKey = varUInt(kvGet(pgInfo, KEY_REPO_KEY_VAR)); + + if (backupDbId == dbId && backupRepoKey == dbRepoKey) + { + for (unsigned int dbGrpIdx = 0; dbGrpIdx < lstSize(dbGroupList); dbGrpIdx++) + { + DbGroup *dbGroupInfo = lstGet(dbGroupList, dbGrpIdx); + + if (dbGroupInfo->systemId == varUInt64(kvGet(pgInfo, DB_KEY_SYSTEM_ID_VAR)) && + strEq(dbGroupInfo->version, varStr(kvGet(pgInfo, DB_KEY_VERSION_VAR)))) + { + varLstAdd(dbGroupInfo->backupList, varLstGet(backupSection, backupIdx)); + + // If we're only looking for one backup, then narrow the db group iterators + if (backupLabel != NULL) + { + backupDbGrpIdxMin = dbGrpIdx; + backupDbGrpIdxMax = dbGrpIdx + 1; + } + + dbGrpIdx = lstSize(dbGroupList); + } + } + + dbIdx = varLstSize(dbSection); + } + } + } + + String *resultCurrent = strNew("\n db (current)"); + bool displayCurrent = false; + + for (unsigned int dbGrpIdx = backupDbGrpIdxMin; dbGrpIdx < backupDbGrpIdxMax; dbGrpIdx++) + { + DbGroup *dbGroupInfo = lstGet(dbGroupList, dbGrpIdx); + + // Sort the results based on current or prior and only show the prior if it has archives or backups + if (dbGroupInfo->current) + { + formatTextBackup(dbGroupInfo, resultCurrent); + displayCurrent = true; + } + else if (dbGroupInfo->archiveMin != NULL || varLstSize(dbGroupInfo->backupList) > 0) + { + strCatZ(resultStr, "\n db (prior)"); + formatTextBackup(dbGroupInfo, resultStr); + } + } + + // Add the current results to the end if necessary (e.g. 
current not displayed if a specified backup label is only in prior) + if (displayCurrent == true) + strCat(resultStr, resultCurrent); + + FUNCTION_TEST_RETURN_VOID(); +} + +/*********************************************************************************************************************************** +Get the backup and archive info files on the specified repo for the stanza +***********************************************************************************************************************************/ +static void +infoUpdateStanza(const Storage *storage, InfoStanzaRepo *stanzaRepo, unsigned int repoIdx, bool stanzaExists) +{ + FUNCTION_TEST_BEGIN(); + FUNCTION_TEST_PARAM(STORAGE, storage); + FUNCTION_TEST_PARAM(INFO_STANZA_REPO, stanzaRepo); + FUNCTION_TEST_PARAM(UINT, repoIdx); + FUNCTION_TEST_PARAM(BOOL, stanzaExists); + FUNCTION_TEST_END(); + + ASSERT(storage != NULL); + ASSERT(stanzaRepo != NULL); + + InfoBackup *info = NULL; + volatile int stanzaStatus = INFO_STANZA_STATUS_CODE_OK; + + // If the stanza exists, attempt to get the backup.info file + if (stanzaExists) + { // Catch certain errors TRY_BEGIN() { // Attempt to load the backup info file info = infoBackupLoadFile( - storageRepo(), strNewFmt(STORAGE_PATH_BACKUP "/%s/%s", strZ(stanzaListName), INFO_BACKUP_FILE), - cipherType(cfgOptionStr(cfgOptRepoCipherType)), cfgOptionStrNull(cfgOptRepoCipherPass)); + storage, strNewFmt(STORAGE_PATH_BACKUP "/%s/%s", strZ(stanzaRepo->name), INFO_BACKUP_FILE), + stanzaRepo->repoList[repoIdx].cipher, stanzaRepo->repoList[repoIdx].cipherPass); } CATCH(FileMissingError) { // If there is no backup.info then set the status to indicate missing - stanzaStatus( - INFO_STANZA_STATUS_CODE_MISSING_STANZA_DATA, INFO_STANZA_STATUS_MESSAGE_MISSING_STANZA_DATA_STR, false, stanzaInfo); + stanzaStatus = INFO_STANZA_STATUS_CODE_MISSING_STANZA_DATA; } CATCH(CryptoError) { @@ -431,327 +1116,32 @@ stanzaInfoList(const String *stanza, StringList *stanzaList, const String *backu } TRY_END(); - // Set the stanza name and cipher. Since we may not be going through the config parsing system, default the cipher to NONE. - kvPut(varKv(stanzaInfo), KEY_NAME_VAR, VARSTR(stanzaListName)); - kvPut(varKv(stanzaInfo), STANZA_KEY_CIPHER_VAR, VARSTR(CIPHER_TYPE_NONE_STR)); - - // If the backup.info file exists, get the database history information (newest to oldest) and corresponding archive + // If backup.info was found, then get the archive.info file, which must exist if the backup.info exists, else throw error if (info != NULL) { - // Determine if encryption is enabled by checking for a cipher passphrase. This is not ideal since it does not tell us - // what type of encryption is in use, but to figure that out we need a way to query the (possibly) remote repo to find - // out. No such mechanism exists so this will have to do for now. Probably the easiest thing to do is store the cipher - // type in the info file. 
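        // (Illustrative aside, not part of the patch: the heuristic described above is superseded in the multi-repo code
        // added elsewhere in this patch, where the cipher type is read directly from the per-repo configuration when each
        // repo is initialized in infoRender(), e.g. `.cipher = cipherType(cfgOptionIdxStr(cfgOptRepoCipherType, repoIdx))`.)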
- if (infoPgCipherPass(infoBackupPg(info)) != NULL) - kvPut(varKv(stanzaInfo), STANZA_KEY_CIPHER_VAR, VARSTR(CIPHER_TYPE_AES_256_CBC_STR)); - - for (unsigned int pgIdx = infoPgDataTotal(infoBackupPg(info)) - 1; (int)pgIdx >= 0; pgIdx--) - { - InfoPgData pgData = infoPgData(infoBackupPg(info), pgIdx); - Variant *pgInfo = varNewKv(kvNew()); - - kvPut(varKv(pgInfo), DB_KEY_ID_VAR, VARUINT(pgData.id)); - kvPut(varKv(pgInfo), DB_KEY_SYSTEM_ID_VAR, VARUINT64(pgData.systemId)); - kvPut(varKv(pgInfo), DB_KEY_VERSION_VAR, VARSTR(pgVersionToStr(pgData.version))); - - varLstAdd(dbSection, pgInfo); - - // Get the archive info for the DB from the archive.info file - InfoArchive *info = infoArchiveLoadFile( - storageRepo(), strNewFmt(STORAGE_PATH_ARCHIVE "/%s/%s", strZ(stanzaListName), INFO_ARCHIVE_FILE), - cipherType(cfgOptionStr(cfgOptRepoCipherType)), cfgOptionStrNull(cfgOptRepoCipherPass)); - archiveDbList(stanzaListName, &pgData, archiveSection, info, (pgIdx == 0 ? true : false)); - } - - // Get data for all existing backups for this stanza - backupList(backupSection, info, backupLabel); + stanzaRepo->repoList[repoIdx].archiveInfo = infoArchiveLoadFile( + storage, strNewFmt(STORAGE_PATH_ARCHIVE "/%s/%s", strZ(stanzaRepo->name), INFO_ARCHIVE_FILE), + stanzaRepo->repoList[repoIdx].cipher, stanzaRepo->repoList[repoIdx].cipherPass); } - // Add the database history, backup and archive sections to the stanza info - kvPut(varKv(stanzaInfo), STANZA_KEY_DB_VAR, varNewVarLst(dbSection)); - kvPut(varKv(stanzaInfo), STANZA_KEY_BACKUP_VAR, varNewVarLst(backupSection)); - kvPut(varKv(stanzaInfo), KEY_ARCHIVE_VAR, varNewVarLst(archiveSection)); - - // If a status has not already been set, check if there's a local backup running - static bool backupLockHeld = false; - - if (kvGet(varKv(stanzaInfo), STANZA_KEY_STATUS_VAR) == NULL) - { - // Try to acquire a lock. If not possible, assume another backup or expire is already running. 
- backupLockHeld = !lockAcquire( - cfgOptionStr(cfgOptLockPath), stanzaListName, cfgOptionStr(cfgOptExecId), lockTypeBackup, 0, false); - - // Immediately release the lock acquired - lockRelease(!backupLockHeld); - } - - // If a status has not already been set and there are no backups then set status to no backup - if (kvGet(varKv(stanzaInfo), STANZA_KEY_STATUS_VAR) == NULL && - varLstSize(kvGetList(varKv(stanzaInfo), STANZA_KEY_BACKUP_VAR)) == 0) - { - stanzaStatus(INFO_STANZA_STATUS_CODE_NO_BACKUP, INFO_STANZA_STATUS_MESSAGE_NO_BACKUP_STR, backupLockHeld, stanzaInfo); - } - - // If a status has still not been set then set it to OK - if (kvGet(varKv(stanzaInfo), STANZA_KEY_STATUS_VAR) == NULL) - stanzaStatus(INFO_STANZA_STATUS_CODE_OK, INFO_STANZA_STATUS_MESSAGE_OK_STR, backupLockHeld, stanzaInfo); - - varLstAdd(result, stanzaInfo); + stanzaRepo->repoList[repoIdx].stanzaStatus = stanzaStatus; } + else + stanzaRepo->repoList[repoIdx].stanzaStatus = INFO_STANZA_STATUS_CODE_MISSING_STANZA_PATH; - // If looking for a specific stanza and it was not found, set minimum info and the status - if (stanza != NULL && !stanzaFound) + stanzaRepo->repoList[repoIdx].backupInfo = info; + + // If the backup.info and therefore archive.info exist, and the currentPg has not been set for the stanza, then set it + if (stanzaRepo->currentPgVersion == 0 && stanzaRepo->repoList[repoIdx].backupInfo != NULL) { - Variant *stanzaInfo = varNewKv(kvNew()); + InfoPgData backupInfoCurrentPg = infoPgData( + infoBackupPg(stanzaRepo->repoList[repoIdx].backupInfo), + infoPgDataCurrentId(infoBackupPg(stanzaRepo->repoList[repoIdx].backupInfo))); - kvPut(varKv(stanzaInfo), KEY_NAME_VAR, VARSTR(stanza)); - - kvPut(varKv(stanzaInfo), STANZA_KEY_DB_VAR, varNewVarLst(varLstNew())); - kvPut(varKv(stanzaInfo), STANZA_KEY_BACKUP_VAR, varNewVarLst(varLstNew())); - kvPut(varKv(stanzaInfo), KEY_ARCHIVE_VAR, varNewVarLst(varLstNew())); - - stanzaStatus( - INFO_STANZA_STATUS_CODE_MISSING_STANZA_PATH, INFO_STANZA_STATUS_MESSAGE_MISSING_STANZA_PATH_STR, false, stanzaInfo); - varLstAdd(result, stanzaInfo); + stanzaRepo->currentPgVersion = backupInfoCurrentPg.version; + stanzaRepo->currentPgSystemId = backupInfoCurrentPg.systemId; } - FUNCTION_TEST_RETURN(result); -} - -/*********************************************************************************************************************************** -Format the text output for each database of the stanza -***********************************************************************************************************************************/ -static void -formatTextDb(const KeyValue *stanzaInfo, String *resultStr, const String *backupLabel) -{ - FUNCTION_TEST_BEGIN(); - FUNCTION_TEST_PARAM(KEY_VALUE, stanzaInfo); - FUNCTION_TEST_PARAM(STRING, resultStr); - FUNCTION_TEST_PARAM(STRING, backupLabel); - FUNCTION_TEST_END(); - - ASSERT(stanzaInfo != NULL); - - VariantList *dbSection = kvGetList(stanzaInfo, STANZA_KEY_DB_VAR); - VariantList *archiveSection = kvGetList(stanzaInfo, KEY_ARCHIVE_VAR); - VariantList *backupSection = kvGetList(stanzaInfo, STANZA_KEY_BACKUP_VAR); - - // For each database (working from oldest to newest) find the corresponding archive and backup info - for (unsigned int dbIdx = 0; dbIdx < varLstSize(dbSection); dbIdx++) - { - KeyValue *pgInfo = varKv(varLstGet(dbSection, dbIdx)); - unsigned int dbId = varUInt(kvGet(pgInfo, DB_KEY_ID_VAR)); - bool backupInDb = false; - - // If a backup label was specified then see if it exists for this database - if (backupLabel != NULL) - { - for 
(unsigned int backupIdx = 0; backupIdx < varLstSize(backupSection); backupIdx++) - { - KeyValue *backupInfo = varKv(varLstGet(backupSection, backupIdx)); - KeyValue *backupDbInfo = varKv(kvGet(backupInfo, KEY_DATABASE_VAR)); - unsigned int backupDbId = varUInt(kvGet(backupDbInfo, DB_KEY_ID_VAR)); - - // If the backup requested is in this database then break from the loop - if (backupDbId == dbId) - { - backupInDb = true; - break; - } - } - } - - // If backup label was requested but was not found in this database then continue to next database - if (backupLabel != NULL && !backupInDb) - continue; - - // List is ordered so 0 is always the current DB index - if (dbIdx == varLstSize(dbSection) - 1) - strCatZ(resultStr, "\n db (current)"); - - // Get the min/max archive information for the database - String *archiveResult = strNew(""); - - for (unsigned int archiveIdx = 0; archiveIdx < varLstSize(archiveSection); archiveIdx++) - { - KeyValue *archiveInfo = varKv(varLstGet(archiveSection, archiveIdx)); - KeyValue *archiveDbInfo = varKv(kvGet(archiveInfo, KEY_DATABASE_VAR)); - unsigned int archiveDbId = varUInt(kvGet(archiveDbInfo, DB_KEY_ID_VAR)); - - if (archiveDbId == dbId) - { - strCatFmt( - archiveResult, "\n wal archive min/max (%s): ", - strZ(varStr(kvGet(archiveInfo, DB_KEY_ID_VAR)))); - - // Get the archive min/max if there are any archives for the database - if (kvGet(archiveInfo, ARCHIVE_KEY_MIN_VAR) != NULL) - { - strCatFmt( - archiveResult, "%s/%s\n", strZ(varStr(kvGet(archiveInfo, ARCHIVE_KEY_MIN_VAR))), - strZ(varStr(kvGet(archiveInfo, ARCHIVE_KEY_MAX_VAR)))); - } - else - strCatZ(archiveResult, "none present\n"); - } - } - - // Get the information for each current backup - String *backupResult = strNew(""); - - for (unsigned int backupIdx = 0; backupIdx < varLstSize(backupSection); backupIdx++) - { - KeyValue *backupInfo = varKv(varLstGet(backupSection, backupIdx)); - KeyValue *backupDbInfo = varKv(kvGet(backupInfo, KEY_DATABASE_VAR)); - unsigned int backupDbId = varUInt(kvGet(backupDbInfo, DB_KEY_ID_VAR)); - - // If a backup label was specified but this is not it then continue - if (backupLabel != NULL && !strEq(varStr(kvGet(backupInfo, BACKUP_KEY_LABEL_VAR)), backupLabel)) - continue; - - if (backupDbId == dbId) - { - strCatFmt( - backupResult, "\n %s backup: %s\n", strZ(varStr(kvGet(backupInfo, BACKUP_KEY_TYPE_VAR))), - strZ(varStr(kvGet(backupInfo, BACKUP_KEY_LABEL_VAR)))); - - KeyValue *timestampInfo = varKv(kvGet(backupInfo, BACKUP_KEY_TIMESTAMP_VAR)); - - // Get and format the backup start/stop time - char timeBufferStart[20]; - char timeBufferStop[20]; - time_t timeStart = (time_t)varUInt64(kvGet(timestampInfo, KEY_START_VAR)); - time_t timeStop = (time_t)varUInt64(kvGet(timestampInfo, KEY_STOP_VAR)); - - strftime(timeBufferStart, sizeof(timeBufferStart), "%Y-%m-%d %H:%M:%S", localtime(&timeStart)); - strftime(timeBufferStop, sizeof(timeBufferStop), "%Y-%m-%d %H:%M:%S", localtime(&timeStop)); - - strCatFmt( - backupResult, " timestamp start/stop: %s / %s\n", timeBufferStart, timeBufferStop); - strCatZ(backupResult, " wal start/stop: "); - - KeyValue *archiveBackupInfo = varKv(kvGet(backupInfo, KEY_ARCHIVE_VAR)); - - if (kvGet(archiveBackupInfo, KEY_START_VAR) != NULL && - kvGet(archiveBackupInfo, KEY_STOP_VAR) != NULL) - { - strCatFmt( - backupResult, "%s / %s\n", strZ(varStr(kvGet(archiveBackupInfo, KEY_START_VAR))), - strZ(varStr(kvGet(archiveBackupInfo, KEY_STOP_VAR)))); - } - else - strCatZ(backupResult, "n/a\n"); - - KeyValue *info = varKv(kvGet(backupInfo, 
BACKUP_KEY_INFO_VAR)); - - strCatFmt( - backupResult, " database size: %s, backup size: %s\n", - strZ(strSizeFormat(varUInt64Force(kvGet(info, KEY_SIZE_VAR)))), - strZ(strSizeFormat(varUInt64Force(kvGet(info, KEY_DELTA_VAR))))); - - KeyValue *repoInfo = varKv(kvGet(info, INFO_KEY_REPOSITORY_VAR)); - - strCatFmt( - backupResult, " repository size: %s, repository backup size: %s\n", - strZ(strSizeFormat(varUInt64Force(kvGet(repoInfo, KEY_SIZE_VAR)))), - strZ(strSizeFormat(varUInt64Force(kvGet(repoInfo, KEY_DELTA_VAR))))); - - if (kvGet(backupInfo, BACKUP_KEY_REFERENCE_VAR) != NULL) - { - StringList *referenceList = strLstNewVarLst(varVarLst(kvGet(backupInfo, BACKUP_KEY_REFERENCE_VAR))); - strCatFmt(backupResult, " backup reference list: %s\n", strZ(strLstJoin(referenceList, ", "))); - } - - if (kvGet(backupInfo, BACKUP_KEY_DATABASE_REF_VAR) != NULL) - { - VariantList *dbSection = kvGetList(backupInfo, BACKUP_KEY_DATABASE_REF_VAR); - strCatZ(backupResult, " database list:"); - - if (varLstSize(dbSection) == 0) - strCatZ(backupResult, " none\n"); - else - { - for (unsigned int dbIdx = 0; dbIdx < varLstSize(dbSection); dbIdx++) - { - KeyValue *db = varKv(varLstGet(dbSection, dbIdx)); - strCatFmt( - backupResult, " %s (%s)", strZ(varStr(kvGet(db, KEY_NAME_VAR))), - strZ(varStrForce(kvGet(db, KEY_OID_VAR)))); - - if (dbIdx != varLstSize(dbSection) - 1) - strCatZ(backupResult, ","); - } - - strCat(backupResult, LF_STR); - } - } - - if (kvGet(backupInfo, BACKUP_KEY_LINK_VAR) != NULL) - { - VariantList *linkSection = kvGetList(backupInfo, BACKUP_KEY_LINK_VAR); - strCatZ(backupResult, " symlinks:\n"); - - for (unsigned int linkIdx = 0; linkIdx < varLstSize(linkSection); linkIdx++) - { - KeyValue *link = varKv(varLstGet(linkSection, linkIdx)); - - strCatFmt( - backupResult, " %s => %s", strZ(varStr(kvGet(link, KEY_NAME_VAR))), - strZ(varStr(kvGet(link, KEY_DESTINATION_VAR)))); - - if (linkIdx != varLstSize(linkSection) - 1) - strCat(backupResult, LF_STR); - } - - strCat(backupResult, LF_STR); - } - - if (kvGet(backupInfo, BACKUP_KEY_TABLESPACE_VAR) != NULL) - { - VariantList *tablespaceSection = kvGetList(backupInfo, BACKUP_KEY_TABLESPACE_VAR); - strCatZ(backupResult, " tablespaces:\n"); - - for (unsigned int tblIdx = 0; tblIdx < varLstSize(tablespaceSection); tblIdx++) - { - KeyValue *tablespace = varKv(varLstGet(tablespaceSection, tblIdx)); - - strCatFmt( - backupResult, " %s (%s) => %s", strZ(varStr(kvGet(tablespace, KEY_NAME_VAR))), - strZ(varStrForce(kvGet(tablespace, KEY_OID_VAR))), - strZ(varStr(kvGet(tablespace, KEY_DESTINATION_VAR)))); - - if (tblIdx != varLstSize(tablespaceSection) - 1) - strCat(backupResult, LF_STR); - } - - strCat(backupResult, LF_STR); - } - - if (kvGet(backupInfo, BACKUP_KEY_CHECKSUM_PAGE_ERROR_VAR) != NULL) - { - StringList *checksumPageErrorList = strLstNewVarLst( - varVarLst(kvGet(backupInfo, BACKUP_KEY_CHECKSUM_PAGE_ERROR_VAR))); - - strCatFmt( - backupResult, " page checksum error: %s\n", - strZ(strLstJoin(checksumPageErrorList, ", "))); - } - } - } - - // If there is data to display, then display it - if (strSize(archiveResult) > 0 || strSize(backupResult) > 0) - { - if (dbIdx != varLstSize(dbSection) - 1) - strCatZ(resultStr, "\n db (prior)"); - - if (strSize(archiveResult) > 0) - strCat(resultStr, archiveResult); - - if (strSize(backupResult) > 0) - strCat(resultStr, backupResult); - } - } FUNCTION_TEST_RETURN_VOID(); } @@ -770,34 +1160,115 @@ infoRender(void) // Get stanza if specified const String *stanza = cfgOptionStrNull(cfgOptStanza); + // Initialize the 
list of stanzas on all repos + List *stanzaRepoList = lstNewP(sizeof(InfoStanzaRepo), .sortOrder = sortOrderAsc, .comparator = lstComparatorStr); + // Get the backup label if specified const String *backupLabel = cfgOptionStrNull(cfgOptSet); - // Get the repo storage in case it is remote and encryption settings need to be pulled down - storageRepo(); - - // If a backup set was specified, see if the manifest exists + // Since the --set option depends on the --stanza option, the parser will error before this if the backup label is + // specified but a stanza is not if (backupLabel != NULL) { if (!strEq(cfgOptionStr(cfgOptOutput), CFGOPTVAL_INFO_OUTPUT_TEXT_STR)) - THROW(ConfigError, "option 'set' is currently only valid for text output"); + THROW(ConfigError, "option '" CFGOPT_SET "' is currently only valid for text output"); - if (!storageExistsP(storageRepo(), strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE, strZ(backupLabel)))) + if (!(cfgOptionTest(cfgOptRepo)) && cfgOptionGroupIdxTotal(cfgOptGrpRepo) > 1) + THROW(OptionRequiredError, "option '" CFGOPT_REPO "' is required when specifying a backup set"); + } + + // Initialize the repo index + unsigned int repoIdxStart = 0; + unsigned int repoIdxMax = cfgOptionGroupIdxTotal(cfgOptGrpRepo); + unsigned int repoTotal = repoIdxMax; + + // If the repo was specified then set index to the array location and max to loop only once + if (cfgOptionTest(cfgOptRepo)) + { + repoIdxStart = cfgOptionGroupIdxDefault(cfgOptGrpRepo); + repoIdxMax = repoIdxStart + 1; + } + + for (unsigned int repoIdx = repoIdxStart; repoIdx < repoIdxMax; repoIdx++) + { + // Get the repo storage in case it is remote and encryption settings need to be pulled down + const Storage *storageRepo = storageRepoIdx(repoIdx); + + // If a backup set was specified, see if the manifest exists + if (backupLabel != NULL) { - THROW_FMT( - FileMissingError, "manifest does not exist for backup '%s'\n" - "HINT: is the backup listed when running the info command with --stanza option only?", strZ(backupLabel)); + if (!storageExistsP(storageRepo, strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE, strZ(backupLabel)))) + { + THROW_FMT( + FileMissingError, "manifest does not exist for backup '%s'\n" + "HINT: is the backup listed when running the info command with --stanza option only?", strZ(backupLabel)); + } + } + + // Get a list of stanzas in the backup directory + StringList *stanzaNameList = strLstSort(storageListP(storageRepo, STORAGE_PATH_BACKUP_STR), sortOrderAsc); + + // All stanzas will be "found" if they are in the storage list + bool stanzaExists = true; + + if (stanza != NULL) + { + // If a specific stanza was requested and it is not on this repo, then stanzaExists flag will be reset to false + if (strLstSize(stanzaNameList) == 0 || !strLstExists(stanzaNameList, stanza)) + stanzaExists = false; + + // Narrow the list to only the requested stanza + strLstFree(stanzaNameList); + stanzaNameList = strLstNew(); + strLstAdd(stanzaNameList, stanza); + } + + // Process each stanza + for (unsigned int stanzaIdx = 0; stanzaIdx < strLstSize(stanzaNameList); stanzaIdx++) + { + String *stanzaName = strLstGet(stanzaNameList, stanzaIdx); + + // Get the stanza if it is already in the list + InfoStanzaRepo *stanzaRepo = lstFind(stanzaRepoList, &stanzaName); + + // If the stanza was already added to the array, then update this repo for the stanza, else the stanza has not yet + // been added to the list, so add it + if (stanzaRepo != NULL) + infoUpdateStanza(storageRepo, stanzaRepo, 
repoIdx, stanzaExists); + else + { + InfoStanzaRepo stanzaRepo = + { + .name = stanzaName, + .currentPgVersion = 0, + .currentPgSystemId = 0, + .repoList = memNew(repoTotal * sizeof(InfoRepoData)), + }; + + // Initialize all the repos + for (unsigned int repoListIdx = 0; repoListIdx < repoTotal; repoListIdx++) + { + stanzaRepo.repoList[repoListIdx] = (InfoRepoData) + { + .key = cfgOptionGroupIdxToKey(cfgOptGrpRepo, repoListIdx), + .cipher = cipherType(cfgOptionIdxStr(cfgOptRepoCipherType, repoListIdx)), + .cipherPass = cfgOptionIdxStrNull(cfgOptRepoCipherPass, repoListIdx), + }; + } + + // Update the info for this repo + infoUpdateStanza(storageRepo, &stanzaRepo, repoIdx, stanzaExists); + lstAdd(stanzaRepoList, &stanzaRepo); + } } } - // Get a list of stanzas in the backup directory - StringList *stanzaList = storageListP(storageRepo(), STORAGE_PATH_BACKUP_STR); VariantList *infoList = varLstNew(); String *resultStr = strNew(""); // If the backup storage exists, then search for and process any stanzas - if (strLstSize(stanzaList) > 0 || stanza != NULL) - infoList = stanzaInfoList(stanza, stanzaList, backupLabel); + if (lstSize(stanzaRepoList) > 0) + infoList = stanzaInfoList(stanzaRepoList, backupLabel, repoIdxStart, repoIdxMax); // Format text output if (strEq(cfgOptionStr(cfgOptOutput), CFGOPTVAL_INFO_OUTPUT_TEXT_STR)) @@ -808,14 +1279,14 @@ infoRender(void) for (unsigned int stanzaIdx = 0; stanzaIdx < varLstSize(infoList); stanzaIdx++) { KeyValue *stanzaInfo = varKv(varLstGet(infoList, stanzaIdx)); + const String *stanzaName = varStr(kvGet(stanzaInfo, KEY_NAME_VAR)); // Add a carriage return between stanzas if (stanzaIdx > 0) strCatFmt(resultStr, "\n"); // Stanza name and status - strCatFmt( - resultStr, "stanza: %s\n status: ", strZ(varStr(kvGet(stanzaInfo, KEY_NAME_VAR)))); + strCatFmt(resultStr, "stanza: %s\n status: ", strZ(stanzaName)); // If an error has occurred, provide the information that is available and move onto next stanza KeyValue *stanzaStatus = varKv(kvGet(stanzaInfo, STANZA_KEY_STATUS_VAR)); @@ -824,54 +1295,84 @@ infoRender(void) // Get the lock info KeyValue *lockKv = varKv(kvGet(stanzaStatus, STATUS_KEY_LOCK_VAR)); KeyValue *backupLockKv = varKv(kvGet(lockKv, STATUS_KEY_LOCK_BACKUP_VAR)); + bool backupLockHeld = varBool(kvGet(backupLockKv, STATUS_KEY_LOCK_BACKUP_HELD_VAR)); if (statusCode != INFO_STANZA_STATUS_CODE_OK) { - // Change displayed status if backup lock is found - if (varBool(kvGet(backupLockKv, STATUS_KEY_LOCK_BACKUP_HELD_VAR))) + // Update the overall stanza status and change displayed status if backup lock is found + if (statusCode == INFO_STANZA_STATUS_CODE_MIXED || statusCode == INFO_STANZA_STATUS_CODE_PG_MISMATCH) { strCatFmt( - resultStr, "%s (%s, %s)\n", INFO_STANZA_STATUS_ERROR, - strZ(varStr(kvGet(stanzaStatus, STATUS_KEY_MESSAGE_VAR))), - strZ(INFO_STANZA_STATUS_MESSAGE_LOCK_BACKUP_STR)); + resultStr, "%s%s\n", + statusCode == INFO_STANZA_STATUS_CODE_MIXED ? INFO_STANZA_MIXED : + strZ(strNewFmt(INFO_STANZA_STATUS_ERROR " (%s)", + strZ(varStr(kvGet(stanzaStatus, STATUS_KEY_MESSAGE_VAR))))), + backupLockHeld == true ? 
" (" INFO_STANZA_STATUS_MESSAGE_LOCK_BACKUP ")" : ""); + + // Output the status per repo + VariantList *repoSection = kvGetList(stanzaInfo, STANZA_KEY_REPO_VAR); + + for (unsigned int repoIdx = 0; repoIdx < varLstSize(repoSection); repoIdx++) + { + KeyValue *repoInfo = varKv(varLstGet(repoSection, repoIdx)); + KeyValue *repoStatus = varKv(kvGet(repoInfo, STANZA_KEY_STATUS_VAR)); + + strCatFmt( + resultStr, " repo%u: ", varUInt(kvGet(repoInfo, REPO_KEY_KEY_VAR))); + strCatFmt( + resultStr, "%s", + varInt(kvGet(repoStatus, STATUS_KEY_CODE_VAR)) == INFO_STANZA_STATUS_CODE_OK ? + INFO_STANZA_STATUS_OK "\n" : strZ(strNewFmt(INFO_STANZA_STATUS_ERROR " (%s)\n", + strZ(varStr(kvGet(repoStatus, STATUS_KEY_MESSAGE_VAR)))))); + } } else { strCatFmt( - resultStr, "%s (%s)\n", INFO_STANZA_STATUS_ERROR, - strZ(varStr(kvGet(stanzaStatus, STATUS_KEY_MESSAGE_VAR)))); + resultStr, "%s (%s%s\n", INFO_STANZA_STATUS_ERROR, + strZ(varStr(kvGet(stanzaStatus, STATUS_KEY_MESSAGE_VAR))), + backupLockHeld == true ? ", " INFO_STANZA_STATUS_MESSAGE_LOCK_BACKUP ")" : ")"); } - - if (statusCode == INFO_STANZA_STATUS_CODE_MISSING_STANZA_DATA || - statusCode == INFO_STANZA_STATUS_CODE_NO_BACKUP) - { - strCatFmt( - resultStr, " cipher: %s\n", strZ(varStr(kvGet(stanzaInfo, STANZA_KEY_CIPHER_VAR)))); - - // If there is a backup.info file but no backups, then process the archive info - if (statusCode == INFO_STANZA_STATUS_CODE_NO_BACKUP) - formatTextDb(stanzaInfo, resultStr, NULL); - } - - continue; } else { // Change displayed status if backup lock is found - if (varBool(kvGet(backupLockKv, STATUS_KEY_LOCK_BACKUP_HELD_VAR))) - { - strCatFmt( - resultStr, "%s (%s)\n", INFO_STANZA_STATUS_OK, strZ(INFO_STANZA_STATUS_MESSAGE_LOCK_BACKUP_STR)); - } + if (backupLockHeld) + strCatFmt(resultStr, "%s (%s)\n", INFO_STANZA_STATUS_OK, INFO_STANZA_STATUS_MESSAGE_LOCK_BACKUP); else strCatFmt(resultStr, "%s\n", INFO_STANZA_STATUS_OK); } - // Cipher - strCatFmt( - resultStr, " cipher: %s\n", strZ(varStr(kvGet(stanzaInfo, STANZA_KEY_CIPHER_VAR)))); + // Add cipher type if the stanza is found on at least one repo + if (statusCode != INFO_STANZA_STATUS_CODE_MISSING_STANZA_PATH) + { + strCatFmt(resultStr, " cipher: %s\n", strZ(varStr(kvGet(stanzaInfo, KEY_CIPHER_VAR)))); - formatTextDb(stanzaInfo, resultStr, backupLabel); + // If the cipher is mixed across repos for this stanza then display the per-repo cipher type + if (strEq(varStr(kvGet(stanzaInfo, KEY_CIPHER_VAR)), STRDEF(INFO_STANZA_MIXED))) + { + VariantList *repoSection = kvGetList(stanzaInfo, STANZA_KEY_REPO_VAR); + + for (unsigned int repoIdx = 0; repoIdx < varLstSize(repoSection); repoIdx++) + { + KeyValue *repoInfo = varKv(varLstGet(repoSection, repoIdx)); + + strCatFmt( + resultStr, " repo%u: %s\n", varUInt(kvGet(repoInfo, REPO_KEY_KEY_VAR)), + strZ(varStr(kvGet(repoInfo, KEY_CIPHER_VAR)))); + } + } + } + + // Get the current database for this stanza + if (varLstSize(kvGetList(stanzaInfo, STANZA_KEY_DB_VAR)) > 0) + { + InfoStanzaRepo *stanzaRepo = lstFind(stanzaRepoList, &stanzaName); + + formatTextDb( + stanzaInfo, resultStr, pgVersionToStr(stanzaRepo->currentPgVersion), stanzaRepo->currentPgSystemId, + backupLabel); + } } } else diff --git a/src/command/stanza/create.c b/src/command/stanza/create.c index d9d279c24..739521d93 100644 --- a/src/command/stanza/create.c +++ b/src/command/stanza/create.c @@ -29,8 +29,7 @@ cmdStanzaCreate(void) { FUNCTION_LOG_VOID(logLevelDebug); - // Verify the repo is local and that a stop was not issued before proceeding - repoIsLocalVerify(); + // 
Verify that a stop was not issued before proceeding lockStopTest(); MEM_CONTEXT_TEMP_BEGIN() @@ -38,102 +37,121 @@ cmdStanzaCreate(void) if (cfgOptionBool(cfgOptForce)) LOG_WARN("option --force is no longer supported"); - const Storage *storageRepoReadStanza = storageRepo(); - const Storage *storageRepoWriteStanza = storageRepoWrite(); - InfoArchive *infoArchive = NULL; - InfoBackup *infoBackup = NULL; + // Verify all the repos are local (i.e. repo*-host is not set) - this is a simple way to confirm we are not executing + // stanza-create from a pg host as it will immediately error + for (unsigned int repoIdx = 0; repoIdx < cfgOptionGroupIdxTotal(cfgOptGrpRepo); repoIdx++) + repoIsLocalVerifyIdx(repoIdx); // Get the version and system information - validating it if the database is online PgControl pgControl = pgValidate(); - bool archiveInfoFileExists = storageExistsP(storageRepoReadStanza, INFO_ARCHIVE_PATH_FILE_STR); - bool archiveInfoFileCopyExists = storageExistsP(storageRepoReadStanza, INFO_ARCHIVE_PATH_FILE_COPY_STR); - bool backupInfoFileExists = storageExistsP(storageRepoReadStanza, INFO_BACKUP_PATH_FILE_STR); - bool backupInfoFileCopyExists = storageExistsP(storageRepoReadStanza, INFO_BACKUP_PATH_FILE_COPY_STR); - - // If neither archive info nor backup info files exist and nothing else exists in the stanza directory - // then create the stanza - if (!archiveInfoFileExists && !archiveInfoFileCopyExists && !backupInfoFileExists && !backupInfoFileCopyExists) + // For each repository configured, create the stanza + for (unsigned int repoIdx = 0; repoIdx < cfgOptionGroupIdxTotal(cfgOptGrpRepo); repoIdx++) { - bool archiveNotEmpty = strLstSize(storageListP(storageRepoReadStanza, STORAGE_REPO_ARCHIVE_STR)) > 0 ? true : false; - bool backupNotEmpty = strLstSize(storageListP(storageRepoReadStanza, STORAGE_REPO_BACKUP_STR)) > 0 ? true : false; + LOG_INFO_FMT( + CFGCMD_STANZA_CREATE " for stanza '%s' on repo%u", strZ(cfgOptionStr(cfgOptStanza)), + cfgOptionGroupIdxToKey(cfgOptGrpRepo, repoIdx)); - // If something else exists in the backup or archive directories for this stanza, then error - if (archiveNotEmpty || backupNotEmpty) + const Storage *storageRepoReadStanza = storageRepoIdx(repoIdx); + const Storage *storageRepoWriteStanza = storageRepoIdxWrite(repoIdx); + InfoArchive *infoArchive = NULL; + InfoBackup *infoBackup = NULL; + + bool archiveInfoFileExists = storageExistsP(storageRepoReadStanza, INFO_ARCHIVE_PATH_FILE_STR); + bool archiveInfoFileCopyExists = storageExistsP(storageRepoReadStanza, INFO_ARCHIVE_PATH_FILE_COPY_STR); + bool backupInfoFileExists = storageExistsP(storageRepoReadStanza, INFO_BACKUP_PATH_FILE_STR); + bool backupInfoFileCopyExists = storageExistsP(storageRepoReadStanza, INFO_BACKUP_PATH_FILE_COPY_STR); + + // If neither archive info nor backup info files exist and nothing else exists in the stanza directory + // then create the stanza + if (!archiveInfoFileExists && !archiveInfoFileCopyExists && !backupInfoFileExists && !backupInfoFileCopyExists) + { + bool archiveNotEmpty = strLstSize(storageListP(storageRepoReadStanza, STORAGE_REPO_ARCHIVE_STR)) > 0 ? true : false; + bool backupNotEmpty = strLstSize(storageListP(storageRepoReadStanza, STORAGE_REPO_BACKUP_STR)) > 0 ? true : false; + + // If something else exists in the backup or archive directories for this stanza, then error + if (archiveNotEmpty || backupNotEmpty) + { + THROW_FMT( + PathNotEmptyError, "%s%s%snot empty", (backupNotEmpty ? "backup directory " : ""), + (backupNotEmpty && archiveNotEmpty ? 
"and/or " : ""), (archiveNotEmpty ? "archive directory " : "")); + } + + // If the repo is encrypted, generate a cipher passphrase for encrypting subsequent archive files + String *cipherPassSub = cipherPassGen(cipherType(cfgOptionIdxStr(cfgOptRepoCipherType, repoIdx))); + + // Create and save archive info + infoArchive = infoArchiveNew(pgControl.version, pgControl.systemId, cipherPassSub); + + infoArchiveSaveFile( + infoArchive, storageRepoWriteStanza, INFO_ARCHIVE_PATH_FILE_STR, + cipherType(cfgOptionIdxStr(cfgOptRepoCipherType, repoIdx)), cfgOptionIdxStrNull(cfgOptRepoCipherPass, repoIdx)); + + // If the repo is encrypted, generate a cipher passphrase for encrypting subsequent backup files + cipherPassSub = cipherPassGen(cipherType(cfgOptionIdxStr(cfgOptRepoCipherType, repoIdx))); + + // Create and save backup info + infoBackup = infoBackupNew(pgControl.version, pgControl.systemId, pgControl.catalogVersion, cipherPassSub); + + infoBackupSaveFile( + infoBackup, storageRepoWriteStanza, INFO_BACKUP_PATH_FILE_STR, + cipherType(cfgOptionIdxStr(cfgOptRepoCipherType, repoIdx)), cfgOptionIdxStrNull(cfgOptRepoCipherPass, repoIdx)); + } + // Else if at least one archive and one backup info file exists, then ensure both are valid + else if ((archiveInfoFileExists || archiveInfoFileCopyExists) && (backupInfoFileExists || backupInfoFileCopyExists)) + { + // Error if there is a mismatch between the archive and backup info files or the database version/system Id matches + // current database + checkStanzaInfoPg( + storageRepoReadStanza, pgControl.version, pgControl.systemId, + cipherType(cfgOptionIdxStr(cfgOptRepoCipherType, repoIdx)), cfgOptionIdxStrNull(cfgOptRepoCipherPass, repoIdx)); + + // The files are valid - upgrade + const String *sourceFile = NULL; + const String *destinationFile = NULL; + + // If the existing files are valid, then, if a file is missing, copy the existing one to the missing one to ensure + // there is both a .info and .info.copy + if (!archiveInfoFileExists || !archiveInfoFileCopyExists) + { + sourceFile = archiveInfoFileExists ? INFO_ARCHIVE_PATH_FILE_STR : INFO_ARCHIVE_PATH_FILE_COPY_STR; + destinationFile = !archiveInfoFileExists ? INFO_ARCHIVE_PATH_FILE_STR : INFO_ARCHIVE_PATH_FILE_COPY_STR; + + storageCopyP( + storageNewReadP(storageRepoReadStanza, sourceFile), + storageNewWriteP(storageRepoWriteStanza, destinationFile)); + } + + if (!backupInfoFileExists || !backupInfoFileCopyExists) + { + sourceFile = backupInfoFileExists ? INFO_BACKUP_PATH_FILE_STR : INFO_BACKUP_PATH_FILE_COPY_STR; + destinationFile = !backupInfoFileExists ? INFO_BACKUP_PATH_FILE_STR : INFO_BACKUP_PATH_FILE_COPY_STR; + + storageCopyP( + storageNewReadP(storageRepoReadStanza, sourceFile), + storageNewWriteP(storageRepoWriteStanza, destinationFile)); + } + + // If no files copied, then the stanza was already valid + if (sourceFile == NULL) + { + LOG_INFO_FMT( + "stanza '%s' already exists on repo%u and is valid", strZ(cfgOptionStr(cfgOptStanza)), + cfgOptionGroupIdxToKey(cfgOptGrpRepo, repoIdx)); + } + } + // Else if both .info and corresponding .copy file are missing for one but not the other, then error - the user will + // have to make a conscious effort to determine if deleting the stanza on this repo is appropriate or other action is + else { THROW_FMT( - PathNotEmptyError, "%s%s%snot empty", (backupNotEmpty ? "backup directory " : ""), - (backupNotEmpty && archiveNotEmpty ? "and/or " : ""), (archiveNotEmpty ? 
"archive directory " : "")); + FileMissingError, + "%s on repo%u\n" + "HINT: this may be a symptom of repository corruption!", + ((archiveInfoFileExists || archiveInfoFileCopyExists) ? + "archive.info exists but backup.info is missing" : "backup.info exists but archive.info is missing"), + cfgOptionGroupIdxToKey(cfgOptGrpRepo, repoIdx)); } - - // If the repo is encrypted, generate a cipher passphrase for encrypting subsequent archive files - String *cipherPassSub = cipherPassGen(cipherType(cfgOptionStr(cfgOptRepoCipherType))); - - // Create and save archive info - infoArchive = infoArchiveNew(pgControl.version, pgControl.systemId, cipherPassSub); - - infoArchiveSaveFile( - infoArchive, storageRepoWriteStanza, INFO_ARCHIVE_PATH_FILE_STR, cipherType(cfgOptionStr(cfgOptRepoCipherType)), - cfgOptionStrNull(cfgOptRepoCipherPass)); - - // If the repo is encrypted, generate a cipher passphrase for encrypting subsequent backup files - cipherPassSub = cipherPassGen(cipherType(cfgOptionStr(cfgOptRepoCipherType))); - - // Create and save backup info - infoBackup = infoBackupNew(pgControl.version, pgControl.systemId, pgControl.catalogVersion, cipherPassSub); - - infoBackupSaveFile( - infoBackup, storageRepoWriteStanza, INFO_BACKUP_PATH_FILE_STR, cipherType(cfgOptionStr(cfgOptRepoCipherType)), - cfgOptionStrNull(cfgOptRepoCipherPass)); - } - // Else if at least one archive and one backup info file exists, then ensure both are valid - else if ((archiveInfoFileExists || archiveInfoFileCopyExists) && (backupInfoFileExists || backupInfoFileCopyExists)) - { - // Error if there is a mismatch between the archive and backup info files or the database version/system Id matches - // current database - checkStanzaInfoPg( - storageRepoReadStanza, pgControl.version, pgControl.systemId, cipherType(cfgOptionStr(cfgOptRepoCipherType)), - cfgOptionStrNull(cfgOptRepoCipherPass)); - - // The files are valid - upgrade - const String *sourceFile = NULL; - const String *destinationFile = NULL; - - // If the existing files are valid, then, if a file is missing, copy the existing one to the missing one to ensure - // there is both a .info and .info.copy - if (!archiveInfoFileExists || !archiveInfoFileCopyExists) - { - sourceFile = archiveInfoFileExists ? INFO_ARCHIVE_PATH_FILE_STR : INFO_ARCHIVE_PATH_FILE_COPY_STR; - destinationFile = !archiveInfoFileExists ? INFO_ARCHIVE_PATH_FILE_STR : INFO_ARCHIVE_PATH_FILE_COPY_STR; - - storageCopyP( - storageNewReadP(storageRepoReadStanza, sourceFile), - storageNewWriteP(storageRepoWriteStanza, destinationFile)); - } - - if (!backupInfoFileExists || !backupInfoFileCopyExists) - { - sourceFile = backupInfoFileExists ? INFO_BACKUP_PATH_FILE_STR : INFO_BACKUP_PATH_FILE_COPY_STR; - destinationFile = !backupInfoFileExists ? INFO_BACKUP_PATH_FILE_STR : INFO_BACKUP_PATH_FILE_COPY_STR; - - storageCopyP( - storageNewReadP(storageRepoReadStanza, sourceFile), - storageNewWriteP(storageRepoWriteStanza, destinationFile)); - } - - // If no files copied, then the stanza was already valid - if (sourceFile == NULL) - LOG_INFO_FMT("stanza '%s' already exists and is valid", strZ(cfgOptionStr(cfgOptStanza))); - } - // Else if both .info and corresponding .copy file are missing for one but not the other, then error - else - { - THROW_FMT( - FileMissingError, - "%s\n" - "HINT: this may be a symptom of repository corruption!", - ((archiveInfoFileExists || archiveInfoFileCopyExists) ? 
- "archive.info exists but backup.info is missing" : "backup.info exists but archive.info is missing")); } } MEM_CONTEXT_TEMP_END(); diff --git a/src/command/stanza/delete.c b/src/command/stanza/delete.c index a4a278c4d..76e57440a 100644 --- a/src/command/stanza/delete.c +++ b/src/command/stanza/delete.c @@ -87,8 +87,10 @@ stanzaDelete(const Storage *storageRepoWriteStanza, const StringList *archiveLis { THROW_FMT( PgRunningError, PG_FILE_POSTMASTERPID " exists - looks like " PG_NAME " is running. " - "To delete stanza '%s', shut down " PG_NAME " for stanza '%s' and try again, or use --force.", - strZ(cfgOptionStr(cfgOptStanza)), strZ(cfgOptionStr(cfgOptStanza))); + "To delete stanza '%s' on repo%u, shut down " PG_NAME " for stanza '%s' and try again, or use --force.", + strZ(cfgOptionStr(cfgOptStanza)), + cfgOptionGroupIdxToKey(cfgOptGrpRepo, cfgOptionGroupIdxDefault(cfgOptGrpRepo)), + strZ(cfgOptionStr(cfgOptStanza))); } // Delete the archive info files diff --git a/src/command/stanza/upgrade.c b/src/command/stanza/upgrade.c index 5350dcfcc..5f307fcad 100644 --- a/src/command/stanza/upgrade.c +++ b/src/command/stanza/upgrade.c @@ -29,70 +29,86 @@ cmdStanzaUpgrade(void) { FUNCTION_LOG_VOID(logLevelDebug); - // Verify the repo is local and that a stop was not issued before proceeding - repoIsLocalVerify(); + // Verify that a stop was not issued before proceeding lockStopTest(); MEM_CONTEXT_TEMP_BEGIN() { - const Storage *storageRepoReadStanza = storageRepo(); - const Storage *storageRepoWriteStanza = storageRepoWrite(); - bool infoArchiveUpgrade = false; - bool infoBackupUpgrade = false; - // Get the version and system information - validating it if the database is online PgControl pgControl = pgValidate(); - // Load the info files (errors if missing) - InfoArchive *infoArchive = infoArchiveLoadFile( - storageRepoReadStanza, INFO_ARCHIVE_PATH_FILE_STR, cipherType(cfgOptionStr(cfgOptRepoCipherType)), - cfgOptionStrNull(cfgOptRepoCipherPass)); - InfoPgData archiveInfo = infoPgData(infoArchivePg(infoArchive), infoPgDataCurrentId(infoArchivePg(infoArchive))); + // Verify all the repos are local (i.e. repo*-host is not set) - this is a simple way to confirm we are not executing + // stanza-upgrade from a pg host as it will immediately error + for (unsigned int repoIdx = 0; repoIdx < cfgOptionGroupIdxTotal(cfgOptGrpRepo); repoIdx++) + repoIsLocalVerifyIdx(repoIdx); - InfoBackup *infoBackup = infoBackupLoadFile( - storageRepoReadStanza, INFO_BACKUP_PATH_FILE_STR, cipherType(cfgOptionStr(cfgOptRepoCipherType)), - cfgOptionStrNull(cfgOptRepoCipherPass)); - InfoPgData backupInfo = infoPgData(infoBackupPg(infoBackup), infoPgDataCurrentId(infoBackupPg(infoBackup))); - - // Since the file save of archive.info and backup.info are not atomic, then check and update each separately. 
- // Update archive - if (pgControl.version != archiveInfo.version || pgControl.systemId != archiveInfo.systemId) + // For each repository configured, upgrade the stanza + for (unsigned int repoIdx = 0; repoIdx < cfgOptionGroupIdxTotal(cfgOptGrpRepo); repoIdx++) { - infoArchivePgSet(infoArchive, pgControl.version, pgControl.systemId); - infoArchiveUpgrade = true; + LOG_INFO_FMT( + CFGCMD_STANZA_UPGRADE " for stanza '%s' on repo%u", strZ(cfgOptionStr(cfgOptStanza)), + cfgOptionGroupIdxToKey(cfgOptGrpRepo, repoIdx)); + + const Storage *storageRepoReadStanza = storageRepoIdx(repoIdx); + const Storage *storageRepoWriteStanza = storageRepoIdxWrite(repoIdx); + bool infoArchiveUpgrade = false; + bool infoBackupUpgrade = false; + + // Load the info files (errors if missing) + InfoArchive *infoArchive = infoArchiveLoadFile( + storageRepoReadStanza, INFO_ARCHIVE_PATH_FILE_STR, cipherType(cfgOptionIdxStr(cfgOptRepoCipherType, repoIdx)), + cfgOptionIdxStrNull(cfgOptRepoCipherPass, repoIdx)); + InfoPgData archiveInfo = infoPgData(infoArchivePg(infoArchive), infoPgDataCurrentId(infoArchivePg(infoArchive))); + + InfoBackup *infoBackup = infoBackupLoadFile( + storageRepoReadStanza, INFO_BACKUP_PATH_FILE_STR, cipherType(cfgOptionIdxStr(cfgOptRepoCipherType, repoIdx)), + cfgOptionIdxStrNull(cfgOptRepoCipherPass, repoIdx)); + InfoPgData backupInfo = infoPgData(infoBackupPg(infoBackup), infoPgDataCurrentId(infoBackupPg(infoBackup))); + + // Since the file save of archive.info and backup.info are not atomic, then check and update each separately. + // Update archive + if (pgControl.version != archiveInfo.version || pgControl.systemId != archiveInfo.systemId) + { + infoArchivePgSet(infoArchive, pgControl.version, pgControl.systemId); + infoArchiveUpgrade = true; + } + + // Update backup + if (pgControl.version != backupInfo.version || pgControl.systemId != backupInfo.systemId) + { + infoBackupPgSet(infoBackup, pgControl.version, pgControl.systemId, pgControl.catalogVersion); + infoBackupUpgrade = true; + } + + // Get the backup and archive info pg data and throw an error if the ids do not match before saving (even if only one + // needed to be updated) + backupInfo = infoPgData(infoBackupPg(infoBackup), infoPgDataCurrentId(infoBackupPg(infoBackup))); + archiveInfo = infoPgData(infoArchivePg(infoArchive), infoPgDataCurrentId(infoArchivePg(infoArchive))); + checkStanzaInfo(&archiveInfo, &backupInfo); + + // Save archive info + if (infoArchiveUpgrade) + { + infoArchiveSaveFile( + infoArchive, storageRepoWriteStanza, INFO_ARCHIVE_PATH_FILE_STR, + cipherType(cfgOptionIdxStr(cfgOptRepoCipherType, repoIdx)), cfgOptionIdxStrNull(cfgOptRepoCipherPass, repoIdx)); + } + + // Save backup info + if (infoBackupUpgrade) + { + infoBackupSaveFile( + infoBackup, storageRepoWriteStanza, INFO_BACKUP_PATH_FILE_STR, + cipherType(cfgOptionIdxStr(cfgOptRepoCipherType, repoIdx)), cfgOptionIdxStrNull(cfgOptRepoCipherPass, repoIdx)); + } + + if (!(infoArchiveUpgrade || infoBackupUpgrade)) + { + LOG_INFO_FMT( + "stanza '%s' on repo%u is already up to date", strZ(cfgOptionStr(cfgOptStanza)), + cfgOptionGroupIdxToKey(cfgOptGrpRepo, repoIdx)); + } } - - // Update backup - if (pgControl.version != backupInfo.version || pgControl.systemId != backupInfo.systemId) - { - infoBackupPgSet(infoBackup, pgControl.version, pgControl.systemId, pgControl.catalogVersion); - infoBackupUpgrade = true; - } - - // Get the backup and archive info pg data and throw an error if the ids do not match before saving (even if only one - // needed to be updated) - 
backupInfo = infoPgData(infoBackupPg(infoBackup), infoPgDataCurrentId(infoBackupPg(infoBackup))); - archiveInfo = infoPgData(infoArchivePg(infoArchive), infoPgDataCurrentId(infoArchivePg(infoArchive))); - checkStanzaInfo(&archiveInfo, &backupInfo); - - // Save archive info - if (infoArchiveUpgrade) - { - infoArchiveSaveFile( - infoArchive, storageRepoWriteStanza, INFO_ARCHIVE_PATH_FILE_STR, cipherType(cfgOptionStr(cfgOptRepoCipherType)), - cfgOptionStrNull(cfgOptRepoCipherPass)); - } - - // Save backup info - if (infoBackupUpgrade) - { - infoBackupSaveFile( - infoBackup, storageRepoWriteStanza, INFO_BACKUP_PATH_FILE_STR, cipherType(cfgOptionStr(cfgOptRepoCipherType)), - cfgOptionStrNull(cfgOptRepoCipherPass)); - } - - if (!(infoArchiveUpgrade || infoBackupUpgrade)) - LOG_INFO_FMT("stanza '%s' is already up to date", strZ(cfgOptionStr(cfgOptStanza))); } MEM_CONTEXT_TEMP_END(); diff --git a/src/config/config.auto.c b/src/config/config.auto.c index b576145d9..b623ef5b2 100644 --- a/src/config/config.auto.c +++ b/src/config/config.auto.c @@ -310,6 +310,7 @@ STRING_EXTERN(CFGOPT_RAW_STR, CFGOPT_RAW); STRING_EXTERN(CFGOPT_RECOVERY_OPTION_STR, CFGOPT_RECOVERY_OPTION); STRING_EXTERN(CFGOPT_RECURSE_STR, CFGOPT_RECURSE); STRING_EXTERN(CFGOPT_REMOTE_TYPE_STR, CFGOPT_REMOTE_TYPE); +STRING_EXTERN(CFGOPT_REPO_STR, CFGOPT_REPO); STRING_EXTERN(CFGOPT_RESUME_STR, CFGOPT_RESUME); STRING_EXTERN(CFGOPT_SCK_BLOCK_STR, CFGOPT_SCK_BLOCK); STRING_EXTERN(CFGOPT_SCK_KEEP_ALIVE_STR, CFGOPT_SCK_KEEP_ALIVE); diff --git a/src/config/config.auto.h b/src/config/config.auto.h index 738739beb..0479d2c43 100644 --- a/src/config/config.auto.h +++ b/src/config/config.auto.h @@ -166,6 +166,8 @@ Option constants STRING_DECLARE(CFGOPT_RECURSE_STR); #define CFGOPT_REMOTE_TYPE "remote-type" STRING_DECLARE(CFGOPT_REMOTE_TYPE_STR); +#define CFGOPT_REPO "repo" + STRING_DECLARE(CFGOPT_REPO_STR); #define CFGOPT_RESUME "resume" STRING_DECLARE(CFGOPT_RESUME_STR); #define CFGOPT_SCK_BLOCK "sck-block" @@ -205,7 +207,7 @@ Option constants #define CFGOPT_TYPE "type" STRING_DECLARE(CFGOPT_TYPE_STR); -#define CFG_OPTION_TOTAL 127 +#define CFG_OPTION_TOTAL 128 /*********************************************************************************************************************************** Command enum @@ -315,6 +317,7 @@ typedef enum cfgOptRecoveryOption, cfgOptRecurse, cfgOptRemoteType, + cfgOptRepo, cfgOptRepoAzureAccount, cfgOptRepoAzureCaFile, cfgOptRepoAzureCaPath, diff --git a/src/config/config.c b/src/config/config.c index a4f9d6cf1..4d21d5c63 100644 --- a/src/config/config.c +++ b/src/config/config.c @@ -394,6 +394,7 @@ cfgOptionGroupIdxDefault(ConfigOptionGroup groupId) ASSERT(configLocal != NULL); ASSERT(groupId < CFG_OPTION_GROUP_TOTAL); + ASSERT(configLocal->optionGroup[groupId].indexDefaultExists); FUNCTION_TEST_RETURN(configLocal->optionGroup[groupId].indexDefault); } @@ -486,6 +487,8 @@ cfgOptionIdxDefault(ConfigOption optionId) ASSERT(configLocal != NULL); ASSERT(optionId < CFG_OPTION_TOTAL); + ASSERT( + !configLocal->option[optionId].group || configLocal->optionGroup[configLocal->option[optionId].groupId].indexDefaultExists); FUNCTION_TEST_RETURN( configLocal->option[optionId].group ? 
configLocal->optionGroup[configLocal->option[optionId].groupId].indexDefault : 0); diff --git a/src/config/config.intern.h b/src/config/config.intern.h index 490df7b79..eee9196a5 100644 --- a/src/config/config.intern.h +++ b/src/config/config.intern.h @@ -46,6 +46,7 @@ typedef struct Config const char *name; // Name bool valid; // Is option group valid for the current command? unsigned int indexTotal; // Total number of indexes with values in option group + bool indexDefaultExists; // Is there a default index for non-indexed functions? unsigned int indexDefault; // Default index (usually 0) unsigned int indexMap[CFG_OPTION_KEY_MAX]; // List of index to key index mappings } optionGroup[CFG_OPTION_GROUP_TOTAL]; diff --git a/src/config/load.c b/src/config/load.c index d9edd3c3b..b5f4a8a36 100644 --- a/src/config/load.c +++ b/src/config/load.c @@ -19,6 +19,8 @@ Configuration Load #include "config/config.intern.h" #include "config/load.h" #include "config/parse.h" +#include "storage/cifs/storage.h" +#include "storage/posix/storage.h" #include "storage/helper.h" /*********************************************************************************************************************************** @@ -66,6 +68,46 @@ cfgLoadUpdateOption(void) { FUNCTION_LOG_VOID(logLevelTrace); + // Make sure repo option is set for the default command role when it is not internal and more than one repo is configured or the + // first configured repo is not key 1. Filter out any commands where this does not apply. + if (!cfgCommandHelp() && cfgCommand() != cfgCmdInfo && cfgOptionValid(cfgOptRepo) && !cfgOptionTest(cfgOptRepo) && + (cfgOptionGroupIdxTotal(cfgOptGrpRepo) > 1 || cfgOptionGroupIdxToKey(cfgOptGrpRepo, 0) != 1)) + { + THROW_FMT( + OptionRequiredError, + "%s command requires option: " CFGOPT_REPO "\n" + "HINT: this command requires a specific repository to operate on", + cfgCommandName(cfgCommand())); + } + + // If there is more than one repo configured + if (cfgOptionGroupIdxTotal(cfgOptGrpRepo) > 1) + { + for (unsigned int optionIdx = 0; optionIdx < cfgOptionGroupIdxTotal(cfgOptGrpRepo); optionIdx++) + { + // If the repo is local and either posix or cifs + if (!(cfgOptionIdxTest(cfgOptRepoHost, optionIdx)) && + (strEq(cfgOptionIdxStr(cfgOptRepoType, optionIdx), STORAGE_POSIX_TYPE_STR) || + strEq(cfgOptionIdxStr(cfgOptRepoType, optionIdx), STORAGE_CIFS_TYPE_STR))) + { + // Ensure a local repo does not have the same path as another local repo of the same type + for (unsigned int repoIdx = 0; repoIdx < cfgOptionGroupIdxTotal(cfgOptGrpRepo); repoIdx++) + { + if (optionIdx != repoIdx && !(cfgOptionIdxTest(cfgOptRepoHost, repoIdx)) && + strEq(cfgOptionIdxStr(cfgOptRepoType, optionIdx), cfgOptionIdxStr(cfgOptRepoType, repoIdx)) && + strEq(cfgOptionIdxStr(cfgOptRepoPath, optionIdx), cfgOptionIdxStr(cfgOptRepoPath, repoIdx))) + { + THROW_FMT( + OptionInvalidValueError, + "local repo%u and repo%u paths are both '%s' but must be different", + cfgOptionGroupIdxToKey(cfgOptGrpRepo, optionIdx), cfgOptionGroupIdxToKey(cfgOptGrpRepo, repoIdx), + strZ(cfgOptionIdxStr(cfgOptRepoPath, repoIdx))); + } + } + } + } + } + // Set default for repo-host-cmd if (cfgOptionValid(cfgOptRepoHostCmd)) cfgOptionDefaultSet(cfgOptRepoHostCmd, VARSTR(cfgExe())); @@ -223,17 +265,20 @@ cfgLoadUpdateOption(void) } } - // Error if an S3 bucket name contains dots - if (cfgOptionGroupValid(cfgOptGrpRepo) && cfgOptionTest(cfgOptRepoS3Bucket) && cfgOptionBool(cfgOptRepoS3VerifyTls) && - strChr(cfgOptionStr(cfgOptRepoS3Bucket), '.') != -1) + // For 
each possible repo, error if an S3 bucket name contains dots + for (unsigned int repoIdx = 0; repoIdx < cfgOptionGroupIdxTotal(cfgOptGrpRepo); repoIdx++) { - THROW_FMT( - OptionInvalidValueError, - "'%s' is not valid for option '%s'" - "\nHINT: RFC-2818 forbids dots in wildcard matches." - "\nHINT: TLS/SSL verification cannot proceed with this bucket name." - "\nHINT: remove dots from the bucket name.", - strZ(cfgOptionStr(cfgOptRepoS3Bucket)), cfgOptionName(cfgOptRepoS3Bucket)); + if (cfgOptionIdxTest(cfgOptRepoS3Bucket, repoIdx) && cfgOptionIdxBool(cfgOptRepoS3VerifyTls, repoIdx) && + strChr(cfgOptionIdxStr(cfgOptRepoS3Bucket, repoIdx), '.') != -1) + { + THROW_FMT( + OptionInvalidValueError, + "'%s' is not valid for option '%s'" + "\nHINT: RFC-2818 forbids dots in wildcard matches." + "\nHINT: TLS/SSL verification cannot proceed with this bucket name." + "\nHINT: remove dots from the bucket name.", + strZ(cfgOptionIdxStr(cfgOptRepoS3Bucket, repoIdx)), cfgOptionIdxName(cfgOptRepoS3Bucket, repoIdx)); + } } // Check/update compress-type if compress is valid. There should be no references to the compress option outside this block. @@ -328,6 +373,13 @@ cfgLoad(unsigned int argListSize, const char *argList[]) // Parse config from command line and config file configParse(argListSize, argList, true); + // Check that only repo1 is configured. This is temporary until the multi-repo support is finalized. + if (cfgCommandRole() == cfgCmdRoleDefault && cfgOptionGroupValid(cfgOptGrpRepo) && + (cfgOptionGroupIdxTotal(cfgOptGrpRepo) > 1 || cfgOptionGroupIdxToKey(cfgOptGrpRepo, 0) != 1)) + { + THROW_FMT(OptionInvalidValueError, "only repo1 may be configured"); + } + // Initialize dry-run mode for storage when valid for the current command storageHelperDryRunInit(cfgOptionValid(cfgOptDryRun) && cfgOptionBool(cfgOptDryRun)); diff --git a/src/config/parse.auto.c b/src/config/parse.auto.c index abf800f7b..d0241d678 100644 --- a/src/config/parse.auto.c +++ b/src/config/parse.auto.c @@ -2937,6 +2937,64 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ), ), + // ----------------------------------------------------------------------------------------------------------------------------- + PARSE_RULE_OPTION + ( + PARSE_RULE_OPTION_NAME("repo"), + PARSE_RULE_OPTION_TYPE(cfgOptTypeInteger), + PARSE_RULE_OPTION_REQUIRED(false), + PARSE_RULE_OPTION_SECTION(cfgSectionCommandLine), + + PARSE_RULE_OPTION_COMMAND_ROLE_DEFAULT_VALID_LIST + ( + PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) + PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) + PARSE_RULE_OPTION_COMMAND(cfgCmdExpire) + PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) + PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) + PARSE_RULE_OPTION_COMMAND(cfgCmdRepoGet) + PARSE_RULE_OPTION_COMMAND(cfgCmdRepoLs) + PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) + PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) + PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) + PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) + PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) + ), + + PARSE_RULE_OPTION_COMMAND_ROLE_ASYNC_VALID_LIST + ( + PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) + ), + + PARSE_RULE_OPTION_COMMAND_ROLE_LOCAL_VALID_LIST + ( + PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) + PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) + PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) + PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) + ), + + PARSE_RULE_OPTION_COMMAND_ROLE_REMOTE_VALID_LIST + ( + PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) + PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) + PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) + 
PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) + PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) + PARSE_RULE_OPTION_COMMAND(cfgCmdRepoGet) + PARSE_RULE_OPTION_COMMAND(cfgCmdRepoLs) + PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) + PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) + PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) + PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) + ), + + PARSE_RULE_OPTION_OPTIONAL_LIST + ( + PARSE_RULE_OPTION_OPTIONAL_ALLOW_RANGE(1, 4), + ), + ), + // ----------------------------------------------------------------------------------------------------------------------------- PARSE_RULE_OPTION ( @@ -2987,7 +3045,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -2996,9 +3053,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -3061,7 +3115,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -3070,9 +3123,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -3135,7 +3185,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -3144,9 +3193,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -3209,7 +3255,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -3218,9 +3263,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ 
-3283,7 +3325,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -3292,9 +3333,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -3359,7 +3397,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -3368,9 +3405,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -3434,7 +3468,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -3443,9 +3476,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -3508,7 +3538,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -3517,9 +3546,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -3590,7 +3616,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -3599,9 +3624,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - 
PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -3667,7 +3689,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -3676,9 +3697,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -3744,7 +3762,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -3753,9 +3770,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -3818,7 +3832,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -3827,9 +3840,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -4296,7 +4306,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -4305,9 +4314,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -4489,7 +4495,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -4498,9 +4503,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = 
PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -4563,7 +4565,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -4572,9 +4573,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -4637,7 +4635,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -4646,9 +4643,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -4711,7 +4705,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -4720,9 +4713,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -4785,7 +4775,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -4794,9 +4783,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -4860,7 +4846,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) 
PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -4869,9 +4854,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -4935,7 +4917,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -4944,9 +4925,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -5009,7 +4987,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -5018,9 +4995,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -5091,7 +5065,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -5100,9 +5073,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -5168,7 +5138,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -5177,9 +5146,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -5242,7 +5208,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( 
PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -5251,9 +5216,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -5317,7 +5279,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -5326,9 +5287,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -5391,7 +5349,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -5400,9 +5357,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -5473,7 +5427,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -5482,9 +5435,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -5549,7 +5499,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = ( PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) - PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) @@ -5558,9 +5507,6 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] = PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) - PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) - 
PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) ), @@ -8442,6 +8388,14 @@ static const struct option optionList[] = .val = PARSE_OPTION_FLAG | cfgOptRemoteType, }, + // repo option + // ----------------------------------------------------------------------------------------------------------------------------- + { + .name = "repo", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | cfgOptRepo, + }, + // repo-azure-account option // ----------------------------------------------------------------------------------------------------------------------------- { @@ -8453,6 +8407,33 @@ static const struct option optionList[] = .name = "reset-repo1-azure-account", .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureAccount, }, + { + .name = "repo2-azure-account", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureAccount, + }, + { + .name = "reset-repo2-azure-account", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureAccount, + }, + { + .name = "repo3-azure-account", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureAccount, + }, + { + .name = "reset-repo3-azure-account", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureAccount, + }, + { + .name = "repo4-azure-account", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureAccount, + }, + { + .name = "reset-repo4-azure-account", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureAccount, + }, // repo-azure-ca-file option // ----------------------------------------------------------------------------------------------------------------------------- @@ -8465,6 +8446,33 @@ static const struct option optionList[] = .name = "reset-repo1-azure-ca-file", .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureCaFile, }, + { + .name = "repo2-azure-ca-file", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureCaFile, + }, + { + .name = "reset-repo2-azure-ca-file", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureCaFile, + }, + { + .name = "repo3-azure-ca-file", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureCaFile, + }, + { + .name = "reset-repo3-azure-ca-file", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureCaFile, + }, + { + .name = "repo4-azure-ca-file", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureCaFile, + }, + { + .name = "reset-repo4-azure-ca-file", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureCaFile, + }, // repo-azure-ca-path option // ----------------------------------------------------------------------------------------------------------------------------- @@ -8477,6 +8485,33 @@ static const struct option optionList[] = .name = "reset-repo1-azure-ca-path", .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureCaPath, }, + { + .name = "repo2-azure-ca-path", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureCaPath, + }, + { + .name = 
"reset-repo2-azure-ca-path", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureCaPath, + }, + { + .name = "repo3-azure-ca-path", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureCaPath, + }, + { + .name = "reset-repo3-azure-ca-path", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureCaPath, + }, + { + .name = "repo4-azure-ca-path", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureCaPath, + }, + { + .name = "reset-repo4-azure-ca-path", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureCaPath, + }, // repo-azure-container option // ----------------------------------------------------------------------------------------------------------------------------- @@ -8489,6 +8524,33 @@ static const struct option optionList[] = .name = "reset-repo1-azure-container", .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureContainer, }, + { + .name = "repo2-azure-container", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureContainer, + }, + { + .name = "reset-repo2-azure-container", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureContainer, + }, + { + .name = "repo3-azure-container", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureContainer, + }, + { + .name = "reset-repo3-azure-container", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureContainer, + }, + { + .name = "repo4-azure-container", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureContainer, + }, + { + .name = "reset-repo4-azure-container", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureContainer, + }, // repo-azure-endpoint option // ----------------------------------------------------------------------------------------------------------------------------- @@ -8501,6 +8563,33 @@ static const struct option optionList[] = .name = "reset-repo1-azure-endpoint", .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureEndpoint, }, + { + .name = "repo2-azure-endpoint", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureEndpoint, + }, + { + .name = "reset-repo2-azure-endpoint", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureEndpoint, + }, + { + .name = "repo3-azure-endpoint", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureEndpoint, + }, + { + .name = "reset-repo3-azure-endpoint", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureEndpoint, + }, + { + .name = "repo4-azure-endpoint", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureEndpoint, + }, + { + .name = "reset-repo4-azure-endpoint", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureEndpoint, + }, // repo-azure-host option // ----------------------------------------------------------------------------------------------------------------------------- @@ -8513,6 +8602,33 @@ static const struct option 
optionList[] = .name = "reset-repo1-azure-host", .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureHost, }, + { + .name = "repo2-azure-host", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureHost, + }, + { + .name = "reset-repo2-azure-host", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureHost, + }, + { + .name = "repo3-azure-host", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureHost, + }, + { + .name = "reset-repo3-azure-host", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureHost, + }, + { + .name = "repo4-azure-host", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureHost, + }, + { + .name = "reset-repo4-azure-host", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureHost, + }, // repo-azure-key option // ----------------------------------------------------------------------------------------------------------------------------- @@ -8525,6 +8641,33 @@ static const struct option optionList[] = .name = "reset-repo1-azure-key", .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureKey, }, + { + .name = "repo2-azure-key", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureKey, + }, + { + .name = "reset-repo2-azure-key", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureKey, + }, + { + .name = "repo3-azure-key", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureKey, + }, + { + .name = "reset-repo3-azure-key", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureKey, + }, + { + .name = "repo4-azure-key", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureKey, + }, + { + .name = "reset-repo4-azure-key", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureKey, + }, // repo-azure-key-type option // ----------------------------------------------------------------------------------------------------------------------------- @@ -8537,6 +8680,33 @@ static const struct option optionList[] = .name = "reset-repo1-azure-key-type", .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureKeyType, }, + { + .name = "repo2-azure-key-type", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureKeyType, + }, + { + .name = "reset-repo2-azure-key-type", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureKeyType, + }, + { + .name = "repo3-azure-key-type", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureKeyType, + }, + { + .name = "reset-repo3-azure-key-type", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureKeyType, + }, + { + .name = "repo4-azure-key-type", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureKeyType, + }, + { + .name = "reset-repo4-azure-key-type", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureKeyType, + }, // repo-azure-port option 
// ----------------------------------------------------------------------------------------------------------------------------- @@ -8549,6 +8719,33 @@ static const struct option optionList[] = .name = "reset-repo1-azure-port", .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzurePort, }, + { + .name = "repo2-azure-port", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzurePort, + }, + { + .name = "reset-repo2-azure-port", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzurePort, + }, + { + .name = "repo3-azure-port", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzurePort, + }, + { + .name = "reset-repo3-azure-port", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzurePort, + }, + { + .name = "repo4-azure-port", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzurePort, + }, + { + .name = "reset-repo4-azure-port", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzurePort, + }, // repo-azure-verify-tls option // ----------------------------------------------------------------------------------------------------------------------------- @@ -8564,6 +8761,42 @@ static const struct option optionList[] = .name = "reset-repo1-azure-verify-tls", .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureVerifyTls, }, + { + .name = "repo2-azure-verify-tls", + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureVerifyTls, + }, + { + .name = "no-repo2-azure-verify-tls", + .val = PARSE_OPTION_FLAG | PARSE_NEGATE_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureVerifyTls, + }, + { + .name = "reset-repo2-azure-verify-tls", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureVerifyTls, + }, + { + .name = "repo3-azure-verify-tls", + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureVerifyTls, + }, + { + .name = "no-repo3-azure-verify-tls", + .val = PARSE_OPTION_FLAG | PARSE_NEGATE_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureVerifyTls, + }, + { + .name = "reset-repo3-azure-verify-tls", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureVerifyTls, + }, + { + .name = "repo4-azure-verify-tls", + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureVerifyTls, + }, + { + .name = "no-repo4-azure-verify-tls", + .val = PARSE_OPTION_FLAG | PARSE_NEGATE_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureVerifyTls, + }, + { + .name = "reset-repo4-azure-verify-tls", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoAzureVerifyTls, + }, // repo-cipher-pass option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -8581,6 +8814,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoCipherPass, }, + { + .name = "repo2-cipher-pass", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoCipherPass, + }, + { + .name = "reset-repo2-cipher-pass", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoCipherPass, + }, + { 
+ .name = "repo3-cipher-pass", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoCipherPass, + }, + { + .name = "reset-repo3-cipher-pass", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoCipherPass, + }, + { + .name = "repo4-cipher-pass", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoCipherPass, + }, + { + .name = "reset-repo4-cipher-pass", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoCipherPass, + }, // repo-cipher-type option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -8598,6 +8858,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoCipherType, }, + { + .name = "repo2-cipher-type", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoCipherType, + }, + { + .name = "reset-repo2-cipher-type", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoCipherType, + }, + { + .name = "repo3-cipher-type", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoCipherType, + }, + { + .name = "reset-repo3-cipher-type", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoCipherType, + }, + { + .name = "repo4-cipher-type", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoCipherType, + }, + { + .name = "reset-repo4-cipher-type", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoCipherType, + }, // repo-hardlink option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -8621,6 +8908,42 @@ static const struct option optionList[] = .name = "no-hardlink", .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | PARSE_NEGATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHardlink, }, + { + .name = "repo2-hardlink", + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHardlink, + }, + { + .name = "no-repo2-hardlink", + .val = PARSE_OPTION_FLAG | PARSE_NEGATE_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHardlink, + }, + { + .name = "reset-repo2-hardlink", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHardlink, + }, + { + .name = "repo3-hardlink", + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHardlink, + }, + { + .name = "no-repo3-hardlink", + .val = PARSE_OPTION_FLAG | PARSE_NEGATE_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHardlink, + }, + { + .name = "reset-repo3-hardlink", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHardlink, + }, + { + .name = "repo4-hardlink", + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHardlink, + }, + { + .name = "no-repo4-hardlink", + .val = PARSE_OPTION_FLAG | PARSE_NEGATE_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHardlink, + }, + { + .name = "reset-repo4-hardlink", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHardlink, + }, // repo-host option and deprecations // 
----------------------------------------------------------------------------------------------------------------------------- @@ -8638,6 +8961,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHost, }, + { + .name = "repo2-host", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHost, + }, + { + .name = "reset-repo2-host", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHost, + }, + { + .name = "repo3-host", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHost, + }, + { + .name = "reset-repo3-host", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHost, + }, + { + .name = "repo4-host", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHost, + }, + { + .name = "reset-repo4-host", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHost, + }, // repo-host-cmd option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -8655,6 +9005,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostCmd, }, + { + .name = "repo2-host-cmd", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostCmd, + }, + { + .name = "reset-repo2-host-cmd", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostCmd, + }, + { + .name = "repo3-host-cmd", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostCmd, + }, + { + .name = "reset-repo3-host-cmd", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostCmd, + }, + { + .name = "repo4-host-cmd", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostCmd, + }, + { + .name = "reset-repo4-host-cmd", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostCmd, + }, // repo-host-config option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -8672,6 +9049,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfig, }, + { + .name = "repo2-host-config", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfig, + }, + { + .name = "reset-repo2-host-config", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfig, + }, + { + .name = "repo3-host-config", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfig, + }, + { + .name = "reset-repo3-host-config", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfig, + }, + { + .name = "repo4-host-config", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfig, + }, + { + .name = "reset-repo4-host-config", + .val = PARSE_OPTION_FLAG | 
PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfig, + }, // repo-host-config-include-path option // ----------------------------------------------------------------------------------------------------------------------------- @@ -8684,6 +9088,33 @@ static const struct option optionList[] = .name = "reset-repo1-host-config-include-path", .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfigIncludePath, }, + { + .name = "repo2-host-config-include-path", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfigIncludePath, + }, + { + .name = "reset-repo2-host-config-include-path", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfigIncludePath, + }, + { + .name = "repo3-host-config-include-path", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfigIncludePath, + }, + { + .name = "reset-repo3-host-config-include-path", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfigIncludePath, + }, + { + .name = "repo4-host-config-include-path", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfigIncludePath, + }, + { + .name = "reset-repo4-host-config-include-path", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfigIncludePath, + }, // repo-host-config-path option // ----------------------------------------------------------------------------------------------------------------------------- @@ -8696,6 +9127,33 @@ static const struct option optionList[] = .name = "reset-repo1-host-config-path", .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfigPath, }, + { + .name = "repo2-host-config-path", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfigPath, + }, + { + .name = "reset-repo2-host-config-path", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfigPath, + }, + { + .name = "repo3-host-config-path", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfigPath, + }, + { + .name = "reset-repo3-host-config-path", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfigPath, + }, + { + .name = "repo4-host-config-path", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfigPath, + }, + { + .name = "reset-repo4-host-config-path", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostConfigPath, + }, // repo-host-port option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -8713,6 +9171,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostPort, }, + { + .name = "repo2-host-port", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostPort, + }, + { + .name = "reset-repo2-host-port", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostPort, + }, + { + .name = "repo3-host-port", + .has_arg = required_argument, + .val = 
PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostPort, + }, + { + .name = "reset-repo3-host-port", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostPort, + }, + { + .name = "repo4-host-port", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostPort, + }, + { + .name = "reset-repo4-host-port", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostPort, + }, // repo-host-user option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -8730,6 +9215,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostUser, }, + { + .name = "repo2-host-user", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostUser, + }, + { + .name = "reset-repo2-host-user", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostUser, + }, + { + .name = "repo3-host-user", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostUser, + }, + { + .name = "reset-repo3-host-user", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostUser, + }, + { + .name = "repo4-host-user", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostUser, + }, + { + .name = "reset-repo4-host-user", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoHostUser, + }, // repo-local option // ----------------------------------------------------------------------------------------------------------------------------- @@ -8745,6 +9257,42 @@ static const struct option optionList[] = .name = "reset-repo1-local", .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoLocal, }, + { + .name = "repo2-local", + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoLocal, + }, + { + .name = "no-repo2-local", + .val = PARSE_OPTION_FLAG | PARSE_NEGATE_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoLocal, + }, + { + .name = "reset-repo2-local", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoLocal, + }, + { + .name = "repo3-local", + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoLocal, + }, + { + .name = "no-repo3-local", + .val = PARSE_OPTION_FLAG | PARSE_NEGATE_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoLocal, + }, + { + .name = "reset-repo3-local", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoLocal, + }, + { + .name = "repo4-local", + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoLocal, + }, + { + .name = "no-repo4-local", + .val = PARSE_OPTION_FLAG | PARSE_NEGATE_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoLocal, + }, + { + .name = "reset-repo4-local", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoLocal, + }, // repo-path option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -8762,6 +9310,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | 
cfgOptRepoPath, }, + { + .name = "repo2-path", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoPath, + }, + { + .name = "reset-repo2-path", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoPath, + }, + { + .name = "repo3-path", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoPath, + }, + { + .name = "reset-repo3-path", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoPath, + }, + { + .name = "repo4-path", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoPath, + }, + { + .name = "reset-repo4-path", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoPath, + }, // repo-retention-archive option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -8779,6 +9354,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionArchive, }, + { + .name = "repo2-retention-archive", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionArchive, + }, + { + .name = "reset-repo2-retention-archive", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionArchive, + }, + { + .name = "repo3-retention-archive", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionArchive, + }, + { + .name = "reset-repo3-retention-archive", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionArchive, + }, + { + .name = "repo4-retention-archive", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionArchive, + }, + { + .name = "reset-repo4-retention-archive", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionArchive, + }, // repo-retention-archive-type option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -8796,6 +9398,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionArchiveType, }, + { + .name = "repo2-retention-archive-type", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionArchiveType, + }, + { + .name = "reset-repo2-retention-archive-type", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionArchiveType, + }, + { + .name = "repo3-retention-archive-type", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionArchiveType, + }, + { + .name = "reset-repo3-retention-archive-type", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionArchiveType, + }, + { + .name = "repo4-retention-archive-type", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionArchiveType, + }, + { + .name = "reset-repo4-retention-archive-type", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | 
cfgOptRepoRetentionArchiveType, + }, // repo-retention-diff option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -8813,6 +9442,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionDiff, }, + { + .name = "repo2-retention-diff", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionDiff, + }, + { + .name = "reset-repo2-retention-diff", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionDiff, + }, + { + .name = "repo3-retention-diff", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionDiff, + }, + { + .name = "reset-repo3-retention-diff", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionDiff, + }, + { + .name = "repo4-retention-diff", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionDiff, + }, + { + .name = "reset-repo4-retention-diff", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionDiff, + }, // repo-retention-full option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -8830,6 +9486,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionFull, }, + { + .name = "repo2-retention-full", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionFull, + }, + { + .name = "reset-repo2-retention-full", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionFull, + }, + { + .name = "repo3-retention-full", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionFull, + }, + { + .name = "reset-repo3-retention-full", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionFull, + }, + { + .name = "repo4-retention-full", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionFull, + }, + { + .name = "reset-repo4-retention-full", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionFull, + }, // repo-retention-full-type option // ----------------------------------------------------------------------------------------------------------------------------- @@ -8842,6 +9525,33 @@ static const struct option optionList[] = .name = "reset-repo1-retention-full-type", .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionFullType, }, + { + .name = "repo2-retention-full-type", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionFullType, + }, + { + .name = "reset-repo2-retention-full-type", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionFullType, + }, + { + .name = "repo3-retention-full-type", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionFullType, + }, + { + .name = 
"reset-repo3-retention-full-type", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionFullType, + }, + { + .name = "repo4-retention-full-type", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionFullType, + }, + { + .name = "reset-repo4-retention-full-type", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoRetentionFullType, + }, // repo-s3-bucket option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -8859,6 +9569,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Bucket, }, + { + .name = "repo2-s3-bucket", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Bucket, + }, + { + .name = "reset-repo2-s3-bucket", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Bucket, + }, + { + .name = "repo3-s3-bucket", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Bucket, + }, + { + .name = "reset-repo3-s3-bucket", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Bucket, + }, + { + .name = "repo4-s3-bucket", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Bucket, + }, + { + .name = "reset-repo4-s3-bucket", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Bucket, + }, // repo-s3-ca-file option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -8876,6 +9613,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3CaFile, }, + { + .name = "repo2-s3-ca-file", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3CaFile, + }, + { + .name = "reset-repo2-s3-ca-file", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3CaFile, + }, + { + .name = "repo3-s3-ca-file", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3CaFile, + }, + { + .name = "reset-repo3-s3-ca-file", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3CaFile, + }, + { + .name = "repo4-s3-ca-file", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3CaFile, + }, + { + .name = "reset-repo4-s3-ca-file", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3CaFile, + }, // repo-s3-ca-path option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -8893,6 +9657,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3CaPath, }, + { + .name = "repo2-s3-ca-path", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3CaPath, + }, + { + .name = "reset-repo2-s3-ca-path", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG 
| (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3CaPath, + }, + { + .name = "repo3-s3-ca-path", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3CaPath, + }, + { + .name = "reset-repo3-s3-ca-path", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3CaPath, + }, + { + .name = "repo4-s3-ca-path", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3CaPath, + }, + { + .name = "reset-repo4-s3-ca-path", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3CaPath, + }, // repo-s3-endpoint option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -8910,6 +9701,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Endpoint, }, + { + .name = "repo2-s3-endpoint", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Endpoint, + }, + { + .name = "reset-repo2-s3-endpoint", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Endpoint, + }, + { + .name = "repo3-s3-endpoint", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Endpoint, + }, + { + .name = "reset-repo3-s3-endpoint", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Endpoint, + }, + { + .name = "repo4-s3-endpoint", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Endpoint, + }, + { + .name = "reset-repo4-s3-endpoint", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Endpoint, + }, // repo-s3-host option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -8927,6 +9745,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Host, }, + { + .name = "repo2-s3-host", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Host, + }, + { + .name = "reset-repo2-s3-host", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Host, + }, + { + .name = "repo3-s3-host", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Host, + }, + { + .name = "reset-repo3-s3-host", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Host, + }, + { + .name = "repo4-s3-host", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Host, + }, + { + .name = "reset-repo4-s3-host", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Host, + }, // repo-s3-key option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -8944,6 +9789,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Key, }, + { + .name = "repo2-s3-key", + .has_arg = required_argument, + .val = 
PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Key, + }, + { + .name = "reset-repo2-s3-key", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Key, + }, + { + .name = "repo3-s3-key", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Key, + }, + { + .name = "reset-repo3-s3-key", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Key, + }, + { + .name = "repo4-s3-key", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Key, + }, + { + .name = "reset-repo4-s3-key", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Key, + }, // repo-s3-key-secret option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -8961,6 +9833,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3KeySecret, }, + { + .name = "repo2-s3-key-secret", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3KeySecret, + }, + { + .name = "reset-repo2-s3-key-secret", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3KeySecret, + }, + { + .name = "repo3-s3-key-secret", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3KeySecret, + }, + { + .name = "reset-repo3-s3-key-secret", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3KeySecret, + }, + { + .name = "repo4-s3-key-secret", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3KeySecret, + }, + { + .name = "reset-repo4-s3-key-secret", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3KeySecret, + }, // repo-s3-key-type option // ----------------------------------------------------------------------------------------------------------------------------- @@ -8973,6 +9872,33 @@ static const struct option optionList[] = .name = "reset-repo1-s3-key-type", .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3KeyType, }, + { + .name = "repo2-s3-key-type", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3KeyType, + }, + { + .name = "reset-repo2-s3-key-type", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3KeyType, + }, + { + .name = "repo3-s3-key-type", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3KeyType, + }, + { + .name = "reset-repo3-s3-key-type", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3KeyType, + }, + { + .name = "repo4-s3-key-type", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3KeyType, + }, + { + .name = "reset-repo4-s3-key-type", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3KeyType, + }, // repo-s3-port option // ----------------------------------------------------------------------------------------------------------------------------- @@ -8985,6 +9911,33 @@ static const struct option optionList[] = .name = "reset-repo1-s3-port", .val = 
PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Port, }, + { + .name = "repo2-s3-port", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Port, + }, + { + .name = "reset-repo2-s3-port", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Port, + }, + { + .name = "repo3-s3-port", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Port, + }, + { + .name = "reset-repo3-s3-port", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Port, + }, + { + .name = "repo4-s3-port", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Port, + }, + { + .name = "reset-repo4-s3-port", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Port, + }, // repo-s3-region option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -9002,6 +9955,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Region, }, + { + .name = "repo2-s3-region", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Region, + }, + { + .name = "reset-repo2-s3-region", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Region, + }, + { + .name = "repo3-s3-region", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Region, + }, + { + .name = "reset-repo3-s3-region", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Region, + }, + { + .name = "repo4-s3-region", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Region, + }, + { + .name = "reset-repo4-s3-region", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Region, + }, // repo-s3-role option // ----------------------------------------------------------------------------------------------------------------------------- @@ -9014,6 +9994,33 @@ static const struct option optionList[] = .name = "reset-repo1-s3-role", .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Role, }, + { + .name = "repo2-s3-role", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Role, + }, + { + .name = "reset-repo2-s3-role", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Role, + }, + { + .name = "repo3-s3-role", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Role, + }, + { + .name = "reset-repo3-s3-role", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Role, + }, + { + .name = "repo4-s3-role", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Role, + }, + { + .name = "reset-repo4-s3-role", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Role, + }, // repo-s3-token option // ----------------------------------------------------------------------------------------------------------------------------- @@ -9026,6 +10033,33 @@ static const struct 
option optionList[] = .name = "reset-repo1-s3-token", .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Token, }, + { + .name = "repo2-s3-token", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Token, + }, + { + .name = "reset-repo2-s3-token", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Token, + }, + { + .name = "repo3-s3-token", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Token, + }, + { + .name = "reset-repo3-s3-token", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Token, + }, + { + .name = "repo4-s3-token", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Token, + }, + { + .name = "reset-repo4-s3-token", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3Token, + }, // repo-s3-uri-style option // ----------------------------------------------------------------------------------------------------------------------------- @@ -9038,6 +10072,33 @@ static const struct option optionList[] = .name = "reset-repo1-s3-uri-style", .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3UriStyle, }, + { + .name = "repo2-s3-uri-style", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3UriStyle, + }, + { + .name = "reset-repo2-s3-uri-style", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3UriStyle, + }, + { + .name = "repo3-s3-uri-style", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3UriStyle, + }, + { + .name = "reset-repo3-s3-uri-style", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3UriStyle, + }, + { + .name = "repo4-s3-uri-style", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3UriStyle, + }, + { + .name = "reset-repo4-s3-uri-style", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3UriStyle, + }, // repo-s3-verify-tls option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -9069,6 +10130,42 @@ static const struct option optionList[] = .name = "no-repo1-s3-verify-ssl", .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | PARSE_NEGATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3VerifyTls, }, + { + .name = "repo2-s3-verify-tls", + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3VerifyTls, + }, + { + .name = "no-repo2-s3-verify-tls", + .val = PARSE_OPTION_FLAG | PARSE_NEGATE_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3VerifyTls, + }, + { + .name = "reset-repo2-s3-verify-tls", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3VerifyTls, + }, + { + .name = "repo3-s3-verify-tls", + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3VerifyTls, + }, + { + .name = "no-repo3-s3-verify-tls", + .val = PARSE_OPTION_FLAG | PARSE_NEGATE_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3VerifyTls, + }, + { + .name = "reset-repo3-s3-verify-tls", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3VerifyTls, + }, + { + .name = "repo4-s3-verify-tls", + .val = 
PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3VerifyTls, + }, + { + .name = "no-repo4-s3-verify-tls", + .val = PARSE_OPTION_FLAG | PARSE_NEGATE_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3VerifyTls, + }, + { + .name = "reset-repo4-s3-verify-tls", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoS3VerifyTls, + }, // repo-type option and deprecations // ----------------------------------------------------------------------------------------------------------------------------- @@ -9086,6 +10183,33 @@ static const struct option optionList[] = .has_arg = required_argument, .val = PARSE_OPTION_FLAG | PARSE_DEPRECATE_FLAG | (0 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoType, }, + { + .name = "repo2-type", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoType, + }, + { + .name = "reset-repo2-type", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (1 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoType, + }, + { + .name = "repo3-type", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoType, + }, + { + .name = "reset-repo3-type", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (2 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoType, + }, + { + .name = "repo4-type", + .has_arg = required_argument, + .val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoType, + }, + { + .name = "reset-repo4-type", + .val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoType, + }, // resume option // ----------------------------------------------------------------------------------------------------------------------------- @@ -9362,6 +10486,7 @@ static const ConfigOption optionResolveOrder[] = cfgOptRaw, cfgOptRecurse, cfgOptRemoteType, + cfgOptRepo, cfgOptRepoCipherType, cfgOptRepoHardlink, cfgOptRepoLocal, diff --git a/src/config/parse.c b/src/config/parse.c index 89ce24c85..c1f76e9bf 100644 --- a/src/config/parse.c +++ b/src/config/parse.c @@ -1864,15 +1864,22 @@ configParse(unsigned int argListSize, const char *argList[], bool resetLogLevel) { ASSERT(groupId == cfgOptGrpPg || groupId == cfgOptGrpRepo); - // The repo default is always key 1 since only one is allowed - if (groupId == cfgOptGrpRepo) - continue; + // Get the group default option + unsigned int defaultOptionId = groupId == cfgOptGrpPg ? cfgOptPg : cfgOptRepo; + + // Does a default always exist? + config->optionGroup[groupId].indexDefaultExists = + // A default always exists for the pg group + groupId == cfgOptGrpPg || + // The repo group allows a default when the repo option is valid, i.e. either repo1 is the only key set or a repo + // is specified + cfgOptionValid(cfgOptRepo); // Does the group default option exist? 
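The generated optionList entries above pack three pieces of information into each getopt val: the option flags, the zero-based key index (shifted by PARSE_KEY_IDX_SHIFT), and the option id, so the same cfgOptRepoPath id can serve repo1-path through repo4-path with only the key index changing. A minimal, self-contained sketch of that packing scheme follows; the shift and mask values are illustrative only, not the ones defined in parse.c, and cfgOptRepoPath is stood in for by a plain constant.

/* Sketch only: how an option id, key index, and flags can share one getopt val.
   The shift/mask values below are illustrative, not the actual parse.c values. */
#include <assert.h>
#include <stdio.h>

#define OPT_FLAG      (1 << 30)     /* marks the val as an option */
#define KEY_IDX_SHIFT 12            /* key index stored above the option id */
#define KEY_IDX_MASK  0xFF
#define OPT_ID_MASK   0xFFF

int main(void)
{
    unsigned int optRepoPath = 42;  /* stand-in for cfgOptRepoPath */

    /* Rough equivalent of ".val = PARSE_OPTION_FLAG | (3 << PARSE_KEY_IDX_SHIFT) | cfgOptRepoPath"
       for a hypothetical repo4-path entry (key index 3 corresponds to repo4). */
    unsigned int val = OPT_FLAG | (3u << KEY_IDX_SHIFT) | optRepoPath;

    /* The parser can recover each part with shifts and masks */
    unsigned int optionId = val & OPT_ID_MASK;
    unsigned int keyIdx = (val >> KEY_IDX_SHIFT) & KEY_IDX_MASK;

    assert(optionId == optRepoPath && keyIdx == 3 && (val & OPT_FLAG));
    printf("option %u, key index %u (repo%u)\n", optionId, keyIdx, keyIdx + 1);
    return 0;
}

Keeping the key index inside val is what lets the table stay flat: repo2-, repo3-, and repo4- variants of an option differ only in the shifted index, which the parser later uses to place the value in the correct slot of the option group.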
- if (cfgOptionTest(cfgOptPg)) + if (cfgOptionTest(defaultOptionId)) { // Search for the key - unsigned int optionKeyIdx = cfgOptionUInt(cfgOptPg) - 1; + unsigned int optionKeyIdx = cfgOptionUInt(defaultOptionId) - 1; unsigned int index = 0; for (; index < cfgOptionGroupIdxTotal(groupId); index++) @@ -1885,12 +1892,13 @@ configParse(unsigned int argListSize, const char *argList[], bool resetLogLevel) if (index == cfgOptionGroupIdxTotal(groupId)) { THROW_FMT( - OptionInvalidValueError, "key '%u' is not valid for '%s' option", cfgOptionUInt(cfgOptPg), - cfgOptionName(cfgOptPg)); + OptionInvalidValueError, "key '%u' is not valid for '%s' option", cfgOptionUInt(defaultOptionId), + cfgOptionName(defaultOptionId)); } // Set the default config->optionGroup[groupId].indexDefault = index; + config->optionGroup[groupId].indexDefaultExists = true; } } } diff --git a/src/protocol/helper.c b/src/protocol/helper.c index 4318e0564..9a9276205 100644 --- a/src/protocol/helper.c +++ b/src/protocol/helper.c @@ -85,7 +85,18 @@ repoIsLocalVerify(void) { FUNCTION_TEST_VOID(); - if (!repoIsLocal(cfgOptionGroupIdxDefault(cfgOptGrpRepo))) + repoIsLocalVerifyIdx(cfgOptionGroupIdxDefault(cfgOptGrpRepo)); + + FUNCTION_TEST_RETURN_VOID(); +} + +/**********************************************************************************************************************************/ +void +repoIsLocalVerifyIdx(unsigned int repoIdx) +{ + FUNCTION_TEST_VOID(); + + if (!repoIsLocal(repoIdx)) THROW_FMT(HostInvalidError, "%s command must be run on the repository host", cfgCommandName(cfgCommand())); FUNCTION_TEST_RETURN_VOID(); @@ -136,7 +147,8 @@ protocolLocalParam(ProtocolStorageType protocolStorageType, unsigned int hostIdx // Add the process id -- used when more than one process will be called kvPut(optionReplace, VARSTR(CFGOPT_PROCESS_STR), VARUINT(processId)); - // Add the group default id + // Add the pg default. Don't do this for repos because the repo default should come from the user or the local should + // handle all the repos equally. Repos don't get special handling like pg primaries or standbys. if (protocolStorageType == protocolStorageTypePg) kvPut(optionReplace, VARSTRDEF(CFGOPT_PG), VARUINT(cfgOptionGroupIdxToKey(cfgOptGrpPg, hostIdx))); @@ -389,6 +401,10 @@ protocolRemoteParam(ProtocolStorageType protocolStorageType, unsigned int hostId } } + // Set repo default so the remote only operates on a single repo + if (protocolStorageType == protocolStorageTypeRepo) + kvPut(optionReplace, VARSTRDEF(CFGOPT_REPO), VARUINT(cfgOptionGroupIdxToKey(cfgOptGrpRepo, hostIdx))); + // Add the process id if not set. This means that the remote is being started from the main process and should always get a // process id of 0. 
if (!cfgOptionTest(cfgOptProcess)) @@ -479,19 +495,19 @@ protocolRemoteGet(ProtocolStorageType protocolStorageType, unsigned int hostIdx) PROTOCOL_SERVICE_REMOTE_STR, execIoRead(protocolHelperClient->exec), execIoWrite(protocolHelperClient->exec)); // Get cipher options from the remote if none are locally configured - if (isRepo && strEq(cfgOptionStr(cfgOptRepoCipherType), CIPHER_TYPE_NONE_STR)) + if (isRepo && strEq(cfgOptionIdxStr(cfgOptRepoCipherType, hostIdx), CIPHER_TYPE_NONE_STR)) { // Options to query VariantList *param = varLstNew(); - varLstAdd(param, varNewStrZ(cfgOptionName(cfgOptRepoCipherType))); - varLstAdd(param, varNewStrZ(cfgOptionName(cfgOptRepoCipherPass))); + varLstAdd(param, varNewStrZ(cfgOptionIdxName(cfgOptRepoCipherType, hostIdx))); + varLstAdd(param, varNewStrZ(cfgOptionIdxName(cfgOptRepoCipherPass, hostIdx))); VariantList *optionList = configProtocolOption(protocolHelperClient->client, param); if (!strEq(varStr(varLstGet(optionList, 0)), CIPHER_TYPE_NONE_STR)) { - cfgOptionSet(cfgOptRepoCipherType, cfgSourceConfig, varLstGet(optionList, 0)); - cfgOptionSet(cfgOptRepoCipherPass, cfgSourceConfig, varLstGet(optionList, 1)); + cfgOptionIdxSet(cfgOptRepoCipherType, hostIdx, cfgSourceConfig, varLstGet(optionList, 0)); + cfgOptionIdxSet(cfgOptRepoCipherPass, hostIdx, cfgSourceConfig, varLstGet(optionList, 1)); } } diff --git a/src/protocol/helper.h b/src/protocol/helper.h index 1bdb345eb..082472f2a 100644 --- a/src/protocol/helper.h +++ b/src/protocol/helper.h @@ -58,6 +58,7 @@ bool repoIsLocal(unsigned int repoIdx); // Error if the repository is not local void repoIsLocalVerify(void); +void repoIsLocalVerifyIdx(unsigned int repoIdx); // Get enum/string for protocol storage type ProtocolStorageType protocolStorageTypeEnum(const String *type); diff --git a/test/define.yaml b/test/define.yaml index 77119085a..32aab2ea1 100644 --- a/test/define.yaml +++ b/test/define.yaml @@ -687,7 +687,7 @@ unit: # ---------------------------------------------------------------------------------------------------------------------------- - name: stanza - total: 5 + total: 4 coverage: - command/stanza/common diff --git a/test/expect/mock-all-001.log b/test/expect/mock-all-001.log index f493d2362..056acab32 100644 --- a/test/expect/mock-all-001.log +++ b/test/expect/mock-all-001.log @@ -17,6 +17,7 @@ stanza-create db - create required data for stanza (db-primary host) > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db --no-online stanza-create ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db +P00 INFO: stanza-create for stanza 'db' on repo1 P00 INFO: stanza-create command end: completed successfully + supplemental file: [TEST_PATH]/db-primary/repo/backup/db/backup.info @@ -56,9 +57,9 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" full backup - create pg_stat link, pg_clog dir (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] 
--config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --manifest-save-threshold=3 --buffer-size=[BUFFER-SIZE] --checksum-page --process-max=1 --repo1-type=cifs --type=full --stanza=db backup +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --manifest-save-threshold=3 --buffer-size=[BUFFER-SIZE] --checksum-page --process-max=1 --repo1-type=cifs --repo=1 --type=full --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --checksum-page --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --manifest-save-threshold=3 --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --process-max=1 --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --repo1-type=cifs --stanza=db --start-fast --type=full +P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --checksum-page --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --manifest-save-threshold=3 --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --process-max=1 --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --repo1-type=cifs --stanza=db --start-fast --type=full P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P01 INFO: backup file [TEST_PATH]/db-primary/db/base/base/32768/33001 (64KB, 33%) checksum 6bf316f11d28c28914ea9be92c00de9bea6d9a6b @@ -81,7 +82,7 @@ P01 INFO: backup file [TEST_PATH]/db-primary/db/base/special-!_.*'()&!@;:+,? 
( P00 INFO: full backup size = 192KB P00 INFO: new backup label = [BACKUP-FULL-1] P00 INFO: backup command end: completed successfully -P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo1-path=[TEST_PATH]/db-primary/repo --repo1-type=cifs --stanza=db +P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --repo1-type=cifs --stanza=db P00 INFO: option 'repo1-retention-archive' is not set - archive logs will not be expired P00 INFO: expire command end: completed successfully @@ -239,9 +240,9 @@ P00 INFO: stop command begin [BACKREST-VERSION]: --config=[TEST_PATH]/db-prima P00 INFO: stop command end: completed successfully full backup - global stop (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --type=full --stanza=db backup +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --repo=1 --type=full --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast --type=full +P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast --type=full P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
P00 ERROR: [062]: stop file exists for all stanzas @@ -261,9 +262,9 @@ P00 WARN: stop file already exists for stanza db P00 INFO: stop command end: completed successfully full backup - stanza stop (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --type=full --stanza=db backup +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --repo=1 --type=full --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast --type=full +P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast --type=full P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
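The expect-log changes in this file show the new repo option being recorded on the backup, expire, and restore command lines (--repo=1 selects the first configured repository). As a usage sketch only, assuming a stanza named demo and a configuration where a second repository is permitted, a command along these lines would target that repository; it is illustrative and not taken from the test output:

    pgbackrest --stanza=demo --repo=2 restore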
P00 ERROR: [062]: stop file exists for stanza db @@ -289,9 +290,9 @@ P00 WARN: stop file does not exist P00 INFO: start command end: completed successfully full backup - resume (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --force --checksum-page --delta --type=full --stanza=db backup +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --force --checksum-page --delta --repo=1 --type=full --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --checksum-page --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --delta --exclude=postgresql.auto.conf --exclude=pg_log/ --exclude=pg_log2 --exclude=apipe --exec-id=[EXEC-ID] --force --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast --type=full +P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --checksum-page --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --delta --exclude=postgresql.auto.conf --exclude=pg_log/ --exclude=pg_log2 --exclude=apipe --exec-id=[EXEC-ID] --force --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast --type=full P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 WARN: backup '[BACKUP-FULL-1]' missing manifest removed from backup.info @@ -325,7 +326,7 @@ P01 INFO: backup file [TEST_PATH]/db-primary/db/base/special-!_.*'()&!@;:+,? 
( P00 INFO: full backup size = 192KB P00 INFO: new backup label = [BACKUP-FULL-2] P00 INFO: backup command end: completed successfully -P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db +P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db P00 INFO: option 'repo1-retention-archive' is not set - archive logs will not be expired P00 INFO: expire command end: completed successfully @@ -485,9 +486,9 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" restore delta, backup '[BACKUP-FULL-2]' - add and delete files (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --set=[BACKUP-FULL-2] --link-all --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --set=[BACKUP-FULL-2] --link-all --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: restore command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --exec-id=[EXEC-ID] --job-retry=0 --link-all --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --set=[BACKUP-FULL-2] --stanza=db +P00 INFO: restore command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --exec-id=[EXEC-ID] --job-retry=0 --link-all --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --set=[BACKUP-FULL-2] --stanza=db P00 INFO: restore backup set [BACKUP-FULL-2] P00 WARN: unknown user in backup manifest mapped to '[USER-2]' P00 WARN: unknown group in backup manifest mapped to '[GROUP-2]' @@ -567,12 +568,12 @@ P00 INFO: restore command end: completed successfully + supplemental file: [TEST_PATH]/db-primary/db/base/recovery.conf ----------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' restore delta, backup '[BACKUP-FULL-2]' - fix permissions (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta 
--set=[BACKUP-FULL-2] --link-all --log-level-console=detail --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --set=[BACKUP-FULL-2] --link-all --log-level-console=detail --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: restore command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --exec-id=[EXEC-ID] --job-retry=0 --link-all --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --set=[BACKUP-FULL-2] --stanza=db +P00 INFO: restore command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --exec-id=[EXEC-ID] --job-retry=0 --link-all --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --set=[BACKUP-FULL-2] --stanza=db P00 INFO: restore backup set [BACKUP-FULL-2] P00 WARN: unknown user in backup manifest mapped to '[USER-1]' P00 WARN: unknown group in backup manifest mapped to '[GROUP-1]' @@ -628,12 +629,12 @@ P00 INFO: restore command end: completed successfully + supplemental file: [TEST_PATH]/db-primary/db/base/recovery.conf ----------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' restore delta, backup '[BACKUP-FULL-2]' - fix broken symlink (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --set=[BACKUP-FULL-2] --link-all --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --set=[BACKUP-FULL-2] --link-all --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: restore command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --exec-id=[EXEC-ID] --job-retry=0 --link-all --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --set=[BACKUP-FULL-2] --stanza=db +P00 INFO: restore command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --exec-id=[EXEC-ID] --job-retry=0 --link-all --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base 
--protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --set=[BACKUP-FULL-2] --stanza=db P00 INFO: restore backup set [BACKUP-FULL-2] P00 WARN: unknown user in backup manifest mapped to current user P00 WARN: unknown group in backup manifest mapped to current group @@ -689,12 +690,12 @@ P00 INFO: restore command end: completed successfully + supplemental file: [TEST_PATH]/db-primary/db/base/recovery.conf ----------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' restore delta, force, backup '[BACKUP-FULL-2]' - restore links as directories (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --force --set=[BACKUP-FULL-2] --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --force --set=[BACKUP-FULL-2] --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: restore command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --exec-id=[EXEC-ID] --force --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --set=[BACKUP-FULL-2] --stanza=db +P00 INFO: restore command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --exec-id=[EXEC-ID] --force --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --set=[BACKUP-FULL-2] --stanza=db P00 INFO: restore backup set [BACKUP-FULL-2] P00 WARN: file link 'pg_hba.conf' will be restored as a file at the same location P00 WARN: contents of directory link 'pg_stat' will be restored in a directory at the same location @@ -753,12 +754,12 @@ P00 INFO: restore command end: completed successfully + supplemental file: [TEST_PATH]/db-primary/db/base/recovery.conf ----------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' incr backup - add tablespace 1 (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --stanza=db backup +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --repo=1 --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: backup 
command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast +P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 INFO: last backup label = [BACKUP-FULL-2], version = [VERSION-1] @@ -788,7 +789,7 @@ P00 DETAIL: reference pg_data/zero_from_start to [BACKUP-FULL-2] P00 INFO: incr backup size = 22B P00 INFO: new backup label = [BACKUP-INCR-1] P00 INFO: backup command end: completed successfully -P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db +P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db P00 INFO: option 'repo1-retention-archive' is not set - archive logs will not be expired P00 INFO: expire command end: completed successfully @@ -949,9 +950,9 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" incr backup - resume and add tablespace 2 (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --process-max=1 --delta --stanza=db backup +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --process-max=1 --delta --repo=1 --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --delta --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base 
--process-max=1 --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast +P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --delta --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --process-max=1 --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 WARN: backup '[BACKUP-INCR-1]' missing manifest removed from backup.info @@ -1003,7 +1004,7 @@ P00 DETAIL: reference pg_data/zero_from_start to [BACKUP-FULL-2] P00 INFO: incr backup size = 192KB P00 INFO: new backup label = [BACKUP-INCR-2] P00 INFO: backup command end: completed successfully -P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db +P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db P00 INFO: option 'repo1-retention-archive' is not set - archive logs will not be expired P00 INFO: expire command end: completed successfully @@ -1174,9 +1175,9 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" diff backup - drop tablespace 11 (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --process-max=1 --delta --type=diff --stanza=db backup +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --process-max=1 --delta --repo=1 --type=diff --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --delta --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --process-max=1 --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast --type=diff +P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --delta --exec-id=[EXEC-ID] --job-retry=0 
--lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --process-max=1 --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast --type=diff P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 INFO: last backup label = [BACKUP-FULL-2], version = [VERSION-1] @@ -1221,7 +1222,7 @@ P00 DETAIL: reference pg_data/zero_from_start to [BACKUP-FULL-2] P00 INFO: diff backup size = 192KB P00 INFO: new backup label = [BACKUP-DIFF-1] P00 INFO: backup command end: completed successfully -P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db +P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db P00 INFO: option 'repo1-retention-archive' is not set - archive logs will not be expired P00 INFO: expire command end: completed successfully @@ -1388,9 +1389,9 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" restore, backup '[BACKUP-DIFF-1]', remap - remap all paths (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --set=[BACKUP-DIFF-1] --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --set=[BACKUP-DIFF-1] --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: restore command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base-2 --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --set=[BACKUP-DIFF-1] --stanza=db --tablespace-map=1=[TEST_PATH]/db-primary/db/tablespace/ts1-2 --tablespace-map=2=[TEST_PATH]/db-primary/db/tablespace/ts2-2 +P00 INFO: restore command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base-2 --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --set=[BACKUP-DIFF-1] --stanza=db --tablespace-map=1=[TEST_PATH]/db-primary/db/tablespace/ts1-2 
--tablespace-map=2=[TEST_PATH]/db-primary/db/tablespace/ts2-2 P00 INFO: restore backup set [BACKUP-DIFF-1] P00 INFO: remap data directory to '[TEST_PATH]/db-primary/db/base-2' P00 INFO: map tablespace 'pg_tblspc/1' to '[TEST_PATH]/db-primary/db/tablespace/ts1-2' @@ -1470,12 +1471,12 @@ P00 INFO: restore command end: completed successfully + supplemental file: [TEST_PATH]/db-primary/db/base-2/recovery.conf ------------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' restore delta, backup '[BACKUP-DIFF-1]', remap - ensure file in tblspc root remains after --delta (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --set=[BACKUP-DIFF-1] --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --set=[BACKUP-DIFF-1] --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: restore command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base-2 --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --set=[BACKUP-DIFF-1] --stanza=db --tablespace-map=1=[TEST_PATH]/db-primary/db/tablespace/ts1-2 --tablespace-map=2=[TEST_PATH]/db-primary/db/tablespace/ts2-2 +P00 INFO: restore command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base-2 --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --set=[BACKUP-DIFF-1] --stanza=db --tablespace-map=1=[TEST_PATH]/db-primary/db/tablespace/ts1-2 --tablespace-map=2=[TEST_PATH]/db-primary/db/tablespace/ts2-2 P00 INFO: restore backup set [BACKUP-DIFF-1] P00 INFO: remap data directory to '[TEST_PATH]/db-primary/db/base-2' P00 INFO: map tablespace 'pg_tblspc/1' to '[TEST_PATH]/db-primary/db/tablespace/ts1-2' @@ -1539,12 +1540,12 @@ P00 INFO: restore command end: completed successfully + supplemental file: [TEST_PATH]/db-primary/db/base-2/recovery.conf ------------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' incr backup - add files and remove tablespace 2 (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --process-max=1 --stanza=db backup +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] 
--config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --process-max=1 --repo=1 --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base-2 --process-max=1 --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast +P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base-2 --process-max=1 --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 INFO: last backup label = [BACKUP-DIFF-1], version = 0.00 @@ -1575,7 +1576,7 @@ P00 DETAIL: reference pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt to [BACKUP-D P00 INFO: incr backup size = 13B P00 INFO: new backup label = [BACKUP-INCR-3] P00 INFO: backup command end: completed successfully -P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db +P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db P00 INFO: option 'repo1-retention-archive' is not set - archive logs will not be expired P00 INFO: expire command end: completed successfully @@ -1743,9 +1744,9 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" incr backup - update files (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --stanza=db backup +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --repo=1 --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 
--lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base-2 --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast +P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base-2 --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 INFO: last backup label = [BACKUP-INCR-3], version = [VERSION-1] @@ -1795,7 +1796,7 @@ P00 DETAIL: reference pg_tblspc/2/[TS_PATH-1]/32768/tablespace2b.txt to [BACKUP- P00 INFO: incr backup size = 176KB P00 INFO: new backup label = [BACKUP-INCR-4] P00 INFO: backup command end: completed successfully -P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db +P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db P00 INFO: option 'repo1-retention-archive' is not set - archive logs will not be expired P00 INFO: expire command end: completed successfully @@ -1964,9 +1965,9 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" diff backup - updates since last full (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --process-max=1 --delta --type=diff --stanza=db backup +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --process-max=1 --delta --repo=1 --type=diff --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --delta --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base-2 --process-max=1 --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast --type=diff +P00 INFO: backup command begin 
[BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --delta --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base-2 --process-max=1 --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast --type=diff P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 INFO: last backup label = [BACKUP-FULL-2], version = [VERSION-1] @@ -2012,7 +2013,7 @@ P00 DETAIL: reference pg_data/zero_from_start to [BACKUP-FULL-2] P00 INFO: diff backup size = 176KB P00 INFO: new backup label = [BACKUP-DIFF-2] P00 INFO: backup command end: completed successfully -P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db +P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db P00 INFO: option 'repo1-retention-archive' is not set - archive logs will not be expired P00 INFO: expire command end: completed successfully @@ -2182,9 +2183,9 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" diff backup - remove files (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --process-max=1 --delta --type=diff --stanza=db backup +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --process-max=1 --delta --repo=1 --type=diff --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=zst --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --delta --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base-2 --process-max=1 --protocol-timeout=60 --repo1-hardlink --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast --type=diff +P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=zst --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --delta --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off 
--log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base-2 --process-max=1 --protocol-timeout=60 --repo=1 --repo1-hardlink --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast --type=diff P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 INFO: last backup label = [BACKUP-FULL-2], version = [VERSION-1] @@ -2229,7 +2230,7 @@ P00 DETAIL: reference pg_data/zero_from_start to [BACKUP-FULL-2] P00 INFO: diff backup size = 176KB P00 INFO: new backup label = [BACKUP-DIFF-3] P00 INFO: backup command end: completed successfully -P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db +P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db P00 INFO: option 'repo1-retention-archive' is not set - archive logs will not be expired P00 INFO: expire command end: completed successfully @@ -2399,9 +2400,9 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" full backup - update file (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --type=full --stanza=db backup +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --repo=1 --type=full --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=zst --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base-2 --protocol-timeout=60 --repo1-hardlink --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast --type=full +P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=zst --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base-2 --protocol-timeout=60 --repo=1 --repo1-hardlink --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast --type=full P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely 
(without warning), set option 'repo1-retention-full' to the maximum. P01 INFO: backup file [TEST_PATH]/db-primary/db/base-2/base/32768/33001 (64KB, 36%) checksum 6bf316f11d28c28914ea9be92c00de9bea6d9a6b @@ -2428,7 +2429,7 @@ P01 INFO: backup file [TEST_PATH]/db-primary/db/base-2/pg_tblspc/2/[TS_PATH-1] P00 INFO: full backup size = 176KB P00 INFO: new backup label = [BACKUP-FULL-3] P00 INFO: backup command end: completed successfully -P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db +P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db P00 INFO: option 'repo1-retention-archive' is not set - archive logs will not be expired P00 INFO: expire command end: completed successfully @@ -2599,9 +2600,9 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" expire full=1 (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo1-retention-full=1 --stanza=db expire +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo1-retention-full=1 --repo=1 --stanza=db expire ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo1-path=[TEST_PATH]/db-primary/repo --repo1-retention-full=1 --stanza=db +P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --repo1-retention-full=1 --stanza=db P00 INFO: expire full backup set: [BACKUP-FULL-2], [BACKUP-INCR-2], [BACKUP-DIFF-1], [BACKUP-INCR-3], [BACKUP-INCR-4], [BACKUP-DIFF-2], [BACKUP-DIFF-3] P00 INFO: remove expired backup [BACKUP-DIFF-3] P00 INFO: remove expired backup [BACKUP-DIFF-2] @@ -2613,9 +2614,9 @@ P00 INFO: remove expired backup [BACKUP-FULL-2] P00 INFO: expire command end: completed successfully diff backup - add file (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --checksum-page --type=diff --stanza=db backup +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --checksum-page --repo=1 --type=diff --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ 
-P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --checksum-page --compress-level=3 --compress-type=zst --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base-2 --protocol-timeout=60 --repo1-hardlink --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast --type=diff +P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --checksum-page --compress-level=3 --compress-type=zst --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base-2 --protocol-timeout=60 --repo=1 --repo1-hardlink --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast --type=diff P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 INFO: last backup label = [BACKUP-FULL-3], version = [VERSION-1] @@ -2644,7 +2645,7 @@ P00 DETAIL: hardlink pg_tblspc/2/[TS_PATH-1]/32768/tablespace2c.txt to [BACKUP-F P00 INFO: diff backup size = 9B P00 INFO: new backup label = [BACKUP-DIFF-4] P00 INFO: backup command end: completed successfully -P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db +P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db P00 INFO: option 'repo1-retention-archive' is not set - archive logs will not be expired P00 INFO: expire command end: completed successfully @@ -2811,9 +2812,9 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" restore delta, remap - selective restore 16384 (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --db-include=16384 --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --db-include=16384 --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: restore command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-include=16384 --delta --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] 
--no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base-2 --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --tablespace-map=2=[TEST_PATH]/db-primary/db/tablespace/ts2-2 +P00 INFO: restore command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-include=16384 --delta --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base-2 --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --tablespace-map=2=[TEST_PATH]/db-primary/db/tablespace/ts2-2 P00 INFO: restore backup set [BACKUP-DIFF-4] P00 INFO: map tablespace 'pg_tblspc/2' to '[TEST_PATH]/db-primary/db/tablespace/ts2-2' P00 DETAIL: databases found for selective restore (1, 16384, 32768) @@ -2872,12 +2873,12 @@ P00 INFO: restore command end: completed successfully + supplemental file: [TEST_PATH]/db-primary/db/base-2/recovery.conf ------------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' restore delta, remap - selective restore 32768 (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --db-include=32768 --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --db-include=32768 --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: restore command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-include=32768 --delta --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base-2 --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --tablespace-map=2=[TEST_PATH]/db-primary/db/tablespace/ts2-2 +P00 INFO: restore command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-include=32768 --delta --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base-2 --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --tablespace-map=2=[TEST_PATH]/db-primary/db/tablespace/ts2-2 P00 INFO: restore backup set [BACKUP-DIFF-4] P00 INFO: map tablespace 'pg_tblspc/2' to '[TEST_PATH]/db-primary/db/tablespace/ts2-2' P00 DETAIL: databases found for selective restore (1, 16384, 32768) @@ -2936,22 +2937,22 @@ P00 INFO: restore command end: completed successfully + supplemental file: [TEST_PATH]/db-primary/db/base-2/recovery.conf ------------------------------------------------------------------- # Recovery settings generated by pgBackRest 
restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' restore delta, remap, expect exit 80 - error on invalid id (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --log-level-console=warn --db-include=7777 --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --log-level-console=warn --db-include=7777 --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ P00 ERROR: [080]: database to include '7777' does not exist restore delta, remap, expect exit 81 - error on system id (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --log-level-console=warn --db-include=1 --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --log-level-console=warn --db-include=1 --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ P00 ERROR: [081]: system databases (template0, postgres, etc.) are included by default restore, remap - no tablespace remap (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --tablespace-map-all=../../tablespace --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --tablespace-map-all=../../tablespace --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: restore command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base-2/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --tablespace-map-all=../../tablespace +P00 INFO: restore command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base-2/base --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --tablespace-map-all=../../tablespace P00 INFO: restore backup set [BACKUP-DIFF-4] P00 INFO: remap data directory to '[TEST_PATH]/db-primary/db/base-2/base' P00 INFO: map tablespace 'pg_tblspc/2' to '../../tablespace/ts2' @@ -3025,7 +3026,7 @@ P00 INFO: restore command end: completed successfully + supplemental file: [TEST_PATH]/db-primary/db/base-2/base/recovery.conf ------------------------------------------------------------------------ # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] 
--config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' > ls -1Rtr [TEST_PATH]/db-primary/repo/backup/db/backup.history ------------------------------------------------------------------------------------------------------------------------------------ @@ -3046,9 +3047,9 @@ restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.con [BACKUP-DIFF-4].manifest.gz diff backup - option backup-standby reset - backup performed from primary (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --log-level-console=info --backup-standby --type=diff --stanza=db backup +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --no-online --log-level-console=info --backup-standby --repo=1 --type=diff --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: backup command begin [BACKREST-VERSION]: --backup-standby --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=zst --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=info --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base-2/base --protocol-timeout=60 --repo1-hardlink --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast --type=diff +P00 INFO: backup command begin [BACKREST-VERSION]: --backup-standby --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=zst --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=info --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base-2/base --protocol-timeout=60 --repo=1 --repo1-hardlink --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db --start-fast --type=diff P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
 P00 WARN: option backup-standby is enabled but backup is offline - backups will be performed from the primary
@@ -3057,7 +3058,7 @@ P01 INFO: backup file [TEST_PATH]/db-primary/db/base-2/base/base/base2.txt (9B
 P00 INFO: diff backup size = 9B
 P00 INFO: new backup label = [BACKUP-DIFF-5]
 P00 INFO: backup command end: completed successfully
-P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=info --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db
+P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=info --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db
 P00 INFO: option 'repo1-retention-archive' is not set - archive logs will not be expired
 P00 INFO: expire command end: completed successfully

diff --git a/test/expect/mock-all-002.log b/test/expect/mock-all-002.log
index 84a5a5e4b..10752d571 100644
--- a/test/expect/mock-all-002.log
+++ b/test/expect/mock-all-002.log
@@ -48,7 +48,7 @@ db-version="9.4"
 backrest-checksum="[CHECKSUM]"

 full backup - create pg_stat link, pg_clog dir (backup host)
-> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --manifest-save-threshold=3 --cmd-ssh=/usr/bin/ssh --pg1-port=9999 --pg1-socket-path=/test_socket_path --buffer-size=[BUFFER-SIZE] --checksum-page --process-max=1 --type=full --stanza=db backup
+> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --manifest-save-threshold=3 --cmd-ssh=/usr/bin/ssh --pg1-port=9999 --pg1-socket-path=/test_socket_path --buffer-size=[BUFFER-SIZE] --checksum-page --process-max=1 --repo=1 --type=full --stanza=db backup
 ------------------------------------------------------------------------------------------------------------------------------------
 P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space
            HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum.
@@ -242,7 +242,7 @@ db-version="9.4"
 backrest-checksum="[CHECKSUM]"

 full backup - resume (backup host)
-> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --force --checksum-page --type=full --stanza=db backup
+> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --force --checksum-page --repo=1 --type=full --stanza=db backup
 ------------------------------------------------------------------------------------------------------------------------------------
 P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space
            HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum.
@@ -445,25 +445,25 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" restore delta, backup '[BACKUP-FULL-2]' - add and delete files (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --set=[BACKUP-FULL-2] --link-all --cmd-ssh=/usr/bin/ssh --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --set=[BACKUP-FULL-2] --link-all --cmd-ssh=/usr/bin/ssh --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-primary/db/base/recovery.conf ----------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --cmd-ssh=/usr/bin/ssh --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --cmd-ssh=/usr/bin/ssh --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' restore delta, backup '[BACKUP-FULL-2]' - fix broken symlink (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --set=[BACKUP-FULL-2] --link-all --compress-level-network=0 --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --set=[BACKUP-FULL-2] --link-all --compress-level-network=0 --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-primary/db/base/recovery.conf ----------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --compress-level-network=0 --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --compress-level-network=0 --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' restore delta, force, backup '[BACKUP-FULL-2]' - restore links as directories (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --force --set=[BACKUP-FULL-2] --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --force --set=[BACKUP-FULL-2] --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ P00 WARN: file link 'pg_hba.conf' will be restored as a file at the same location P00 WARN: contents of directory link 'pg_stat' will be restored in a directory at the same location @@ -474,10 +474,10 @@ P00 WARN: unknown group 'bogus' in backup manifest mapped to current group + supplemental file: [TEST_PATH]/db-primary/db/base/recovery.conf ----------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' incr backup - add tablespace 1 (backup host) -> [CONTAINER-EXEC] backup 
[BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --stanza=db backup +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --repo=1 --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. @@ -680,7 +680,7 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" incr backup - resume and add tablespace 2 (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --process-max=1 --stanza=db backup +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --process-max=1 --repo=1 --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. @@ -897,7 +897,7 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" diff backup - drop tablespace 11 (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --process-max=1 --type=diff --stanza=db backup +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --process-max=1 --repo=1 --type=diff --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
@@ -1109,25 +1109,25 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" restore, backup '[BACKUP-DIFF-1]', remap - remap all paths (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --set=[BACKUP-DIFF-1] --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --set=[BACKUP-DIFF-1] --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-primary/db/base-2/recovery.conf ------------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' restore delta, backup '[BACKUP-DIFF-1]', remap - ensure file in tblspc root remains after --delta (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --set=[BACKUP-DIFF-1] --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --set=[BACKUP-DIFF-1] --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-primary/db/base-2/recovery.conf ------------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' incr backup - add files and remove tablespace 2 (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --process-max=1 --stanza=db backup +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --process-max=1 --repo=1 --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. @@ -1338,7 +1338,7 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" incr backup - update files (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --stanza=db backup +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --repo=1 --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
@@ -1551,7 +1551,7 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" diff backup - updates since last full (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --process-max=1 --type=diff --stanza=db backup +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --process-max=1 --repo=1 --type=diff --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. @@ -1767,7 +1767,7 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" diff backup - remove files (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --process-max=1 --type=diff --stanza=db backup +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --process-max=1 --repo=1 --type=diff --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. @@ -1982,7 +1982,7 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" full backup - update file (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --type=full --stanza=db backup +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --repo=1 --type=full --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. @@ -2193,11 +2193,11 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" expire full=1 (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo1-retention-full=1 --stanza=db expire +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo1-retention-full=1 --repo=1 --stanza=db expire ------------------------------------------------------------------------------------------------------------------------------------ diff backup - add file (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --checksum-page --type=diff --stanza=db backup +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --checksum-page --repo=1 --type=diff --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
@@ -2404,46 +2404,46 @@ db-version="9.4" backrest-checksum="[CHECKSUM]" restore delta, remap - selective restore 16384 (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --db-include=16384 --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --db-include=16384 --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-primary/db/base-2/recovery.conf ------------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' restore delta, remap - selective restore 32768 (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --db-include=32768 --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --db-include=32768 --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-primary/db/base-2/recovery.conf ------------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' restore delta, remap, expect exit 80 - error on invalid id (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --log-level-console=warn --db-include=7777 --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --log-level-console=warn --db-include=7777 --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ P00 ERROR: [080]: database to include '7777' does not exist restore delta, remap, expect exit 81 - error on system id (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --log-level-console=warn --db-include=1 --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --log-level-console=warn --db-include=1 --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ P00 ERROR: [081]: system databases (template0, postgres, etc.) 
are included by default restore, remap - no tablespace remap (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --tablespace-map-all=../../tablespace --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --tablespace-map-all=../../tablespace --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-primary/db/base-2/base/recovery.conf ------------------------------------------------------------------------ # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' diff backup - option backup-standby reset - backup performed from primary (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --log-level-console=info --backup-standby --type=diff --stanza=db backup +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --log-level-console=info --backup-standby --repo=1 --type=diff --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: backup command begin [BACKREST-VERSION]: --backup-standby --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-level-network=1 --compress-type=lz4 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/backup/lock --log-level-console=info --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base-2/base --process-max=2 --protocol-timeout=60 --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key= --repo1-s3-key-secret= --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db --start-fast --type=diff +P00 INFO: backup command begin [BACKREST-VERSION]: --backup-standby --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-level-network=1 --compress-type=lz4 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/backup/lock --log-level-console=info --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base-2/base --process-max=2 --protocol-timeout=60 --repo=1 --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key= --repo1-s3-key-secret= --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db --start-fast --type=diff P00 WARN: option 'repo1-retention-full' is not set for 'repo1-retention-full-type=count', the 
repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 WARN: option backup-standby is enabled but backup is offline - backups will be performed from the primary @@ -2452,7 +2452,7 @@ P01 INFO: backup file db-primary:[TEST_PATH]/db-primary/db/base-2/base/base/ba P00 INFO: diff backup size = 9B P00 INFO: new backup label = [BACKUP-DIFF-5] P00 INFO: backup command end: completed successfully -P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/backup/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/backup/lock --log-level-console=info --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log --no-log-timestamp --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key= --repo1-s3-key-secret= --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db +P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/backup/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/backup/lock --log-level-console=info --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log --no-log-timestamp --repo=1 --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key= --repo1-s3-key-secret= --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db P00 INFO: option 'repo1-retention-archive' is not set - archive logs will not be expired P00 INFO: expire command end: completed successfully diff --git a/test/expect/mock-archive-001.log b/test/expect/mock-archive-001.log index c133aa11b..6aeb5b147 100644 --- a/test/expect/mock-archive-001.log +++ b/test/expect/mock-archive-001.log @@ -29,6 +29,7 @@ stanza-create db - stanza create (db-primary host) > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db --no-online stanza-create ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db +P00 INFO: stanza-create for stanza 'db' on repo1 P00 INFO: stanza-create command end: completed successfully + supplemental file: [TEST_PATH]/db-primary/repo/backup/db/backup.info @@ -112,7 +113,7 @@ P00 INFO: archive-get command end: aborted with exception [044] > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-push [TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002 ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: archive-push command begin [BACKREST-VERSION]: [[TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002] 
--buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db -P00 ERROR: [044]: PostgreSQL version 9.4, system-id 1000000000000000094 do not match stanza version 9.4, system-id 5000900090001855000 +P00 ERROR: [044]: PostgreSQL version 9.4, system-id 1000000000000000094 do not match repo1 stanza version 9.4, system-id 5000900090001855000 HINT: are you archiving to the correct stanza? P00 INFO: archive-push command end: aborted with exception [044] @@ -149,7 +150,7 @@ P00 INFO: start command end: completed successfully > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-push [TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002 ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: archive-push command begin [BACKREST-VERSION]: [[TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002] --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db -P00 WARN: WAL file '000000010000000100000002' already exists in the archive with the same checksum +P00 WARN: WAL file '000000010000000100000002' already exists in the repo1 archive with the same checksum HINT: this is valid in some recovery scenarios but may also indicate a problem. 
P00 INFO: pushed WAL file '000000010000000100000002' to the archive P00 INFO: archive-push command end: completed successfully @@ -157,7 +158,7 @@ P00 INFO: archive-push command end: completed successfully > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-push [TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002 ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: archive-push command begin [BACKREST-VERSION]: [[TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002] --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db -P00 ERROR: [045]: WAL file '000000010000000100000002' already exists in the archive +P00 ERROR: [045]: WAL file '000000010000000100000002' already exists in the repo1 archive with a different checksum P00 INFO: archive-push command end: aborted with exception [045] > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get --archive-async --repo-type=cifs 000000010000000100000002 [TEST_PATH]/db-primary/db/base/pg_xlog/RECOVERYXLOG @@ -187,7 +188,7 @@ P00 INFO: archive-push command end: completed successfully > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-push [TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002.partial ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: archive-push command begin [BACKREST-VERSION]: [[TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002.partial] --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db -P00 WARN: WAL file '000000010000000100000002.partial' already exists in the archive with the same checksum +P00 WARN: WAL file '000000010000000100000002.partial' already exists in the repo1 archive with the same checksum HINT: this is valid in some recovery scenarios but may also indicate a problem. 
P00 INFO: pushed WAL file '000000010000000100000002.partial' to the archive P00 INFO: archive-push command end: completed successfully @@ -195,5 +196,5 @@ P00 INFO: archive-push command end: completed successfully > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-push [TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002.partial ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: archive-push command begin [BACKREST-VERSION]: [[TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002.partial] --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db -P00 ERROR: [045]: WAL file '000000010000000100000002.partial' already exists in the archive +P00 ERROR: [045]: WAL file '000000010000000100000002.partial' already exists in the repo1 archive with a different checksum P00 INFO: archive-push command end: aborted with exception [045] diff --git a/test/expect/mock-archive-002.log b/test/expect/mock-archive-002.log index cacca211e..7129f8f9d 100644 --- a/test/expect/mock-archive-002.log +++ b/test/expect/mock-archive-002.log @@ -29,6 +29,7 @@ stanza-create db - stanza create (backup host) > [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db --no-online stanza-create ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=/ --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key= --repo1-s3-key-secret= --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db +P00 INFO: stanza-create for stanza 'db' on repo1 P00 DETAIL: statistics: STATISTICS P00 INFO: stanza-create command end: completed successfully @@ -107,7 +108,7 @@ P00 INFO: archive-get command end: aborted with exception [044] > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-push [TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002 ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: archive-push command begin [BACKREST-VERSION]: [[TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002] --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-level-network=1 --compress-type=none 
--config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-host=backup --repo1-host-cmd=[BACKREST-BIN] --repo1-host-config=[TEST_PATH]/backup/pgbackrest.conf --repo1-host-user=[USER-1] --stanza=db -P00 ERROR: [044]: PostgreSQL version 9.4, system-id 1000000000000000094 do not match stanza version 9.4, system-id 5000900090001855000 +P00 ERROR: [044]: PostgreSQL version 9.4, system-id 1000000000000000094 do not match repo1 stanza version 9.4, system-id 5000900090001855000 HINT: are you archiving to the correct stanza? P00 INFO: archive-push command end: aborted with exception [044] @@ -144,7 +145,7 @@ P00 INFO: start command end: completed successfully > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-push [TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002 ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: archive-push command begin [BACKREST-VERSION]: [[TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002] --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-level-network=1 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-host=backup --repo1-host-cmd=[BACKREST-BIN] --repo1-host-config=[TEST_PATH]/backup/pgbackrest.conf --repo1-host-user=[USER-1] --stanza=db -P00 WARN: WAL file '000000010000000100000002' already exists in the archive with the same checksum +P00 WARN: WAL file '000000010000000100000002' already exists in the repo1 archive with the same checksum HINT: this is valid in some recovery scenarios but may also indicate a problem. 
P00 INFO: pushed WAL file '000000010000000100000002' to the archive P00 INFO: archive-push command end: completed successfully @@ -152,7 +153,7 @@ P00 INFO: archive-push command end: completed successfully > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-push [TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002 ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: archive-push command begin [BACKREST-VERSION]: [[TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002] --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-level-network=1 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-host=backup --repo1-host-cmd=[BACKREST-BIN] --repo1-host-config=[TEST_PATH]/backup/pgbackrest.conf --repo1-host-user=[USER-1] --stanza=db -P00 ERROR: [045]: WAL file '000000010000000100000002' already exists in the archive +P00 ERROR: [045]: WAL file '000000010000000100000002' already exists in the repo1 archive with a different checksum P00 INFO: archive-push command end: aborted with exception [045] > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get --cmd-ssh=/usr/bin/ssh --archive-async 000000010000000100000002 [TEST_PATH]/db-primary/db/base/pg_xlog/RECOVERYXLOG @@ -182,7 +183,7 @@ P00 INFO: archive-push command end: completed successfully > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-push [TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002.partial ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: archive-push command begin [BACKREST-VERSION]: [[TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002.partial] --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-level-network=1 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-host=backup --repo1-host-cmd=[BACKREST-BIN] --repo1-host-config=[TEST_PATH]/backup/pgbackrest.conf --repo1-host-user=[USER-1] --stanza=db -P00 WARN: WAL file '000000010000000100000002.partial' already exists in the archive with the same checksum +P00 WARN: WAL file '000000010000000100000002.partial' already exists in the repo1 archive with the same checksum HINT: this is valid in some recovery scenarios but may also indicate a problem. 
P00 INFO: pushed WAL file '000000010000000100000002.partial' to the archive P00 INFO: archive-push command end: completed successfully @@ -190,5 +191,5 @@ P00 INFO: archive-push command end: completed successfully > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-push [TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002.partial ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: archive-push command begin [BACKREST-VERSION]: [[TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002.partial] --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-level-network=1 --compress-type=none --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-host=backup --repo1-host-cmd=[BACKREST-BIN] --repo1-host-config=[TEST_PATH]/backup/pgbackrest.conf --repo1-host-user=[USER-1] --stanza=db -P00 ERROR: [045]: WAL file '000000010000000100000002.partial' already exists in the archive +P00 ERROR: [045]: WAL file '000000010000000100000002.partial' already exists in the repo1 archive with a different checksum P00 INFO: archive-push command end: aborted with exception [045] diff --git a/test/expect/mock-archive-stop-001.log b/test/expect/mock-archive-stop-001.log index 2dcc4434e..b51892f63 100644 --- a/test/expect/mock-archive-stop-001.log +++ b/test/expect/mock-archive-stop-001.log @@ -5,6 +5,7 @@ stanza-create db - create required data for stanza (db-primary host) > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db --no-online stanza-create ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db +P00 INFO: stanza-create for stanza 'db' on repo1 P00 INFO: stanza-create command end: completed successfully + supplemental file: [TEST_PATH]/db-primary/repo/backup/db/backup.info @@ -54,12 +55,12 @@ backrest-checksum="[CHECKSUM]" > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --log-level-console=warn --archive-push-queue-max=33554432 --stanza=db archive-push [TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002 ------------------------------------------------------------------------------------------------------------------------------------ -P00 ERROR: [044]: PostgreSQL version 9.4, system-id 1000000000000000094 do not match stanza version 8.0, system-id 1000000000000000094 +P00 ERROR: [044]: PostgreSQL version 9.4, system-id 1000000000000000094 do not match repo1 stanza version 8.0, system-id 1000000000000000094 HINT: are you archiving to the 
correct stanza? > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --log-level-console=warn --archive-push-queue-max=33554432 --stanza=db archive-push [TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000003 ------------------------------------------------------------------------------------------------------------------------------------ -P00 ERROR: [044]: PostgreSQL version 9.4, system-id 1000000000000000094 do not match stanza version 8.0, system-id 1000000000000000094 +P00 ERROR: [044]: PostgreSQL version 9.4, system-id 1000000000000000094 do not match repo1 stanza version 8.0, system-id 1000000000000000094 HINT: are you archiving to the correct stanza? > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --log-level-console=warn --archive-push-queue-max=33554432 --stanza=db archive-push [TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000004 --repo1-host=bogus diff --git a/test/expect/mock-archive-stop-002.log b/test/expect/mock-archive-stop-002.log index e3427d9c8..25f3d0d72 100644 --- a/test/expect/mock-archive-stop-002.log +++ b/test/expect/mock-archive-stop-002.log @@ -5,6 +5,7 @@ stanza-create db - create required data for stanza (backup host) > [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db --no-online stanza-create ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-path=/ --repo1-type=azure --stanza=db +P00 INFO: stanza-create for stanza 'db' on repo1 P00 DETAIL: statistics: STATISTICS P00 INFO: stanza-create command end: completed successfully diff --git a/test/expect/mock-stanza-001.log b/test/expect/mock-stanza-001.log index 82b269af8..886037802 100644 --- a/test/expect/mock-stanza-001.log +++ b/test/expect/mock-stanza-001.log @@ -12,6 +12,7 @@ stanza-upgrade db - fail on stanza not initialized since archive.info is missing > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db --no-online stanza-upgrade ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-upgrade command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db +P00 INFO: stanza-upgrade for stanza 'db' on repo1 P00 ERROR: [055]: unable to load info file 
'[TEST_PATH]/db-primary/repo/archive/db/archive.info' or '[TEST_PATH]/db-primary/repo/archive/db/archive.info.copy': FileMissingError: unable to open missing file '[TEST_PATH]/db-primary/repo/archive/db/archive.info' for read FileMissingError: unable to open missing file '[TEST_PATH]/db-primary/repo/archive/db/archive.info.copy' for read @@ -25,6 +26,7 @@ stanza-create db - successfully create the stanza (db-primary host) > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db --no-online stanza-create ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db +P00 INFO: stanza-create for stanza 'db' on repo1 P00 INFO: stanza-create command end: completed successfully + supplemental file: [TEST_PATH]/db-primary/repo/backup/db/backup.info @@ -67,7 +69,8 @@ stanza-create db - do not fail on rerun of stanza-create - info files exist and > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db --no-online stanza-create ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db -P00 INFO: stanza 'db' already exists and is valid +P00 INFO: stanza-create for stanza 'db' on repo1 +P00 INFO: stanza 'db' already exists on repo1 and is valid P00 INFO: stanza-create command end: completed successfully + supplemental file: [TEST_PATH]/db-primary/repo/backup/db/backup.info @@ -111,6 +114,7 @@ stanza-create db - fail on database mismatch and warn force option deprecated (d ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --force --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db P00 WARN: option --force is no longer supported +P00 INFO: stanza-create for stanza 'db' on repo1 P00 ERROR: [028]: backup and archive info files exist but do not match the database HINT: is this the correct stanza? HINT: did an error occur during stanza-upgrade? 
@@ -156,7 +160,8 @@ stanza-upgrade db - already up to date (db-primary host) > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db --no-online stanza-upgrade ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-upgrade command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db -P00 INFO: stanza 'db' is already up to date +P00 INFO: stanza-upgrade for stanza 'db' on repo1 +P00 INFO: stanza 'db' on repo1 is already up to date P00 INFO: stanza-upgrade command end: completed successfully + supplemental file: [TEST_PATH]/db-primary/repo/backup/db/backup.info @@ -205,7 +210,8 @@ stanza-create db - fail on archive info file missing from non-empty dir (db-prim > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db --no-online stanza-create ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db -P00 ERROR: [055]: backup.info exists but archive.info is missing +P00 INFO: stanza-create for stanza 'db' on repo1 +P00 ERROR: [055]: backup.info exists but archive.info is missing on repo1 HINT: this may be a symptom of repository corruption! P00 INFO: stanza-create command end: aborted with exception [055] @@ -236,13 +242,14 @@ P00 INFO: archive-push command end: completed successfully > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --log-level-console=warn --archive-push-queue-max=33554432 --stanza=db archive-push [TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000001 ------------------------------------------------------------------------------------------------------------------------------------ -P00 ERROR: [044]: PostgreSQL version 9.4, system-id 1000000000000000094 do not match stanza version 9.3, system-id 1000000000000000093 +P00 ERROR: [044]: PostgreSQL version 9.4, system-id 1000000000000000094 do not match repo1 stanza version 9.3, system-id 1000000000000000093 HINT: are you archiving to the correct stanza? 
stanza-upgrade db - successful upgrade creates additional history (db-primary host) > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db --no-online stanza-upgrade ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-upgrade command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db +P00 INFO: stanza-upgrade for stanza 'db' on repo1 P00 INFO: stanza-upgrade command end: completed successfully + supplemental file: [TEST_PATH]/db-primary/repo/backup/db/backup.info @@ -293,9 +300,9 @@ P00 INFO: archive-get command end: completed successfully ------------------------------------------------------------------------------------------------------------------------------------ full backup - create first full backup (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo1-retention-full=2 --no-online --type=full --stanza=db backup +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo1-retention-full=2 --no-online --repo=1 --type=full --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=zst --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --repo1-retention-full=2 --stanza=db --start-fast --type=full +P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=zst --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --repo1-retention-full=2 --stanza=db --start-fast --type=full P01 INFO: backup file [TEST_PATH]/db-primary/db/base/pg_xlog/RECOVERYXLOG (16MB, 33%) checksum 51a8525d254c01f5edddda30b7fe697c7e44705c P01 INFO: backup file [TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002 (16MB, 66%) checksum 51a8525d254c01f5edddda30b7fe697c7e44705c P01 INFO: backup file [TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000001 (16MB, 99%) checksum f92539dea1f9482e2946c1138eeeecdea29d7f19 @@ -305,7 +312,7 @@ P01 INFO: backup file [TEST_PATH]/db-primary/db/base/pg_xlog/archive_status/00 P00 INFO: full backup size = 48MB P00 INFO: new backup label = [BACKUP-FULL-1] P00 INFO: backup command 
end: completed successfully -P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo1-path=[TEST_PATH]/db-primary/repo --repo1-retention-full=2 --stanza=db +P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --repo1-retention-full=2 --stanza=db P00 INFO: expire command end: completed successfully + supplemental file: [TEST_PATH]/db-primary/pgbackrest.conf @@ -338,6 +345,7 @@ stanza-upgrade db - successfully upgrade (db-primary host) > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db --no-online stanza-upgrade ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-upgrade command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db +P00 INFO: stanza-upgrade for stanza 'db' on repo1 P00 INFO: stanza-upgrade command end: completed successfully + supplemental file: [TEST_PATH]/db-primary/repo/backup/db/backup.info @@ -387,6 +395,7 @@ stanza-upgrade db - upgrade fails with mismatched db-ids (db-primary host) > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db --no-online stanza-upgrade ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-upgrade command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db +P00 INFO: stanza-upgrade for stanza 'db' on repo1 P00 ERROR: [028]: backup info file and archive info file do not match archive: id = 2, version = 9.5, system-id = 1000000000000000095 backup : id = 3, version = 9.5, system-id = 1000000000000000095 @@ -438,9 +447,9 @@ backrest-checksum="[CHECKSUM]" ------------------------------------------------------------------------------------------------------------------------------------ diff backup - diff changed to full backup (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo1-retention-full=2 --no-online --type=diff --stanza=db backup +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] 
--config=[TEST_PATH]/db-primary/pgbackrest.conf --repo1-retention-full=2 --no-online --repo=1 --type=diff --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=zst --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --repo1-retention-full=2 --stanza=db --start-fast --type=diff +P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-type=zst --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --repo1-retention-full=2 --stanza=db --start-fast --type=diff P00 WARN: no prior backup exists, diff backup has been changed to full P01 INFO: backup file [TEST_PATH]/db-primary/db/base/pg_xlog/RECOVERYXLOG (16MB, 33%) checksum 51a8525d254c01f5edddda30b7fe697c7e44705c P01 INFO: backup file [TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002 (16MB, 66%) checksum 51a8525d254c01f5edddda30b7fe697c7e44705c @@ -451,7 +460,7 @@ P01 INFO: backup file [TEST_PATH]/db-primary/db/base/pg_xlog/archive_status/00 P00 INFO: full backup size = 48MB P00 INFO: new backup label = [BACKUP-FULL-2] P00 INFO: backup command end: completed successfully -P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo1-path=[TEST_PATH]/db-primary/repo --repo1-retention-full=2 --stanza=db +P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log --no-log-timestamp --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --repo1-retention-full=2 --stanza=db P00 INFO: remove archive path: [TEST_PATH]/db-primary/repo/archive/db/9.3-1 P00 INFO: expire command end: completed successfully @@ -482,9 +491,9 @@ archive-copy=y start-fast=y stanza-delete db - fail on missing stop file (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db stanza-delete +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db stanza-delete ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: stanza-delete command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] 
--config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db +P00 INFO: stanza-delete command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db P00 ERROR: [055]: stop file does not exist for stanza 'db' HINT: has the pgbackrest stop command been run on this server for this stanza? P00 INFO: stanza-delete command end: aborted with exception [055] @@ -506,9 +515,9 @@ P00 INFO: stop command begin [BACKREST-VERSION]: --config=[TEST_PATH]/db-prima P00 INFO: stop command end: completed successfully stanza-delete db - successfully delete the stanza (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db stanza-delete +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db stanza-delete ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: stanza-delete command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db +P00 INFO: stanza-delete command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/db-primary/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/db-primary/log[] --no-log-timestamp --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo=1 --repo1-path=[TEST_PATH]/db-primary/repo --stanza=db P00 INFO: stanza-delete command end: completed successfully db must not exist for successful delete diff --git a/test/expect/mock-stanza-002.log b/test/expect/mock-stanza-002.log index 0459f390b..d4df0f997 100644 --- a/test/expect/mock-stanza-002.log +++ b/test/expect/mock-stanza-002.log @@ -6,13 +6,13 @@ stanza-create db - fail on missing control file (backup host) ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] 
--pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db P00 ERROR: [055]: raised from remote-0 protocol on 'db-primary': unable to open missing file '[TEST_PATH]/db-primary/db/base/global/pg_control' for read -P00 DETAIL: statistics: STATISTICS P00 INFO: stanza-create command end: aborted with exception [055] stanza-upgrade db - fail on stanza not initialized since archive.info is missing (backup host) > [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db --no-online stanza-upgrade ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-upgrade command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db +P00 INFO: stanza-upgrade for stanza 'db' on repo1 P00 ERROR: [055]: unable to load info file '/archive/db/archive.info' or '/archive/db/archive.info.copy': FileMissingError: unable to open '/archive/db/archive.info': No such file or directory FileMissingError: unable to open '/archive/db/archive.info.copy': No such file or directory @@ -27,6 +27,7 @@ stanza-create db - successfully create the stanza (backup host) > [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db --no-online stanza-create ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db +P00 INFO: stanza-create for stanza 'db' on repo1 P00 DETAIL: statistics: STATISTICS P00 INFO: stanza-create command end: completed successfully @@ -76,7 +77,8 @@ stanza-create db - do not fail on rerun of stanza-create - info files exist and > [CONTAINER-EXEC] backup 
[BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db --no-online stanza-create ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db -P00 INFO: stanza 'db' already exists and is valid +P00 INFO: stanza-create for stanza 'db' on repo1 +P00 INFO: stanza 'db' already exists on repo1 and is valid P00 DETAIL: statistics: STATISTICS P00 INFO: stanza-create command end: completed successfully @@ -127,6 +129,7 @@ stanza-create db - fail on database mismatch and warn force option deprecated (b ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --force --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db P00 WARN: option --force is no longer supported +P00 INFO: stanza-create for stanza 'db' on repo1 P00 ERROR: [028]: backup and archive info files exist but do not match the database HINT: is this the correct stanza? HINT: did an error occur during stanza-upgrade? 
@@ -179,7 +182,8 @@ stanza-upgrade db - already up to date (backup host) > [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db --no-online stanza-upgrade ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-upgrade command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db -P00 INFO: stanza 'db' is already up to date +P00 INFO: stanza-upgrade for stanza 'db' on repo1 +P00 INFO: stanza 'db' on repo1 is already up to date P00 DETAIL: statistics: STATISTICS P00 INFO: stanza-upgrade command end: completed successfully @@ -239,13 +243,14 @@ P00 INFO: archive-push command end: completed successfully > [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --log-level-console=warn --archive-push-queue-max=33554432 --stanza=db archive-push [TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000001 ------------------------------------------------------------------------------------------------------------------------------------ -P00 ERROR: [044]: PostgreSQL version 9.4, system-id 1000000000000000094 do not match stanza version 9.3, system-id 1000000000000000093 +P00 ERROR: [044]: PostgreSQL version 9.4, system-id 1000000000000000094 do not match repo1 stanza version 9.3, system-id 1000000000000000093 HINT: are you archiving to the correct stanza? 
stanza-upgrade db - successful upgrade creates additional history (backup host) > [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db --no-online stanza-upgrade ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-upgrade command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db +P00 INFO: stanza-upgrade for stanza 'db' on repo1 P00 DETAIL: statistics: STATISTICS P00 INFO: stanza-upgrade command end: completed successfully @@ -303,9 +308,9 @@ P00 INFO: archive-get command end: completed successfully ------------------------------------------------------------------------------------------------------------------------------------ full backup - create first full backup (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo1-retention-full=2 --no-online --type=full --stanza=db backup +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo1-retention-full=2 --no-online --repo=1 --type=full --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-level-network=1 --compress-type=lz4 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-retention-full=2 --repo1-type=azure --stanza=db --start-fast --type=full +P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-level-network=1 --compress-type=lz4 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo=1 
--repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-retention-full=2 --repo1-type=azure --stanza=db --start-fast --type=full P01 INFO: backup file db-primary:[TEST_PATH]/db-primary/db/base/pg_xlog/RECOVERYXLOG (16MB, 33%) checksum 51a8525d254c01f5edddda30b7fe697c7e44705c P01 INFO: backup file db-primary:[TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002 (16MB, 66%) checksum 51a8525d254c01f5edddda30b7fe697c7e44705c P01 INFO: backup file db-primary:[TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000001 (16MB, 99%) checksum f92539dea1f9482e2946c1138eeeecdea29d7f19 @@ -316,7 +321,7 @@ P00 INFO: full backup size = 48MB P00 INFO: new backup label = [BACKUP-FULL-1] P00 DETAIL: statistics: STATISTICS P00 INFO: backup command end: completed successfully -P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/backup/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log --no-log-timestamp --repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-retention-full=2 --repo1-type=azure --stanza=db +P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/backup/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log --no-log-timestamp --repo=1 --repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-retention-full=2 --repo1-type=azure --stanza=db P00 DETAIL: statistics: STATISTICS P00 INFO: expire command end: completed successfully @@ -388,6 +393,7 @@ stanza-upgrade db - successfully upgrade (backup host) > [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db --no-online stanza-upgrade ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-upgrade command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db +P00 INFO: stanza-upgrade for stanza 'db' on repo1 P00 DETAIL: statistics: STATISTICS P00 INFO: stanza-upgrade command end: completed successfully @@ -444,6 +450,7 @@ stanza-upgrade db - upgrade fails with mismatched db-ids (backup host) > 
[CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db --no-online stanza-upgrade ------------------------------------------------------------------------------------------------------------------------------------ P00 INFO: stanza-upgrade command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db +P00 INFO: stanza-upgrade for stanza 'db' on repo1 P00 ERROR: [028]: backup info file and archive info file do not match archive: id = 2, version = 9.5, system-id = 1000000000000000095 backup : id = 3, version = 9.5, system-id = 1000000000000000095 @@ -502,9 +509,9 @@ backrest-checksum="[CHECKSUM]" ------------------------------------------------------------------------------------------------------------------------------------ diff backup - diff changed to full backup (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo1-retention-full=2 --no-online --type=diff --stanza=db backup +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo1-retention-full=2 --no-online --repo=1 --type=diff --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-level-network=1 --compress-type=lz4 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-retention-full=2 --repo1-type=azure --stanza=db --start-fast --type=diff +P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-level-network=1 --compress-type=lz4 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --job-retry=0 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo=1 
--repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-retention-full=2 --repo1-type=azure --stanza=db --start-fast --type=diff P00 WARN: no prior backup exists, diff backup has been changed to full P01 INFO: backup file db-primary:[TEST_PATH]/db-primary/db/base/pg_xlog/RECOVERYXLOG (16MB, 33%) checksum 51a8525d254c01f5edddda30b7fe697c7e44705c P01 INFO: backup file db-primary:[TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002 (16MB, 66%) checksum 51a8525d254c01f5edddda30b7fe697c7e44705c @@ -516,7 +523,7 @@ P00 INFO: full backup size = 48MB P00 INFO: new backup label = [BACKUP-FULL-2] P00 DETAIL: statistics: STATISTICS P00 INFO: backup command end: completed successfully -P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/backup/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log --no-log-timestamp --repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-retention-full=2 --repo1-type=azure --stanza=db +P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/backup/pgbackrest.conf --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log --no-log-timestamp --repo=1 --repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-retention-full=2 --repo1-type=azure --stanza=db P00 INFO: remove archive path: /archive/db/9.3-1 P00 DETAIL: statistics: STATISTICS P00 INFO: expire command end: completed successfully @@ -586,9 +593,9 @@ archive-copy=y start-fast=y stanza-delete db - fail on missing stop file (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db stanza-delete +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo=1 --stanza=db stanza-delete ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: stanza-delete command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db +P00 INFO: stanza-delete command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 
--config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo=1 --repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db P00 ERROR: [055]: stop file does not exist for stanza 'db' HINT: has the pgbackrest stop command been run on this server for this stanza? P00 DETAIL: statistics: STATISTICS @@ -611,9 +618,9 @@ P00 INFO: stop command begin [BACKREST-VERSION]: --config=[TEST_PATH]/backup/p P00 INFO: stop command end: completed successfully stanza-delete db - successfully delete the stanza (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db stanza-delete +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo=1 --stanza=db stanza-delete ------------------------------------------------------------------------------------------------------------------------------------ -P00 INFO: stanza-delete command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db +P00 INFO: stanza-delete command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --exec-id=[EXEC-ID] --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo=1 --repo1-azure-account= --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key= --no-repo1-azure-verify-tls --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db P00 DETAIL: statistics: STATISTICS P00 INFO: stanza-delete command end: completed successfully diff --git a/test/expect/real-all-001.log b/test/expect/real-all-001.log index 3bc33dd54..548c79de7 100644 --- a/test/expect/real-all-001.log +++ b/test/expect/real-all-001.log @@ -14,11 +14,11 @@ check db - verify check command runs successfully (backup host) ------------------------------------------------------------------------------------------------------------------------------------ full backup - 
fail on backup lock exists (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --type=full --stanza=db backup +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo=1 --type=full --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ -full backup - update during backup (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --buffer-size=[BUFFER-SIZE] --type=full --stanza=db backup +full backup - repo1 (backup host) +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --buffer-size=[BUFFER-SIZE] --repo=1 --type=full --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-primary/pgbackrest.conf @@ -120,7 +120,7 @@ archive-copy=y start-fast=y full backup - with disabled expire-auto (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo1-retention-full=1 --no-expire-auto --type=full --stanza=db backup +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo1-retention-full=1 --no-expire-auto --repo=1 --type=full --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-primary/pgbackrest.conf @@ -222,26 +222,26 @@ archive-copy=y start-fast=y expire full=1 (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo1-retention-full=1 --stanza=db expire +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo1-retention-full=1 --repo=1 --stanza=db expire ------------------------------------------------------------------------------------------------------------------------------------ restore, type 'standby', remap - restore backup on replica (db-standby host) -> [CONTAINER-EXEC] db-standby [BACKREST-BIN] --config=[TEST_PATH]/db-standby/pgbackrest.conf --recovery-option="primary_conninfo=host=db-primary port=6543 user=replicator" --type=standby --link-map="pg_xlog=[TEST_PATH]/db-standby/db/pg_xlog" --link-all --stanza=db restore +> [CONTAINER-EXEC] db-standby [BACKREST-BIN] --config=[TEST_PATH]/db-standby/pgbackrest.conf --recovery-option="primary_conninfo=host=db-primary port=6543 user=replicator" --type=standby --link-map="pg_xlog=[TEST_PATH]/db-standby/db/pg_xlog" --link-all --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-standby/db/base/recovery.conf ----------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] primary_conninfo = 'host=db-primary port=6543 user=replicator' -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-standby/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-standby/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' standby_mode = 'on' full backup - backup from standby, failure to access at least one standby (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] 
--config=[TEST_PATH]/backup/pgbackrest.conf --pg8-host=bogus --backup-standby --type=full --stanza=db backup +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --pg8-host=bogus --backup-standby --repo=1 --type=full --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ full backup - backup from standby (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo1-retention-full=1 --backup-standby --type=full --stanza=db backup +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo1-retention-full=1 --backup-standby --repo=1 --type=full --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-primary/pgbackrest.conf @@ -350,7 +350,7 @@ check db - verify check command on standby (db-standby host) ------------------------------------------------------------------------------------------------------------------------------------ diff backup - backup for adhoc expire (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --type=diff --stanza=db backup +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo=1 --type=diff --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-primary/pgbackrest.conf @@ -459,7 +459,7 @@ stop all stanzas (db-primary host) ------------------------------------------------------------------------------------------------------------------------------------ incr backup - attempt backup when stopped (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db backup +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo=1 --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ start all stanzas (db-primary host) @@ -467,15 +467,15 @@ start all stanzas (db-primary host) ------------------------------------------------------------------------------------------------------------------------------------ incr backup - fail on archive_mode=always (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db backup +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo=1 --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ expire --set=[BACKUP-DIFF-1] (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --set=[BACKUP-DIFF-1] --stanza=db expire +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --set=[BACKUP-DIFF-1] --repo=1 --stanza=db expire ------------------------------------------------------------------------------------------------------------------------------------ -incr backup - update during backup (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stop-auto --buffer-size=[BUFFER-SIZE] --delta --stanza=db backup +incr 
backup - delta (backup host) +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stop-auto --buffer-size=[BUFFER-SIZE] --delta --repo=1 --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-primary/pgbackrest.conf @@ -584,97 +584,97 @@ check db - check command with tablespace (backup host) ------------------------------------------------------------------------------------------------------------------------------------ restore, type 'default', expect exit 38 - pg running (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --link-all --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --link-all --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ restore, type 'default', expect exit 40 - path not empty (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --link-all --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --link-all --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ restore, type 'default' (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-include=test2 --db-include=test3 --buffer-size=[BUFFER-SIZE] --link-all --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --db-include=test2 --db-include=test3 --buffer-size=[BUFFER-SIZE] --link-all --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-primary/db/base/recovery.conf ----------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' restore, force, backup '[BACKUP-FULL-1]', type 'immediate', target-action=promote (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --force --set=[BACKUP-FULL-1] --type=immediate --link-all --target-action=promote --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --force --set=[BACKUP-FULL-1] --type=immediate --link-all --target-action=promote --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-primary/db/base/recovery.conf ----------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = 
'[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' recovery_target = 'immediate' recovery_target_action = 'promote' restore, force, backup '[BACKUP-INCR-1]', type 'xid', target '[XID-TARGET-1]', target-action=promote (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --force --set=[BACKUP-INCR-1] --tablespace-map-all=../../tablespace --type=xid --target="[XID-TARGET-1]" --link-all --target-action=promote --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --force --set=[BACKUP-INCR-1] --tablespace-map-all=../../tablespace --type=xid --target="[XID-TARGET-1]" --link-all --target-action=promote --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-primary/db/base/recovery.conf ----------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' recovery_target_xid = '[XID-TARGET-1]' recovery_target_action = 'promote' restore, type 'preserve' (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --type=preserve --link-all --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --type=preserve --link-all --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-primary/db/base/recovery.conf ----------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' recovery_target_xid = '[XID-TARGET-1]' recovery_target_action = 'promote' restore delta, type 'time', target '[TIMESTAMP-TARGET-1]', target-action=promote (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --type=time --target="[TIMESTAMP-TARGET-1]" --link-all --target-action=promote --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --type=time --target="[TIMESTAMP-TARGET-1]" --link-all --target-action=promote --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-primary/db/base/recovery.conf ----------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' recovery_target_time = 
'[TIMESTAMP-TARGET-1]' recovery_target_action = 'promote' restore delta, backup '[BACKUP-INCR-1]', type 'xid', target '[XID-TARGET-1]', exclusive, target-action=promote (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --set=[BACKUP-INCR-1] --type=xid --target="[XID-TARGET-1]" --target-exclusive --link-all --target-action=promote --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --set=[BACKUP-INCR-1] --type=xid --target="[XID-TARGET-1]" --target-exclusive --link-all --target-action=promote --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-primary/db/base/recovery.conf ----------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' recovery_target_xid = '[XID-TARGET-1]' recovery_target_inclusive = 'false' recovery_target_action = 'promote' restore delta, force, type 'name', target 'backrest', target-action=promote (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --force --type=name --target="backrest" --link-all --target-action=promote --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --force --type=name --target="backrest" --link-all --target-action=promote --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-primary/db/base/recovery.conf ----------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' recovery_target_name = 'backrest' recovery_target_action = 'promote' restore delta, backup '[BACKUP-INCR-1]', type 'standby', timeline '4' (db-primary host) -> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --set=[BACKUP-INCR-1] --type=standby --target-timeline="4" --link-all --stanza=db restore +> [CONTAINER-EXEC] db-primary [BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --delta --set=[BACKUP-INCR-1] --type=standby --target-timeline="4" --link-all --repo=1 --stanza=db restore ------------------------------------------------------------------------------------------------------------------------------------ + supplemental file: [TEST_PATH]/db-primary/db/base/recovery.conf ----------------------------------------------------------------- # Recovery settings generated by pgBackRest restore on [TIMESTAMP] -restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --stanza=db archive-get %f "%p"' +restore_command = '[BACKREST-BIN] --config=[TEST_PATH]/db-primary/pgbackrest.conf --repo=1 --stanza=db archive-get %f "%p"' standby_mode 
= 'on' recovery_target_timeline = '4' @@ -687,7 +687,7 @@ stop db stanza (backup host) ------------------------------------------------------------------------------------------------------------------------------------ stanza-delete db - delete stanza with --force when pgbackrest on pg host not accessible (backup host) -> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db --force stanza-delete +> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo=1 --stanza=db --force stanza-delete ------------------------------------------------------------------------------------------------------------------------------------ start all stanzas (db-primary host) diff --git a/test/lib/pgBackRestTest/Common/StorageRepo.pm b/test/lib/pgBackRestTest/Common/StorageRepo.pm index ac1e8cd60..7469342d2 100644 --- a/test/lib/pgBackRestTest/Common/StorageRepo.pm +++ b/test/lib/pgBackRestTest/Common/StorageRepo.pm @@ -49,6 +49,7 @@ sub new $self->{strType}, $self->{lBufferMax}, $self->{iTimeoutIo}, + $self->{iRepo}, $self->{strDefaultPathMode}, $self->{strDefaultFileMode}, ) = @@ -59,6 +60,7 @@ sub new {name => 'strType'}, {name => 'lBufferMax'}, {name => 'iTimeoutIo'}, + {name => 'iRepo'}, {name => 'strDefaultPathMode', optional => true, default => '0750'}, {name => 'strDefaultFileMode', optional => true, default => '0640'}, ); @@ -165,7 +167,7 @@ sub create # Assign function parameters, defaults, and log debug info my ($strOperation) = logDebugParam(__PACKAGE__ . '->create'); - $self->exec("repo-create"); + $self->exec("--repo=$self->{iRepo} repo-create"); # Return from function and log return values if any return logDebugReturn($strOperation); @@ -241,7 +243,7 @@ sub get # Get file my ($tResult, $iExitStatus) = $self->exec( (defined($strCipherPass) ? ' --cipher-pass=' . $self->escape($strCipherPass) : '') . ($bRaw ? ' --raw' : '') . - ($bIgnoreMissing ? ' --ignore-missing' : '') . ' repo-get ' . $self->escape($strFile)); + ($bIgnoreMissing ? ' --ignore-missing' : '') . " --repo=$self->{iRepo} repo-get " . $self->escape($strFile)); # Error if missing an not ignored if ($iExitStatus == 1 && !$bIgnoreMissing) @@ -353,7 +355,8 @@ sub manifest ); my $rhManifest = $self->{oJSON}->decode( - $self->exec("--output=json " . ($bRecurse ? ' --recurse' : '') . " repo-ls " . $self->escape($strPathExp))); + $self->exec( + "--output=json" . ($bRecurse ? ' --recurse' : '') . " --repo=$self->{iRepo} repo-ls " . $self->escape($strPathExp))); # Transform the manifest to the old format foreach my $strKey (keys(%{$rhManifest})) @@ -447,7 +450,7 @@ sub pathRemove {name => 'bRecurse', optional => true, default => false}, ); - $self->exec("repo-rm " . ($bRecurse ? '--recurse ' : '') . $self->escape($strPath)); + $self->exec("--repo=$self->{iRepo} repo-rm " . ($bRecurse ? '--recurse ' : '') . $self->escape($strPath)); # Return from function and log return values if any return logDebugReturn($strOperation); @@ -487,7 +490,7 @@ sub put # Put file my $strCommand = "$self->{strCommand}" . (defined($strCipherPass) ? ' --cipher-pass=' . $self->escape($strCipherPass) : '') . - ($bRaw ? ' --raw' : '') . ' repo-put ' . $self->escape($strFile); + ($bRaw ? ' --raw' : '') . " --repo=$self->{iRepo} repo-put " . $self->escape($strFile); my $oBuffer = new pgBackRestTest::Common::Io::Buffered( new pgBackRestTest::Common::Io::Handle($strCommand), $self->{iTimeoutIo}, $self->{lBufferMax}); @@ -533,7 +536,7 @@ sub remove {name => 'xFileExp'}, ); - $self->exec("repo-rm " . 
$self->escape($strFile)); + $self->exec("--repo=$self->{iRepo} repo-rm " . $self->escape($strFile)); # Return from function and log return values if any return logDebugReturn($strOperation); @@ -585,24 +588,27 @@ sub storageRepo ( $strOperation, $strStanza, + $iRepo, ) = logDebugParam ( __PACKAGE__ . '::storageRepo', \@_, {name => 'strStanza', optional => true, trace => true}, + {name => 'iRepo', optional => true, default => 1, trace => true}, ); # Create storage if not defined - if (!defined($oRepoStorage)) + if (!defined($oRepoStorage->{$iRepo})) { - $oRepoStorage = new pgBackRestTest::Common::StorageRepo($strStorageRepoCommand, $strStorageRepoType, 64 * 1024, 60); + $oRepoStorage->{$iRepo} = new pgBackRestTest::Common::StorageRepo( + $strStorageRepoCommand, $strStorageRepoType, 64 * 1024, 60, $iRepo); } # Return from function and log return values if any return logDebugReturn ( $strOperation, - {name => 'oStorageRepo', value => $oRepoStorage, trace => true}, + {name => 'oStorageRepo', value => $oRepoStorage->{$iRepo}, trace => true}, ); } diff --git a/test/lib/pgBackRestTest/Env/Host/HostBackupTest.pm b/test/lib/pgBackRestTest/Env/Host/HostBackupTest.pm index f40d423e4..7af48b9b2 100644 --- a/test/lib/pgBackRestTest/Env/Host/HostBackupTest.pm +++ b/test/lib/pgBackRestTest/Env/Host/HostBackupTest.pm @@ -161,6 +161,9 @@ sub new $self->{strRepoPath} = '/'; } + # If there is a repo2 it will always be posix on the repo host + $self->{strRepo2Path} = $self->testRunGet()->testPath() . "/$$oParam{strBackupDestination}/" . HOST_PATH_REPO . "2"; + # Set log/lock paths $self->{strLogPath} = $self->testPath() . '/' . HOST_PATH_LOG; storageTest()->pathCreate($self->{strLogPath}, {strMode => '0770'}); @@ -325,6 +328,7 @@ sub backupBegin (defined($$oParam{strOptionalParam}) ? " $$oParam{strOptionalParam}" : '') . (defined($$oParam{bStandby}) && $$oParam{bStandby} ? " --backup-standby" : '') . (defined($oParam->{strRepoType}) ? " --repo1-type=$oParam->{strRepoType}" : '') . + ' --repo=' . (defined($oParam->{iRepo}) ? $oParam->{iRepo} : '1') . ($strType ne 'incr' ? " --type=${strType}" : '') . ' --stanza=' . (defined($oParam->{strStanza}) ? $oParam->{strStanza} : $self->stanza()) . ' backup', {strComment => $strComment, iExpectedExitStatus => $$oParam{iExpectedExitStatus}, @@ -379,112 +383,118 @@ sub backupEnd 'if an alternate stanza is specified it must generate an error - the remaining code will not be aware of the stanza'); } - my $strBackup = $self->backupLast(); + my $strBackup = $self->backupLast($oParam->{iRepo}); - # If a real backup then load the expected manifest from the actual manifest. An expected manifest can't be generated perfectly - # because a running database is always in flux. Even so, it allows us test many things. - if (!$self->synthetic()) + # Only compare backups that are in repo1. There is not a lot of value in comparing backups in other repos and it would require a + # lot of changes to the test harness. + if (!defined($oParam->{iRepo}) || $oParam->{iRepo} == 1) { - $oExpectedManifest = iniParse( - ${storageRepo()->get( - storageRepo()->openRead( - 'backup/' . $self->stanza() . "/${strBackup}/" . FILE_MANIFEST, - {strCipherPass => $self->cipherPassManifest()}))}); - } - - # Make sure tablespace links are correct - if ($self->hasLink()) - { - if ($strType eq CFGOPTVAL_BACKUP_TYPE_FULL || $self->hardLink()) + # If a real backup then load the expected manifest from the actual manifest. 
An expected manifest can't be generated + # perfectly because a running database is always in flux. Even so, it allows us to test many things. + if (!$self->synthetic()) { - my $hTablespaceManifest = storageTest()->manifest( - $self->repoBackupPath("${strBackup}/" . MANIFEST_TARGET_PGDATA . '/' . DB_PATH_PGTBLSPC)); + $oExpectedManifest = iniParse( + ${storageRepo()->get( + storageRepo()->openRead( + 'backup/' . $self->stanza() . "/${strBackup}/" . FILE_MANIFEST, + {strCipherPass => $self->cipherPassManifest()}))}); + } - # Remove . and .. - delete($hTablespaceManifest->{'.'}); - delete($hTablespaceManifest->{'..'}); - - # Iterate file links - for my $strFile (sort(keys(%{$hTablespaceManifest}))) + # Make sure tablespace links are correct + if ($self->hasLink()) + { + if ($strType eq CFGOPTVAL_BACKUP_TYPE_FULL || $self->hardLink()) { - # Make sure the link is in the expected manifest - my $hManifestTarget = - $oExpectedManifest->{&MANIFEST_SECTION_BACKUP_TARGET}{&MANIFEST_TARGET_PGTBLSPC . "/${strFile}"}; + my $hTablespaceManifest = storageTest()->manifest( + $self->repoBackupPath("${strBackup}/" . MANIFEST_TARGET_PGDATA . '/' . DB_PATH_PGTBLSPC)); - if (!defined($hManifestTarget) || $hManifestTarget->{&MANIFEST_SUBKEY_TYPE} ne MANIFEST_VALUE_LINK || - $hManifestTarget->{&MANIFEST_SUBKEY_TABLESPACE_ID} ne $strFile) + # Remove . and .. + delete($hTablespaceManifest->{'.'}); + delete($hTablespaceManifest->{'..'}); + + # Iterate file links + for my $strFile (sort(keys(%{$hTablespaceManifest}))) { - confess &log(ERROR, "'${strFile}' is not in expected manifest as a link with the correct tablespace id"); + # Make sure the link is in the expected manifest + my $hManifestTarget = + $oExpectedManifest->{&MANIFEST_SECTION_BACKUP_TARGET}{&MANIFEST_TARGET_PGTBLSPC . "/${strFile}"}; + + if (!defined($hManifestTarget) || $hManifestTarget->{&MANIFEST_SUBKEY_TYPE} ne MANIFEST_VALUE_LINK || + $hManifestTarget->{&MANIFEST_SUBKEY_TABLESPACE_ID} ne $strFile) + { + confess &log(ERROR, "'${strFile}' is not in expected manifest as a link with the correct tablespace id"); + } + + # Make sure the link really is a link + if ($hTablespaceManifest->{$strFile}{type} ne 'l') + { + confess &log(ERROR, "'${strFile}' in tablespace directory is not a link"); + } + + # Make sure the link destination is correct + my $strLinkDestination = '../../' . MANIFEST_TARGET_PGTBLSPC . "/${strFile}"; + + if ($hTablespaceManifest->{$strFile}{link_destination} ne $strLinkDestination) + { + confess &log(ERROR, + "'${strFile}' link should reference '${strLinkDestination}' but actually references " . + "'$hTablespaceManifest->{$strFile}{link_destination}'"); + } } - # Make sure the link really is a link - if ($hTablespaceManifest->{$strFile}{type} ne 'l') + # Iterate manifest targets + for my $strTarget (sort(keys(%{$oExpectedManifest->{&MANIFEST_SECTION_BACKUP_TARGET}}))) { - confess &log(ERROR, "'${strFile}' in tablespace directory is not a link"); - } + my $hManifestTarget = $oExpectedManifest->{&MANIFEST_SECTION_BACKUP_TARGET}{$strTarget}; + my $strTablespaceId = $hManifestTarget->{&MANIFEST_SUBKEY_TABLESPACE_ID}; - # Make sure the link destination is correct - my $strLinkDestination = '../../' . MANIFEST_TARGET_PGTBLSPC . "/${strFile}"; - - if ($hTablespaceManifest->{$strFile}{link_destination} ne $strLinkDestination) - { - confess &log(ERROR, - "'${strFile}' link should reference '${strLinkDestination}' but actually references " . 
- "'$hTablespaceManifest->{$strFile}{link_destination}'"); + # Make sure the target exists as a link on disk + if ($hManifestTarget->{&MANIFEST_SUBKEY_TYPE} eq MANIFEST_VALUE_LINK && defined($strTablespaceId) && + !defined($hTablespaceManifest->{$strTablespaceId})) + { + confess &log(ERROR, + "target '${strTarget}' does not have a link at '" . DB_PATH_PGTBLSPC. "/${strTablespaceId}'"); + } } } - - # Iterate manifest targets - for my $strTarget (sort(keys(%{$oExpectedManifest->{&MANIFEST_SECTION_BACKUP_TARGET}}))) + # Else there should not be a tablespace directory at all. This is only valid for storage that supports links. + elsif (storageRepo()->capability(STORAGE_CAPABILITY_LINK) && + storageTest()->pathExists( + $self->repoBackupPath("${strBackup}/" . MANIFEST_TARGET_PGDATA . '/' . DB_PATH_PGTBLSPC))) { - my $hManifestTarget = $oExpectedManifest->{&MANIFEST_SECTION_BACKUP_TARGET}{$strTarget}; - my $strTablespaceId = $hManifestTarget->{&MANIFEST_SUBKEY_TABLESPACE_ID}; - - # Make sure the target exists as a link on disk - if ($hManifestTarget->{&MANIFEST_SUBKEY_TYPE} eq MANIFEST_VALUE_LINK && defined($strTablespaceId) && - !defined($hTablespaceManifest->{$strTablespaceId})) - { - confess &log(ERROR, - "target '${strTarget}' does not have a link at '" . DB_PATH_PGTBLSPC. "/${strTablespaceId}'"); - } + confess &log(ERROR, 'backup must be full or hard-linked to have ' . DB_PATH_PGTBLSPC . ' directory'); } } - # Else there should not be a tablespace directory at all. This is only valid for storage that supports links. - elsif (storageRepo()->capability(STORAGE_CAPABILITY_LINK) && - storageTest()->pathExists($self->repoBackupPath("${strBackup}/" . MANIFEST_TARGET_PGDATA . '/' . DB_PATH_PGTBLSPC))) + + # Check that latest link exists unless repo links are disabled + my $strLatestLink = $self->repoBackupPath(LINK_LATEST); + my $bLatestLinkExists = storageRepo()->exists($strLatestLink); + + if ((!defined($oParam->{strRepoType}) || $oParam->{strRepoType} eq POSIX) && $self->hasLink()) { - confess &log(ERROR, 'backup must be full or hard-linked to have ' . DB_PATH_PGTBLSPC . ' directory'); + my $strLatestLinkDestination = readlink($strLatestLink); + + if ($strLatestLinkDestination ne $strBackup) + { + confess &log(ERROR, "'" . LINK_LATEST . "' link should be '${strBackup}' but is '${strLatestLinkDestination}"); + } } - } - - # Check that latest link exists unless repo links are disabled - my $strLatestLink = $self->repoBackupPath(LINK_LATEST); - my $bLatestLinkExists = storageRepo()->exists($strLatestLink); - - if ((!defined($oParam->{strRepoType}) || $oParam->{strRepoType} eq POSIX) && $self->hasLink()) - { - my $strLatestLinkDestination = readlink($strLatestLink); - - if ($strLatestLinkDestination ne $strBackup) + elsif ($bLatestLinkExists) { - confess &log(ERROR, "'" . LINK_LATEST . "' link should be '${strBackup}' but is '${strLatestLinkDestination}"); + confess &log(ERROR, "'" . LINK_LATEST . "' link should not exist"); } - } - elsif ($bLatestLinkExists) - { - confess &log(ERROR, "'" . LINK_LATEST . "' link should not exist"); - } - # Only do compare for synthetic backups since for real backups the expected manifest *is* the actual manifest. - if ($self->synthetic()) - { - # Compare only if expected to do so - if ($bManifestCompare) + # Only do compare for synthetic backups since for real backups the expected manifest *is* the actual manifest. 
+ if ($self->synthetic()) { - # Set backup type in the expected manifest - ${$oExpectedManifest}{&MANIFEST_SECTION_BACKUP}{&MANIFEST_KEY_TYPE} = $strType; + # Compare only if expected to do so + if ($bManifestCompare) + { + # Set backup type in the expected manifest + ${$oExpectedManifest}{&MANIFEST_SECTION_BACKUP}{&MANIFEST_KEY_TYPE} = $strType; - $self->backupCompare($strBackup, $oExpectedManifest); + $self->backupCompare($strBackup, $oExpectedManifest); + } } } @@ -514,7 +524,8 @@ sub backupEnd $self->repoBackupPath("${strBackup}/" . FILE_MANIFEST), undef, ${storageRepo()->get( storageRepo()->openRead( - $self->repoBackupPath("${strBackup}/" . FILE_MANIFEST), {strCipherPass => $self->cipherPassManifest()}))}); + $self->repoBackupPath("${strBackup}/" . FILE_MANIFEST), + {strCipherPass => $self->cipherPassManifest()}))}); $self->{oLogTest}->supplementalAdd( $self->repoBackupPath(FILE_BACKUP_INFO), undef, ${storageRepo->get($self->repoBackupPath(FILE_BACKUP_INFO))}); } @@ -752,9 +763,11 @@ sub manifestDefault sub backupLast { my $self = shift; + my $iRepo = shift; - my @stryBackup = storageRepo()->list( - $self->repoBackupPath(), {strExpression => '[0-9]{8}-[0-9]{6}F(_[0-9]{8}-[0-9]{6}(D|I)){0,1}', strSortOrder => 'reverse'}); + my @stryBackup = storageRepo({iRepo => $iRepo})->list( + $self->repoBackupPath(undef, $iRepo), + {strExpression => '[0-9]{8}-[0-9]{6}F(_[0-9]{8}-[0-9]{6}(D|I)){0,1}', strSortOrder => 'reverse'}); if (!defined($stryBackup[0])) { @@ -839,6 +852,7 @@ sub expire (defined($$oParam{iRetentionFull}) ? " --repo1-retention-full=$$oParam{iRetentionFull}" : '') . (defined($$oParam{iRetentionDiff}) ? " --repo1-retention-diff=$$oParam{iRetentionDiff}" : '') . (defined($$oParam{strOptionalParam}) ? " $$oParam{strOptionalParam}" : '') . + ' --repo=' . (defined($oParam->{iRepo}) ? $oParam->{iRepo} : '1') . ' --stanza=' . $self->stanza() . ' expire', {strComment => $strComment, iExpectedExitStatus => $$oParam{iExpectedExitStatus}, oLogTest => $self->{oLogTest}, bLogOutput => $self->synthetic()}); @@ -1031,6 +1045,7 @@ sub stanzaDelete $self->executeSimple( $self->backrestExe() . ' --config=' . $self->backrestConfig() . + ' --repo=' . (defined($oParam->{iRepo}) ? $oParam->{iRepo} : '1') . ' --stanza=' . $self->stanza() . (defined($$oParam{strOptionalParam}) ? " $$oParam{strOptionalParam}" : '') . ' stanza-delete', @@ -1145,6 +1160,13 @@ sub configCreate my $bArchiveAsync = defined($$oParam{bArchiveAsync}) ? $$oParam{bArchiveAsync} : false; + my $iRepoTotal = defined($oParam->{iRepoTotal}) ? 
$oParam->{iRepoTotal} : 1; + + if ($iRepoTotal < 1 || $iRepoTotal > 2) + { + confess "invalid repo total ${iRepoTotal}"; + } + # General options # ------------------------------------------------------------------------------------------------------------------------------ $oParamHash{&CFGDEF_SECTION_GLOBAL}{'job-retry'} = 0; @@ -1208,6 +1230,11 @@ sub configCreate $oParamHash{&CFGDEF_SECTION_GLOBAL}{'repo1-azure-verify-tls'} = 'n'; } + if ($iRepoTotal == 2) + { + $oParamHash{&CFGDEF_SECTION_GLOBAL}{'repo2-path'} = $self->repo2Path(); + } + if (defined($$oParam{bHardlink}) && $$oParam{bHardlink}) { $self->{bHardLink} = true; @@ -1303,6 +1330,14 @@ sub configCreate $oParamHash{&CFGDEF_SECTION_GLOBAL}{'repo1-host-cmd'} = $oHostBackup->backrestExe(); $oParamHash{&CFGDEF_SECTION_GLOBAL}{'repo1-host-config'} = $oHostBackup->backrestConfig(); + if ($iRepoTotal == 2) + { + $oParamHash{&CFGDEF_SECTION_GLOBAL}{'repo2-host'} = $oHostBackup->nameGet(); + $oParamHash{&CFGDEF_SECTION_GLOBAL}{'repo2-host-user'} = $oHostBackup->userGet(); + $oParamHash{&CFGDEF_SECTION_GLOBAL}{'repo2-host-cmd'} = $oHostBackup->backrestExe(); + $oParamHash{&CFGDEF_SECTION_GLOBAL}{'repo2-host-config'} = $oHostBackup->backrestConfig(); + } + $oParamHash{&CFGDEF_SECTION_GLOBAL}{'log-path'} = $self->logPath(); $oParamHash{&CFGDEF_SECTION_GLOBAL}{'lock-path'} = $self->lockPath(); } @@ -1682,6 +1717,7 @@ sub restore $bTablespace, $strUser, $strBackupExpected, + $iRepo, ) = logDebugParam ( @@ -1703,6 +1739,7 @@ sub restore {name => 'bTablespace', optional => true}, {name => 'strUser', optional => true}, {name => 'strBackupExpected', optional => true}, + {name => 'iRepo', optional => true, default => 1}, ); # Build link map options @@ -1740,16 +1777,15 @@ sub restore # - which should be the backup passed as strBackupExpected. If it is not defined, then set it based on the strBackup passed. if (!defined($strBackupExpected)) { - $strBackupExpected = $strBackup eq 'latest' ? $oHostBackup->backupLast() : - $strBackup; + $strBackupExpected = $strBackup eq 'latest' ? $oHostBackup->backupLast($iRepo) : $strBackup; } if (!defined($rhExpectedManifest)) { # Load the manifest from the backup expected to be chosen/processed by restore my $oExpectedManifest = new pgBackRestTest::Env::Manifest( - $self->repoBackupPath($strBackupExpected . qw{/} . FILE_MANIFEST), - {strCipherPass => $oHostBackup->cipherPassManifest()}); + $self->repoBackupPath($strBackupExpected . qw{/} . FILE_MANIFEST, $iRepo), + {strCipherPass => $oHostBackup->cipherPassManifest(), oStorage => storageRepo({iRepo => $iRepo})}); $rhExpectedManifest = $oExpectedManifest->{oContent}; @@ -1817,14 +1853,19 @@ sub restore (defined($strLinkMap) ? $strLinkMap : '') . ($self->synthetic() ? '' : ' --link-all') . (defined($strTargetAction) && $strTargetAction ne 'pause' ? " --target-action=${strTargetAction}" : '') . - ' --stanza=' . $self->stanza() . ' restore', + " --repo=${iRepo} --stanza=" . $self->stanza() . ' restore', {strComment => $strComment, iExpectedExitStatus => $iExpectedExitStatus, oLogTest => $self->{oLogTest}, bLogOutput => $self->synthetic()}, $strUser); if (!defined($iExpectedExitStatus)) { - $self->restoreCompare($strBackupExpected, dclone($rhExpectedManifest), $bTablespace); + # Only compare restores in repo1. There is not a lot of value in comparing restores in other repos and it would require a + # lot of changes to the Perl test harness. 
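A minimal sketch of how these repo parameters combine in a test scenario (hypothetical, not part of the patch; assumes hosts set up as in RealAllTest and, per the comment above, that restoreCompare() only runs for repo1):

    # Sketch only: configure two repositories, back up to the second, then restore from it
    $oHostBackup->configCreate({strStorage => POSIX, iRepoTotal => 2});
    $oHostBackup->backup(CFGOPTVAL_BACKUP_TYPE_FULL, 'backup to repo2', {iRepo => 2});
    $oHostDbPrimary->restore(undef, 'latest', {iRepo => 2});
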
+ if ($iRepo == 1) + { + $self->restoreCompare($strBackupExpected, dclone($rhExpectedManifest), $bTablespace); + } if (defined($self->{oLogTest})) { @@ -2172,9 +2213,17 @@ sub repoSubPath my $self = shift; my $strSubPath = shift; my $strPath = shift; + my $iRepo = shift; + + my $strRepoPath = $self->repoPath(); + + if (defined($iRepo) && $iRepo == 2) + { + $strRepoPath = $self->repo2Path(); + } return - ($self->{strRepoPath} eq '/' ? '' : $self->{strRepoPath}) . "/${strSubPath}/" . $self->stanza() . + ($strRepoPath eq '/' ? '' : $strRepoPath) . "/${strSubPath}/" . $self->stanza() . (defined($strPath) ? "/${strPath}" : ''); } @@ -2195,8 +2244,9 @@ sub isHostDb {my $self = shift; return $self->isHostDbPrimary() || $self->isHost sub lockPath {return shift->{strLockPath}} sub logPath {return shift->{strLogPath}} sub repoArchivePath {return shift->repoSubPath('archive', shift)} -sub repoBackupPath {return shift->repoSubPath('backup', shift)} +sub repoBackupPath {return shift->repoSubPath('backup', shift, shift)} sub repoPath {return shift->{strRepoPath}} +sub repo2Path {return shift->{strRepo2Path}} sub repoEncrypt {return shift->{bRepoEncrypt}} sub stanza {return testRunGet()->stanza()} sub synthetic {return shift->{bSynthetic}} diff --git a/test/lib/pgBackRestTest/Env/HostEnvTest.pm b/test/lib/pgBackRestTest/Env/HostEnvTest.pm index cdeb8b549..8a3832768 100644 --- a/test/lib/pgBackRestTest/Env/HostEnvTest.pm +++ b/test/lib/pgBackRestTest/Env/HostEnvTest.pm @@ -139,7 +139,8 @@ sub setup strCompressType => $$oConfigParam{strCompressType}, bHardlink => $bHostBackup ? undef : $$oConfigParam{bHardLink}, bArchiveAsync => $$oConfigParam{bArchiveAsync}, - strStorage => $oConfigParam->{strStorage}}); + strStorage => $oConfigParam->{strStorage}, + iRepoTotal => $oConfigParam->{iRepoTotal}}); # Create backup config if backup host exists if (defined($oHostBackup)) @@ -147,7 +148,8 @@ sub setup $oHostBackup->configCreate({ strCompressType => $$oConfigParam{strCompressType}, bHardlink => $$oConfigParam{bHardLink}, - strStorage => $oConfigParam->{strStorage}}); + strStorage => $oConfigParam->{strStorage}, + iRepoTotal => $oConfigParam->{iRepoTotal}}); } # If backup host is not defined set it to db-primary else @@ -171,7 +173,8 @@ sub setup strCompressType => $$oConfigParam{strCompressType}, bHardlink => $bHostBackup ? 
undef : $$oConfigParam{bHardLink}, bArchiveAsync => $$oConfigParam{bArchiveAsync}, - strStorage => $oConfigParam->{strStorage}}); + strStorage => $oConfigParam->{strStorage}, + iRepoTotal => $oConfigParam->{iRepoTotal}}); } # Create object storage diff --git a/test/lib/pgBackRestTest/Module/Real/RealAllTest.pm b/test/lib/pgBackRestTest/Module/Real/RealAllTest.pm index 1bc789368..fccdd65c7 100644 --- a/test/lib/pgBackRestTest/Module/Real/RealAllTest.pm +++ b/test/lib/pgBackRestTest/Module/Real/RealAllTest.pm @@ -52,19 +52,19 @@ sub run foreach my $rhRun ( - {pg => PG_VERSION_83, repoDest => HOST_DB_PRIMARY, storage => POSIX, encrypt => false, compress => NONE}, - {pg => PG_VERSION_84, repoDest => HOST_BACKUP, storage => AZURE, encrypt => true, compress => GZ}, - {pg => PG_VERSION_90, repoDest => HOST_DB_PRIMARY, storage => POSIX, encrypt => true, compress => BZ2}, - {pg => PG_VERSION_91, repoDest => HOST_DB_STANDBY, storage => S3, encrypt => false, compress => NONE}, - {pg => PG_VERSION_92, repoDest => HOST_DB_STANDBY, storage => POSIX, encrypt => true, compress => NONE}, - {pg => PG_VERSION_93, repoDest => HOST_BACKUP, storage => AZURE, encrypt => false, compress => GZ}, - {pg => PG_VERSION_94, repoDest => HOST_DB_STANDBY, storage => POSIX, encrypt => true, compress => LZ4}, - {pg => PG_VERSION_95, repoDest => HOST_BACKUP, storage => S3, encrypt => false, compress => BZ2}, - {pg => PG_VERSION_96, repoDest => HOST_BACKUP, storage => POSIX, encrypt => false, compress => NONE}, - {pg => PG_VERSION_10, repoDest => HOST_DB_STANDBY, storage => S3, encrypt => true, compress => GZ}, - {pg => PG_VERSION_11, repoDest => HOST_BACKUP, storage => AZURE, encrypt => false, compress => ZST}, - {pg => PG_VERSION_12, repoDest => HOST_BACKUP, storage => S3, encrypt => true, compress => LZ4}, - {pg => PG_VERSION_13, repoDest => HOST_DB_STANDBY, storage => AZURE, encrypt => false, compress => ZST}, + {pg => PG_VERSION_83, repoDest => HOST_DB_PRIMARY, storage => POSIX, encrypt => false, compress => NONE, repo => 1}, + {pg => PG_VERSION_84, repoDest => HOST_BACKUP, storage => AZURE, encrypt => true, compress => GZ, repo => 1}, + {pg => PG_VERSION_90, repoDest => HOST_DB_PRIMARY, storage => POSIX, encrypt => true, compress => BZ2, repo => 1}, + {pg => PG_VERSION_91, repoDest => HOST_DB_STANDBY, storage => S3, encrypt => false, compress => NONE, repo => 1}, + {pg => PG_VERSION_92, repoDest => HOST_DB_STANDBY, storage => POSIX, encrypt => true, compress => NONE, repo => 1}, + {pg => PG_VERSION_93, repoDest => HOST_BACKUP, storage => AZURE, encrypt => false, compress => GZ, repo => 1}, + {pg => PG_VERSION_94, repoDest => HOST_DB_STANDBY, storage => POSIX, encrypt => true, compress => LZ4, repo => 1}, + {pg => PG_VERSION_95, repoDest => HOST_BACKUP, storage => S3, encrypt => false, compress => BZ2, repo => 1}, + {pg => PG_VERSION_96, repoDest => HOST_BACKUP, storage => POSIX, encrypt => false, compress => NONE, repo => 1}, + {pg => PG_VERSION_10, repoDest => HOST_DB_STANDBY, storage => S3, encrypt => true, compress => GZ, repo => 1}, + {pg => PG_VERSION_11, repoDest => HOST_BACKUP, storage => AZURE, encrypt => false, compress => ZST, repo => 1}, + {pg => PG_VERSION_12, repoDest => HOST_BACKUP, storage => S3, encrypt => true, compress => LZ4, repo => 1}, + {pg => PG_VERSION_13, repoDest => HOST_DB_STANDBY, storage => AZURE, encrypt => false, compress => ZST, repo => 1}, ) { # Only run tests for this pg version @@ -77,6 +77,7 @@ sub run my $strStorage = $rhRun->{storage}; my $bRepoEncrypt = $rhRun->{encrypt}; my 
$strCompressType = $rhRun->{compress}; + my $iRepoTotal = $rhRun->{repo}; # Use a specific VM and version of PostgreSQL for expect testing. This version will also be used to run tests that are not # version specific. @@ -93,7 +94,7 @@ sub run false, $self->expect(), {bHostBackup => $bHostBackup, bStandby => $bHostStandby, strBackupDestination => $strBackupDestination, strCompressType => $strCompressType, bArchiveAsync => false, strStorage => $strStorage, - bRepoEncrypt => $bRepoEncrypt}); + bRepoEncrypt => $bRepoEncrypt, iRepoTotal => $iRepoTotal}); # Some commands will fail because of the bogus host created when a standby is present. These options reset the bogus host # so it won't interfere with commands that won't tolerate a connection failure. @@ -192,10 +193,17 @@ sub run # Required to set hint bits to be sent to the standby to make the heap match on both sides $oHostDbPrimary->sqlSelectOneTest('select message from test', $strFullMessage); + # Backup to repo1 my $strFullBackup = $oHostBackup->backup( - CFGOPTVAL_BACKUP_TYPE_FULL, 'update during backup', + CFGOPTVAL_BACKUP_TYPE_FULL, 'repo1', {strOptionalParam => ' --buffer-size=16384'}); + # Backup to repo2 if it exists + if ($iRepoTotal == 2) + { + $oHostBackup->backup(CFGOPTVAL_BACKUP_TYPE_FULL, 'repo2', {iRepo => 2}); + } + # Make a new backup with expire-auto disabled then run the expire command and compare backup numbers to ensure that expire # was really disabled. This test is not version specific so is run on only the expect version. #--------------------------------------------------------------------------------------------------------------------------- @@ -450,7 +458,8 @@ sub run # Exercise --delta checksum option my $strIncrBackup = $oHostBackup->backup( - CFGOPTVAL_BACKUP_TYPE_INCR, 'update during backup', {strOptionalParam => '--stop-auto --buffer-size=32768 --delta'}); + CFGOPTVAL_BACKUP_TYPE_INCR, 'delta', + {strOptionalParam => '--stop-auto --buffer-size=32768 --delta', iRepo => $iRepoTotal}); # Ensure the check command runs properly with a tablespace $oHostBackup->check( 'check command with tablespace', {iTimeout => 5, strOptionalParam => $strBogusReset}); @@ -520,7 +529,8 @@ sub run # Now the restore should work $oHostDbPrimary->restore( - undef, 'latest', {strOptionalParam => ' --db-include=test2 --db-include=test3 --buffer-size=16384'}); + undef, 'latest', + {strOptionalParam => ' --db-include=test2 --db-include=test3 --buffer-size=16384', iRepo => $iRepoTotal}); # Test that the first database has not been restored since --db-include did not include test1 my ($strSHA1, $lSize) = storageTest()->hashSize($strDb1TablePath); @@ -646,7 +656,8 @@ sub run {bForce => true, strType => CFGOPTVAL_RESTORE_TYPE_XID, strTarget => $strXidTarget, strTargetAction => $oHostDbPrimary->pgVersion() >= PG_VERSION_91 ? 'promote' : undef, strTargetTimeline => $oHostDbPrimary->pgVersion() >= PG_VERSION_12 ? 'current' : undef, - strOptionalParam => '--tablespace-map-all=../../tablespace', bTablespace => false}); + strOptionalParam => '--tablespace-map-all=../../tablespace', bTablespace => false, + iRepo => $iRepoTotal}); # Save recovery file to test so we can use it in the next test $strRecoveryFile = $oHostDbPrimary->pgVersion() >= PG_VERSION_12 ? 'postgresql.auto.conf' : DB_FILE_RECOVERYCONF; @@ -712,7 +723,8 @@ sub run undef, $strIncrBackup, {bDelta => true, strType => CFGOPTVAL_RESTORE_TYPE_XID, strTarget => $strXidTarget, bTargetExclusive => true, strTargetAction => $oHostDbPrimary->pgVersion() >= PG_VERSION_91 ? 
'promote' : undef, - strTargetTimeline => $oHostDbPrimary->pgVersion() >= PG_VERSION_12 ? 'current' : undef}); + strTargetTimeline => $oHostDbPrimary->pgVersion() >= PG_VERSION_12 ? 'current' : undef, + iRepo => $iRepoTotal}); $oHostDbPrimary->clusterStart(); $oHostDbPrimary->sqlSelectOneTest('select message from test', $strIncrMessage); @@ -751,7 +763,7 @@ sub run {bDelta => true, strType => $oHostDbPrimary->pgVersion() >= PG_VERSION_90 ? CFGOPTVAL_RESTORE_TYPE_STANDBY : CFGOPTVAL_RESTORE_TYPE_DEFAULT, - strTargetTimeline => 4}); + strTargetTimeline => 4, iRepo => $iRepoTotal}); $oHostDbPrimary->clusterStart({bHotStandby => true}); $oHostDbPrimary->sqlSelectOneTest('select message from test', $strTimelineMessage, {iTimeout => 120}); diff --git a/test/src/module/command/archiveGetTest.c b/test/src/module/command/archiveGetTest.c index 65918f92d..99f104ffd 100644 --- a/test/src/module/command/archiveGetTest.c +++ b/test/src/module/command/archiveGetTest.c @@ -612,17 +612,22 @@ testRun(void) buffer, .compressType = compressTypeGz, .cipherType = cipherTypeAes256Cbc, .cipherPass = TEST_CIPHER_PASS_ARCHIVE); // Add encryption options - argList = strLstDup(argBaseList); - hrnCfgArgRawZ(argList, cfgOptRepoCipherType, CIPHER_TYPE_AES_256_CBC); - hrnCfgEnvRawZ(cfgOptRepoCipherPass, TEST_CIPHER_PASS); + argList = strLstNew(); + hrnCfgArgRawZ(argList, cfgOptPgPath, TEST_PATH_PG); + hrnCfgArgKeyRawZ(argList, cfgOptRepoPath, 1, "/repo-bogus"); + hrnCfgArgKeyRawFmt(argList, cfgOptRepoPath, 2, TEST_PATH_REPO); + hrnCfgArgRawZ(argList, cfgOptRepo, "2"); + hrnCfgArgKeyRawZ(argList, cfgOptRepoCipherType, 2, CIPHER_TYPE_AES_256_CBC); + hrnCfgEnvKeyRawZ(cfgOptRepoCipherPass, 2, TEST_CIPHER_PASS); + hrnCfgArgRawZ(argList, cfgOptStanza, "test1"); strLstAddZ(argList, "01ABCDEF01ABCDEF01ABCDEF"); strLstAddZ(argList, TEST_PATH_PG "/pg_wal/RECOVERYXLOG"); harnessCfgLoad(cfgCmdArchiveGet, argList); - hrnCfgEnvRemoveRaw(cfgOptRepoCipherPass); + hrnCfgEnvKeyRemoveRaw(cfgOptRepoCipherPass, 2); TEST_RESULT_INT(cmdArchiveGet(), 0, "get"); - harnessLogResult("P00 INFO: found 01ABCDEF01ABCDEF01ABCDEF in the repo1:10-1 archive"); + harnessLogResult("P00 INFO: found 01ABCDEF01ABCDEF01ABCDEF in the repo2:10-1 archive"); TEST_STORAGE_LIST(storageTest, TEST_PATH_PG "/pg_wal", "RECOVERYXLOG\n"); TEST_RESULT_UINT( @@ -644,9 +649,9 @@ testRun(void) // Add archive-async and spool path hrnCfgArgRawZ(argList, cfgOptSpoolPath, TEST_PATH_SPOOL); hrnCfgArgRawBool(argList, cfgOptArchiveAsync, true); - hrnCfgEnvRawZ(cfgOptRepoCipherPass, TEST_CIPHER_PASS); + hrnCfgEnvKeyRawZ(cfgOptRepoCipherPass, 2, TEST_CIPHER_PASS); harnessCfgLoadRole(cfgCmdArchiveGet, cfgCmdRoleLocal, argList); - hrnCfgEnvRemoveRaw(cfgOptRepoCipherPass); + hrnCfgEnvKeyRemoveRaw(cfgOptRepoCipherPass, 2); // Setup protocol command VariantList *paramList = varLstNew(); diff --git a/test/src/module/command/archivePushTest.c b/test/src/module/command/archivePushTest.c index eae1a93bf..0319fc9dc 100644 --- a/test/src/module/command/archivePushTest.c +++ b/test/src/module/command/archivePushTest.c @@ -133,8 +133,9 @@ testRun(void) "1={\"db-id\":5555555555555555555,\"db-version\":\"9.4\"}\n")); TEST_ERROR( - archivePushCheck(true, cipherTypeNone, NULL), ArchiveMismatchError, - "PostgreSQL version 9.6, system-id 18072658121562454734 do not match stanza version 9.4, system-id 5555555555555555555" + archivePushCheck(true), ArchiveMismatchError, + "PostgreSQL version 9.6, system-id 18072658121562454734 do not match repo1 stanza version 9.4, system-id" + " 5555555555555555555" 
"\nHINT: are you archiving to the correct stanza?"); // Fix the version @@ -148,8 +149,9 @@ testRun(void) "1={\"db-id\":5555555555555555555,\"db-version\":\"9.6\"}\n")); TEST_ERROR( - archivePushCheck(true, cipherTypeNone, NULL), ArchiveMismatchError, - "PostgreSQL version 9.6, system-id 18072658121562454734 do not match stanza version 9.6, system-id 5555555555555555555" + archivePushCheck(true), ArchiveMismatchError, + "PostgreSQL version 9.6, system-id 18072658121562454734 do not match repo1 stanza version 9.6, system-id" + " 5555555555555555555" "\nHINT: are you archiving to the correct stanza?"); // Fix archive info @@ -163,12 +165,73 @@ testRun(void) "1={\"db-id\":18072658121562454734,\"db-version\":\"9.6\"}\n")); ArchivePushCheckResult result = {0}; - TEST_ASSIGN(result, archivePushCheck(true, cipherTypeNone, NULL), "get archive check result"); + TEST_ASSIGN(result, archivePushCheck(true), "get archive check result"); TEST_RESULT_UINT(result.pgVersion, PG_VERSION_96, "check pg version"); TEST_RESULT_UINT(result.pgSystemId, 0xFACEFACEFACEFACE, "check pg system id"); - TEST_RESULT_STR_Z(result.archiveId, "9.6-1", "check archive id"); - TEST_RESULT_STR_Z(result.archiveCipherPass, NULL, "check archive cipher pass (not set in this test)"); + TEST_RESULT_STR_Z(result.repoData[0].archiveId, "9.6-1", "check archive id"); + TEST_RESULT_UINT(result.repoData[0].cipherType, cipherTypeNone, "check cipher type"); + TEST_RESULT_STR_Z(result.repoData[0].cipherPass, NULL, "check cipher pass (not set in this test)"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("mismatched repos when pg-path not present"); + + argList = strLstNew(); + strLstAddZ(argList, "--stanza=test"); + strLstAdd(argList, strNewFmt("--repo2-path=%s/repo2", testPath())); + strLstAdd(argList, strNewFmt("--repo4-path=%s/repo4", testPath())); + harnessCfgLoad(cfgCmdArchivePush, argList); + + // repo2 has correct info + storagePutP( + storageNewWriteP(storageTest, strNew("repo2/archive/test/archive.info")), + harnessInfoChecksumZ( + "[db]\n" + "db-id=1\n" + "\n" + "[db:history]\n" + "1={\"db-id\":18072658121562454734,\"db-version\":\"9.6\"}\n")); + + // repo4 has incorrect info + storagePutP( + storageNewWriteP(storageTest, strNew("repo4/archive/test/archive.info")), + harnessInfoChecksumZ( + "[db]\n" + "db-id=1\n" + "\n" + "[db:history]\n" + "1={\"db-id\":5555555555555555555,\"db-version\":\"9.4\"}\n")); + + TEST_ERROR( + archivePushCheck(false), ArchiveMismatchError, + "repo2 stanza version 9.6, system-id 18072658121562454734 do not match repo4 stanza version 9.4, system-id" + " 5555555555555555555" + "\nHINT: are you archiving to the correct stanza?"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("matched repos when pg-path not present"); + + // repo4 has correct info + storagePutP( + storageNewWriteP(storageTest, strNew("repo4/archive/test/archive.info")), + harnessInfoChecksumZ( + "[db]\n" + "db-id=2\n" + "\n" + "[db:history]\n" + "1={\"db-id\":5555555555555555555,\"db-version\":\"9.4\"}\n" + "2={\"db-id\":18072658121562454734,\"db-version\":\"9.6\"}\n")); + + TEST_ASSIGN(result, archivePushCheck(false), "get archive check result"); + + TEST_RESULT_UINT(result.pgVersion, PG_VERSION_96, "check pg version"); + TEST_RESULT_UINT(result.pgSystemId, 0xFACEFACEFACEFACE, "check pg system id"); + TEST_RESULT_STR_Z(result.repoData[0].archiveId, 
"9.6-1", "check repo2 archive id"); + TEST_RESULT_UINT(result.repoData[0].cipherType, cipherTypeNone, "check repo2 cipher pass"); + TEST_RESULT_STR_Z(result.repoData[0].cipherPass, NULL, "check repo2 cipher pass (not set in this test)"); + TEST_RESULT_STR_Z(result.repoData[1].archiveId, "9.6-2", "check repo4 archive id"); + TEST_RESULT_UINT(result.repoData[1].cipherType, cipherTypeNone, "check repo4 cipher type"); + TEST_RESULT_STR_Z(result.repoData[1].cipherPass, NULL, "check repo4 cipher pass (not set in this test)"); } // ***************************************************************************************************************************** @@ -279,7 +342,7 @@ testRun(void) TEST_RESULT_VOID(cmdArchivePush(), "push the WAL segment again"); harnessLogResult( - "P00 WARN: WAL file '000000010000000100000001' already exists in the archive with the same checksum\n" + "P00 WARN: WAL file '000000010000000100000001' already exists in the repo1 archive with the same checksum\n" " HINT: this is valid in some recovery scenarios but may also indicate a problem.\n" "P00 INFO: pushed WAL file '000000010000000100000001' to the archive"); @@ -292,7 +355,9 @@ testRun(void) storagePutP(storageNewWriteP(storagePgWrite(), strNew("pg_wal/000000010000000100000001")), walBuffer2); - TEST_ERROR(cmdArchivePush(), ArchiveDuplicateError, "WAL file '000000010000000100000001' already exists in the archive"); + TEST_ERROR( + cmdArchivePush(), ArchiveDuplicateError, + "WAL file '000000010000000100000001' already exists in the repo1 archive with a different checksum"); // ------------------------------------------------------------------------------------------------------------------------- TEST_TITLE("WAL with absolute path and no pg1-path"); @@ -363,7 +428,7 @@ testRun(void) TEST_RESULT_VOID(cmdArchivePush(), "push WAL file again"); harnessLogResult( - "P00 WARN: WAL file '000000010000000100000002' already exists in the archive with the same checksum\n" + "P00 WARN: WAL file '000000010000000100000002' already exists in the repo1 archive with the same checksum\n" " HINT: this is valid in some recovery scenarios but may also indicate a problem.\n" "P00 INFO: pushed WAL file '000000010000000100000002' to the archive"); @@ -371,20 +436,20 @@ testRun(void) // ------------------------------------------------------------------------------------------------------------------------- VariantList *paramList = varLstNew(); varLstAdd(paramList, varNewStr(strNewFmt("%s/pg/pg_wal/000000010000000100000002", testPath()))); - varLstAdd(paramList, varNewStrZ("11-1")); varLstAdd(paramList, varNewUInt64(PG_VERSION_11)); varLstAdd(paramList, varNewUInt64(0xFACEFACEFACEFACE)); varLstAdd(paramList, varNewStrZ("000000010000000100000002")); - varLstAdd(paramList, varNewUInt64(cipherTypeNone)); - varLstAdd(paramList, NULL); varLstAdd(paramList, varNewBool(false)); varLstAdd(paramList, varNewInt(6)); + varLstAdd(paramList, varNewStrZ("11-1")); + varLstAdd(paramList, varNewUInt64(cipherTypeNone)); + varLstAdd(paramList, NULL); TEST_RESULT_BOOL( archivePushProtocol(PROTOCOL_COMMAND_ARCHIVE_PUSH_STR, paramList, server), true, "protocol archive put"); TEST_RESULT_STR_Z( strNewBuf(serverWrite), - "{\"out\":\"WAL file '000000010000000100000002' already exists in the archive with the same checksum" + "{\"out\":\"WAL file '000000010000000100000002' already exists in the repo1 archive with the same checksum" "\\nHINT: this is valid in some recovery scenarios but may also indicate a problem.\"}\n", "check result"); @@ -394,11 +459,14 @@ 
testRun(void) // ------------------------------------------------------------------------------------------------------------------------- TEST_RESULT_BOOL(archivePushProtocol(strNew(BOGUS_STR), paramList, server), false, "invalid function"); - // Create a new encrypted repo to test encryption // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("multiple repos, one encrypted"); + + // Remove old repo storagePathRemoveP(storageTest, strNew("repo"), .errorOnMissing = true, .recurse = true); - StorageWrite *infoWrite = storageNewWriteP(storageTest, strNew("repo/archive/test/archive.info")); + // repo2 is encrypted + StorageWrite *infoWrite = storageNewWriteP(storageTest, strNew("repo2/archive/test/archive.info")); ioFilterGroupAdd( ioWriteFilterGroup(storageWriteIo(infoWrite)), cipherBlockNew(cipherModeEncrypt, cipherTypeAes256Cbc, @@ -416,22 +484,70 @@ testRun(void) "[db:history]\n" "1={\"db-id\":18072658121562454734,\"db-version\":\"11\"}")); + // repo3 is not encrypted + storagePutP( + storageNewWriteP(storageTest, strNew("repo3/archive/test/archive.info")), + harnessInfoChecksumZ( + "[db]\n" + "db-id=1\n" + "\n" + "[db:history]\n" + "1={\"db-id\":18072658121562454734,\"db-version\":\"11\"}")); + // Push encrypted WAL segment - argListTemp = strLstDup(argList); + argListTemp = strLstNew(); + hrnCfgArgRawZ(argListTemp, cfgOptStanza, "test"); + hrnCfgArgKeyRawFmt(argListTemp, cfgOptPgPath, 1, "%s/pg", testPath()); + hrnCfgArgKeyRawFmt(argListTemp, cfgOptRepoPath, 2, "%s/repo2", testPath()); + hrnCfgArgKeyRawZ(argListTemp, cfgOptRepoCipherType, 2, CIPHER_TYPE_AES_256_CBC); + hrnCfgEnvKeyRawZ(cfgOptRepoCipherPass, 2, "badpassphrase"); + hrnCfgArgKeyRawFmt(argListTemp, cfgOptRepoPath, 3, "%s/repo3", testPath()); + hrnCfgArgRawNegate(argListTemp, cfgOptCompress); strLstAddZ(argListTemp, "pg_wal/000000010000000100000002"); - strLstAddZ(argListTemp, "--repo1-cipher-type=aes-256-cbc"); - strLstAddZ(argListTemp, "--no-compress"); - setenv("PGBACKREST_REPO1_CIPHER_PASS", "badpassphrase", true); harnessCfgLoad(cfgCmdArchivePush, argListTemp); - unsetenv("PGBACKREST_REPO1_CIPHER_PASS"); + hrnCfgEnvKeyRemoveRaw(cfgOptRepoCipherPass, 2); TEST_RESULT_VOID(cmdArchivePush(), "push the WAL segment"); harnessLogResult("P00 INFO: pushed WAL file '000000010000000100000002' to the archive"); TEST_RESULT_BOOL( storageExistsP( - storageTest, strNewFmt("repo/archive/test/11-1/0000000100000001/000000010000000100000002-%s", walBuffer2Sha1)), - true, "check repo for WAL file"); + storageTest, strNewFmt("repo2/archive/test/11-1/0000000100000001/000000010000000100000002-%s", walBuffer2Sha1)), + true, "check repo2 for WAL file"); + + TEST_RESULT_BOOL( + storageExistsP( + storageTest, strNewFmt("repo3/archive/test/11-1/0000000100000001/000000010000000100000002-%s", walBuffer2Sha1)), + true, "check repo3 for WAL file"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("remove WAL from one repo and push again"); + + storageRemoveP( + storageTest, strNewFmt("repo2/archive/test/11-1/0000000100000001/000000010000000100000002-%s", walBuffer2Sha1), + .errorOnMissing = true); + + TEST_RESULT_VOID(cmdArchivePush(), "push the WAL segment"); + harnessLogResult( + "P00 WARN: WAL file '000000010000000100000002' already exists in the repo3 archive with the same checksum\n" + " HINT: this is valid in some recovery scenarios but may also indicate a problem.\n" + 
"P00 INFO: pushed WAL file '000000010000000100000002' to the archive"); + + TEST_RESULT_BOOL( + storageExistsP( + storageTest, strNewFmt("repo2/archive/test/11-1/0000000100000001/000000010000000100000002-%s", walBuffer2Sha1)), + true, "check repo2 for WAL file"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("WAL already exists in both repos"); + + TEST_RESULT_VOID(cmdArchivePush(), "push the WAL segment"); + harnessLogResult( + "P00 WARN: WAL file '000000010000000100000002' already exists in the repo2 archive with the same checksum\n" + " HINT: this is valid in some recovery scenarios but may also indicate a problem.\n" + " WAL file '000000010000000100000002' already exists in the repo3 archive with the same checksum\n" + " HINT: this is valid in some recovery scenarios but may also indicate a problem.\n" + "P00 INFO: pushed WAL file '000000010000000100000002' to the archive"); } // ***************************************************************************************************************************** @@ -638,11 +754,29 @@ testRun(void) "global.error\n", "check status files"); // ------------------------------------------------------------------------------------------------------------------------- - TEST_TITLE("push already pushed WAL and error on missing WAL"); + TEST_TITLE("add repo, push already pushed WAL and new WAL"); + + // Add repo3 + hrnCfgArgKeyRawFmt(argList, cfgOptRepoPath, 3, "%s/repo3", testPath()); + harnessCfgLoadRole(cfgCmdArchivePush, cfgCmdRoleAsync, argList); + + storagePutP( + storageNewWriteP(storageTest, strNew("repo3/archive/test/archive.info")), + harnessInfoChecksumZ( + "[db]\n" + "db-id=1\n" + "\n" + "[db:history]\n" + "1={\"db-id\":12297848147757817309,\"db-version\":\"9.4\"}\n")); // Recreate ready file for WAL 1 storagePutP(storageNewWriteP(storagePgWrite(), strNew("pg_xlog/archive_status/000000010000000100000001.ready")), NULL); + TEST_RESULT_BOOL( + storageExistsP( + storageTest, strNewFmt("repo/archive/test/9.4-1/0000000100000001/000000010000000100000001-%s", walBuffer1Sha1)), + true, "check repo1 for WAL 1 file"); + // Create a ready file for WAL 2 but don't create the segment yet -- this will test the file error storagePutP(storageNewWriteP(storagePgWrite(), strNew("pg_xlog/archive_status/000000010000000100000002.ready")), NULL); @@ -651,7 +785,7 @@ testRun(void) strZ( strNewFmt( "P00 INFO: push 2 WAL file(s) to archive: 000000010000000100000001...000000010000000100000002\n" - "P01 WARN: WAL file '000000010000000100000001' already exists in the archive with the same checksum\n" + "P01 WARN: WAL file '000000010000000100000001' already exists in the repo1 archive with the same checksum\n" " HINT: this is valid in some recovery scenarios but may also indicate a problem.\n" "P01 DETAIL: pushed WAL file '000000010000000100000001' to the archive\n" "P01 WARN: could not push WAL file '000000010000000100000002' to the archive (will be retried): " @@ -661,7 +795,12 @@ testRun(void) TEST_RESULT_BOOL( storageExistsP( storageTest, strNewFmt("repo/archive/test/9.4-1/0000000100000001/000000010000000100000001-%s", walBuffer1Sha1)), - true, "check repo for WAL 1 file"); + true, "check repo1 for WAL 1 file"); + + TEST_RESULT_BOOL( + storageExistsP( + storageTest, strNewFmt("repo/archive/test/9.4-1/0000000100000001/000000010000000100000001-%s", walBuffer1Sha1)), + true, "check repo3 for WAL 1 file"); TEST_RESULT_STRLST_Z( strLstSort(storageListP(storageSpool(), 
strNew(STORAGE_SPOOL_ARCHIVE_OUT)), sortOrderAsc), @@ -691,12 +830,63 @@ testRun(void) TEST_RESULT_BOOL( storageExistsP( storageTest, strNewFmt("repo/archive/test/9.4-1/0000000100000001/000000010000000100000002-%s", walBuffer2Sha1)), - true, "check repo for WAL 2 file"); + true, "check repo1 for WAL 2 file"); + TEST_RESULT_BOOL( + storageExistsP( + storageTest, strNewFmt("repo3/archive/test/9.4-1/0000000100000001/000000010000000100000002-%s", walBuffer2Sha1)), + true, "check repo3 for WAL 2 file"); TEST_RESULT_STRLST_Z( strLstSort(storageListP(storageSpool(), strNew(STORAGE_SPOOL_ARCHIVE_OUT)), sortOrderAsc), "000000010000000100000001.ok\n000000010000000100000002.ok\n", "check status files"); + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("push wal 2 again to get warnings from both repos"); + + // Remove the OK file so the WAL gets pushed again + storageRemoveP(storageSpoolWrite(), STRDEF(STORAGE_SPOOL_ARCHIVE_OUT "/000000010000000100000002.ok")); + + TEST_RESULT_VOID(cmdArchivePushAsync(), "push WAL segments"); + harnessLogResult( + "P00 INFO: push 1 WAL file(s) to archive: 000000010000000100000002\n" + "P01 WARN: WAL file '000000010000000100000002' already exists in the repo1 archive with the same checksum\n" + " HINT: this is valid in some recovery scenarios but may also indicate a problem.\n" + " WAL file '000000010000000100000002' already exists in the repo3 archive with the same checksum\n" + " HINT: this is valid in some recovery scenarios but may also indicate a problem.\n" + "P01 DETAIL: pushed WAL file '000000010000000100000002' to the archive"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("create and push WAL 3 to both repos"); + + // Create WAL 3 segment + Buffer *walBuffer3 = bufNew((size_t)16 * 1024 * 1024); + bufUsedSet(walBuffer3, bufSize(walBuffer3)); + memset(bufPtr(walBuffer3), 0x44, bufSize(walBuffer3)); + pgWalTestToBuffer((PgWal){.version = PG_VERSION_94, .systemId = 0xAAAABBBBCCCCDDDD}, walBuffer3); + const char *walBuffer3Sha1 = strZ(bufHex(cryptoHashOne(HASH_TYPE_SHA1_STR, walBuffer3))); + + storagePutP(storageNewWriteP(storagePgWrite(), strNew("pg_xlog/000000010000000100000003")), walBuffer3); + + // Create ready file + storagePutP(storageNewWriteP(storagePgWrite(), strNew("pg_xlog/archive_status/000000010000000100000003.ready")), NULL); + + TEST_RESULT_VOID(cmdArchivePushAsync(), "push WAL segment"); + harnessLogResult( + "P00 INFO: push 1 WAL file(s) to archive: 000000010000000100000003\n" + "P01 DETAIL: pushed WAL file '000000010000000100000003' to the archive"); + + TEST_RESULT_BOOL( + storageExistsP( + storageTest, strNewFmt("repo/archive/test/9.4-1/0000000100000001/000000010000000100000003-%s", walBuffer3Sha1)), + true, "check repo1 for WAL 3 file"); + TEST_RESULT_BOOL( + storageExistsP( + storageTest, strNewFmt("repo3/archive/test/9.4-1/0000000100000001/000000010000000100000003-%s", walBuffer3Sha1)), + true, "check repo3 for WAL 3 file"); + + // Remove the ready file to prevent WAL 3 from being considered for the next test + storageRemoveP(storagePgWrite(), strNew("pg_xlog/archive_status/000000010000000100000003.ready"), .errorOnMissing = true); + // Check that drop functionality works // ------------------------------------------------------------------------------------------------------------------------- // Remove status files diff --git 
a/test/src/module/command/backupTest.c b/test/src/module/command/backupTest.c index 47525f447..9f1af0a44 100644 --- a/test/src/module/command/backupTest.c +++ b/test/src/module/command/backupTest.c @@ -1465,6 +1465,7 @@ testRun(void) harnessCfgLoad(cfgCmdStanzaCreate, argList); cmdStanzaCreate(); + harnessLogResult("P00 INFO: stanza-create for stanza 'test1' on repo1"); // ------------------------------------------------------------------------------------------------------------------------- TEST_TITLE("error when pg appears to be running"); @@ -1640,6 +1641,7 @@ testRun(void) harnessCfgLoad(cfgCmdStanzaCreate, argList); cmdStanzaCreate(); + harnessLogResult("P00 INFO: stanza-create for stanza 'test1' on repo1"); // Load options argList = strLstNew(); @@ -2105,6 +2107,7 @@ testRun(void) harnessCfgLoad(cfgCmdStanzaUpgrade, argList); cmdStanzaUpgrade(); + harnessLogResult("P00 INFO: stanza-upgrade for stanza 'test1' on repo1"); // Load options argList = strLstNew(); @@ -2253,6 +2256,7 @@ testRun(void) harnessCfgLoad(cfgCmdStanzaUpgrade, argList); cmdStanzaUpgrade(); + harnessLogResult("P00 INFO: stanza-upgrade for stanza 'test1' on repo1"); // Load options argList = strLstNew(); @@ -2477,7 +2481,11 @@ testRun(void) // Load options StringList *argList = strLstNew(); strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); - hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath); + hrnCfgArgKeyRawZ(argList, cfgOptRepoPath, 1, "/repo-bogus"); + hrnCfgArgKeyRaw(argList, cfgOptRepoPath, 2, repoPath); + hrnCfgArgKeyRawZ(argList, cfgOptRepoRetentionFull, 2, "1"); + hrnCfgArgKeyRawBool(argList, cfgOptRepoHardlink, 2, true); + hrnCfgArgRawZ(argList, cfgOptRepo, "2"); hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path); hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1"); strLstAddZ(argList, "--" CFGOPT_TYPE "=" BACKUP_TYPE_INCR); diff --git a/test/src/module/command/checkTest.c b/test/src/module/command/checkTest.c index 67f48fbc9..a16945ddd 100644 --- a/test/src/module/command/checkTest.c +++ b/test/src/module/command/checkTest.c @@ -72,14 +72,15 @@ testRun(void) TEST_ERROR(cmdCheck(), ConfigError, "primary database not found\nHINT: check indexed pg-path/pg-host configurations"); - // Standby only, repo remote but more than one pg-path configured + // Standby only, one of multiple repos remote but more than one pg-path configured // ------------------------------------------------------------------------------------------------------------------------- argList = strLstNew(); strLstAdd(argList, stanzaOpt); strLstAdd(argList, pg1PathOpt); strLstAddZ(argList, "--pg8-path=/path/to/standby2"); strLstAddZ(argList, "--pg8-port=5433"); - strLstAddZ(argList, "--repo1-host=repo.domain.com"); + strLstAdd(argList, strNewFmt("--repo1-path=%s/repo", testPath())); + strLstAddZ(argList, "--repo2-host=repo.domain.com"); strLstAddZ(argList, "--archive-timeout=.5"); harnessCfgLoad(cfgCmdCheck, argList); @@ -175,32 +176,7 @@ testRun(void) "HINT: the pg1-path and pg1-port settings likely reference different clusters.", strZ(pgVersionToStr(PG_VERSION_92)), testPath(), strZ(pgVersionToStr(PG_VERSION_92)), strZ(pg1Path), strZ(pg1Path)); - // Standby - Stanza has not yet been created - // ------------------------------------------------------------------------------------------------------------------------- - harnessPqScriptSet((HarnessPq []) - { - HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_92, strZ(pg1Path), true, NULL, NULL), - HRNPQ_MACRO_OPEN_GE_92(8, "dbname='postgres' port=5433", PG_VERSION_92, strZ(pg8Path), 
false, NULL, NULL), - - HRNPQ_MACRO_CLOSE(8), - HRNPQ_MACRO_CLOSE(1), - - HRNPQ_MACRO_DONE() - }); - - TEST_ERROR_FMT( - cmdCheck(), FileMissingError, - "unable to load info file '%s/repo/archive/test1/archive.info' or '%s/repo/archive/test1/archive.info.copy':\n" - "FileMissingError: " STORAGE_ERROR_READ_MISSING "\n" - "FileMissingError: " STORAGE_ERROR_READ_MISSING "\n" - "HINT: archive.info cannot be opened but is required to push/get WAL segments.\n" - "HINT: is archive_command configured correctly in postgresql.conf?\n" - "HINT: has a stanza-create been performed?\n" - "HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.", - testPath(), testPath(), strZ(strNewFmt("%s/repo/archive/test1/archive.info", testPath())), - strZ(strNewFmt("%s/repo/archive/test1/archive.info.copy", testPath()))); - - // Standby - Stanza created + // Standby // ------------------------------------------------------------------------------------------------------------------------- // Create pg_control for primary storagePutP( @@ -208,33 +184,33 @@ testRun(void) pgControlTestToBuffer((PgControl){.version = PG_VERSION_92, .systemId = 6569239123849665679})); // Create info files - storagePutP( - storageNewWriteP(storageRepoWrite(), INFO_ARCHIVE_PATH_FILE_STR), - harnessInfoChecksum( - strNew( - "[db]\n" - "db-id=1\n" - "db-system-id=6569239123849665679\n" - "db-version=\"9.2\"\n" - "\n" - "[db:history]\n" - "1={\"db-id\":6569239123849665679,\"db-version\":\"9.2\"}\n"))); + const Buffer *archiveInfoContent = harnessInfoChecksum( + strNew( + "[db]\n" + "db-id=1\n" + "db-system-id=6569239123849665679\n" + "db-version=\"9.2\"\n" + "\n" + "[db:history]\n" + "1={\"db-id\":6569239123849665679,\"db-version\":\"9.2\"}\n")); - storagePutP( - storageNewWriteP(storageRepoWrite(), INFO_BACKUP_PATH_FILE_STR), - harnessInfoChecksum( - strNew( - "[db]\n" - "db-catalog-version=201608131\n" - "db-control-version=920\n" - "db-id=1\n" - "db-system-id=6569239123849665679\n" - "db-version=\"9.2\"\n" - "\n" - "[db:history]\n" - "1={\"db-catalog-version\":201608131,\"db-control-version\":920,\"db-system-id\":6569239123849665679," - "\"db-version\":\"9.2\"}\n"))); + storagePutP(storageNewWriteP(storageRepoIdxWrite(0), INFO_ARCHIVE_PATH_FILE_STR), archiveInfoContent); + const Buffer *backupInfoContent = harnessInfoChecksum( + strNew( + "[db]\n" + "db-catalog-version=201608131\n" + "db-control-version=920\n" + "db-id=1\n" + "db-system-id=6569239123849665679\n" + "db-version=\"9.2\"\n" + "\n" + "[db:history]\n" + "1={\"db-catalog-version\":201608131,\"db-control-version\":920,\"db-system-id\":6569239123849665679," + "\"db-version\":\"9.2\"}\n")); + storagePutP(storageNewWriteP(storageRepoIdxWrite(0), INFO_BACKUP_PATH_FILE_STR), backupInfoContent); + + // Single repo config - error when checking archive mode setting on database harnessPqScriptSet((HarnessPq []) { HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_92, strZ(pg1Path), true, NULL, NULL), @@ -248,17 +224,55 @@ testRun(void) // Error on primary but standby check ok TEST_ERROR_FMT(cmdCheck(), ArchiveDisabledError, "archive_mode must be enabled"); - harnessLogResult("P00 INFO: switch wal not performed because this is a standby"); + harnessLogResult( + "P00 INFO: check repo1 (standby)\n" + "P00 INFO: switch wal not performed because this is a standby"); + + // Multi-repo - add a second repo (repo2) + StringList *argListRepo2 = strLstDup(argList); + strLstAdd(argListRepo2, strNewFmt("--repo2-path=%s/repo2", 
testPath())); + harnessCfgLoad(cfgCmdCheck, argListRepo2); + + harnessPqScriptSet((HarnessPq []) + { + HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_92, strZ(pg1Path), true, NULL, NULL), + HRNPQ_MACRO_OPEN_GE_92(8, "dbname='postgres' port=5433", PG_VERSION_92, strZ(pg8Path), false, NULL, NULL), + + HRNPQ_MACRO_CLOSE(8), + HRNPQ_MACRO_CLOSE(1), + + HRNPQ_MACRO_DONE() + }); + + // Stanza has not yet been created on repo2 but is created (and checked) on repo1 + TEST_ERROR_FMT( + cmdCheck(), FileMissingError, + "unable to load info file '%s/repo2/archive/test1/archive.info' or '%s/repo2/archive/test1/archive.info.copy':\n" + "FileMissingError: " STORAGE_ERROR_READ_MISSING "\n" + "FileMissingError: " STORAGE_ERROR_READ_MISSING "\n" + "HINT: archive.info cannot be opened but is required to push/get WAL segments.\n" + "HINT: is archive_command configured correctly in postgresql.conf?\n" + "HINT: has a stanza-create been performed?\n" + "HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.", + testPath(), testPath(), strZ(strNewFmt("%s/repo2/archive/test1/archive.info", testPath())), + strZ(strNewFmt("%s/repo2/archive/test1/archive.info.copy", testPath()))); + harnessLogResult("P00 INFO: check repo1 (standby)\nP00 INFO: check repo2 (standby)"); // Single primary // ------------------------------------------------------------------------------------------------------------------------- + // Multi repo argList = strLstNew(); strLstAdd(argList, stanzaOpt); strLstAdd(argList, pg1PathOpt); strLstAdd(argList, strNewFmt("--repo1-path=%s/repo", testPath())); + strLstAdd(argList, strNewFmt("--repo2-path=%s/repo2", testPath())); strLstAddZ(argList, "--archive-timeout=.5"); harnessCfgLoad(cfgCmdCheck, argList); + // Create stanza files on repo2 + storagePutP(storageNewWriteP(storageRepoIdxWrite(1), INFO_ARCHIVE_PATH_FILE_STR), archiveInfoContent); + storagePutP(storageNewWriteP(storageRepoIdxWrite(1), INFO_BACKUP_PATH_FILE_STR), backupInfoContent); + // Error when WAL segment not found harnessPqScriptSet((HarnessPq []) { @@ -275,13 +289,17 @@ testRun(void) "HINT: check the archive_command to ensure that all options are correct (especially --stanza).\n" "HINT: check the PostgreSQL server log for errors.\n" "HINT: run the 'start' command if the stanza was previously stopped."); + harnessLogResult( + "P00 INFO: check repo1 configuration (primary)\n" + "P00 INFO: check repo2 configuration (primary)\n" + "P00 INFO: check repo1 archive for WAL (primary)"); // Create WAL segment Buffer *buffer = bufNew(16 * 1024 * 1024); memset(bufPtr(buffer), 0, bufSize(buffer)); bufUsedSet(buffer, bufSize(buffer)); - // WAL segment is found + // WAL segment switch is performed once for all repos harnessPqScriptSet((HarnessPq []) { HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_92, strZ(pg1Path), false, NULL, NULL), @@ -293,7 +311,13 @@ testRun(void) storagePutP( storageNewWriteP( - storageRepoWrite(), + storageRepoIdxWrite(0), + strNew(STORAGE_REPO_ARCHIVE "/9.2-1/000000010000000100000001-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")), + buffer); + + storagePutP( + storageNewWriteP( + storageRepoIdxWrite(1), strNew(STORAGE_REPO_ARCHIVE "/9.2-1/000000010000000100000001-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")), buffer); @@ -301,9 +325,15 @@ testRun(void) harnessLogResult( strZ( strNewFmt( + "P00 INFO: check repo1 configuration (primary)\n" + "P00 INFO: check repo2 configuration (primary)\n" + "P00 INFO: check repo1 archive for WAL 
(primary)\n" "P00 INFO: WAL segment 000000010000000100000001 successfully archived to '%s/repo/archive/test1/9.2-1/" - "0000000100000001/000000010000000100000001-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'", - testPath()))); + "0000000100000001/000000010000000100000001-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' on repo1\n" + "P00 INFO: check repo2 archive for WAL (primary)\n" + "P00 INFO: WAL segment 000000010000000100000001 successfully archived to '%s/repo2/archive/test1/9.2-1/" + "0000000100000001/000000010000000100000001-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' on repo2", + testPath(), testPath()))); // Primary == NULL (for test coverage) // ------------------------------------------------------------------------------------------------------------------------- @@ -511,19 +541,24 @@ testRun(void) // Create info files TEST_RESULT_VOID(cmdStanzaCreate(), "stanza create - encryption"); + harnessLogResult("P00 INFO: stanza-create for stanza 'test1' on repo1"); // Version mismatch TEST_ERROR_FMT( - checkStanzaInfoPg(storageRepo(), PG_VERSION_94, 6569239123849665679, cipherType(cfgOptionStr(cfgOptRepoCipherType)), - cfgOptionStr(cfgOptRepoCipherPass)), FileInvalidError, + checkStanzaInfoPg( + storageRepoIdx(0), PG_VERSION_94, 6569239123849665679, cipherType(cfgOptionIdxStr(cfgOptRepoCipherType, 0)), + cfgOptionIdxStr(cfgOptRepoCipherPass, 0)), + FileInvalidError, "backup and archive info files exist but do not match the database\n" "HINT: is this the correct stanza?\n" "HINT: did an error occur during stanza-upgrade?"); // SystemId mismatch TEST_ERROR_FMT( - checkStanzaInfoPg(storageRepo(), PG_VERSION_96, 6569239123849665699, cipherType(cfgOptionStr(cfgOptRepoCipherType)), - cfgOptionStr(cfgOptRepoCipherPass)), FileInvalidError, + checkStanzaInfoPg( + storageRepoIdx(0), PG_VERSION_96, 6569239123849665699, cipherType(cfgOptionIdxStr(cfgOptRepoCipherType, 0)), + cfgOptionIdxStr(cfgOptRepoCipherPass, 0)), + FileInvalidError, "backup and archive info files exist but do not match the database\n" "HINT: is this the correct stanza?\n" "HINT: did an error occur during stanza-upgrade?"); diff --git a/test/src/module/command/expireTest.c b/test/src/module/command/expireTest.c index f6f51e727..db5aa6718 100644 --- a/test/src/module/command/expireTest.c +++ b/test/src/module/command/expireTest.c @@ -846,57 +846,70 @@ testRun(void) "P00 INFO: [DRY-RUN] expire full backup 20181119-152138F\n" "P00 INFO: [DRY-RUN] remove expired backup 20181119-152138F"); - // Save a copy of the info files for a later test - storageCopy( - storageNewReadP(storageTest, backupInfoFileName), - storageNewWriteP(storageTest, strNewFmt("%s%s", strZ(backupInfoFileName), ".save"))); - storageCopy( - storageNewReadP(storageTest, archiveInfoFileName), - storageNewWriteP(storageTest, strNewFmt("%s%s", strZ(archiveInfoFileName), ".save"))); - //-------------------------------------------------------------------------------------------------------------------------- TEST_TITLE("expire via backup command"); + // Copy the repo to another repo + TEST_SYSTEM_FMT("mkdir %s/repo2", testPath()); + TEST_SYSTEM_FMT("cp -r %s/repo/* %s/repo2/", testPath(), testPath()); + + // Configure multi-repo and set the repo option to expire the second repo (non-default) files argList = strLstDup(argListBase); strLstAddZ(argList, "--repo1-retention-full=2"); strLstAddZ(argList, "--repo1-retention-diff=3"); strLstAddZ(argList, "--repo1-retention-archive=2"); strLstAddZ(argList, "--repo1-retention-archive-type=diff"); + hrnCfgArgKeyRawFmt(argList, 
cfgOptRepoPath, 2, "%s/repo2", testPath()); + hrnCfgArgKeyRawZ(argList, cfgOptRepoRetentionFull, 2, "2"); + hrnCfgArgKeyRawZ(argList, cfgOptRepoRetentionDiff, 2, "3"); + hrnCfgArgKeyRawZ(argList, cfgOptRepoRetentionArchive, 2, "2"); + hrnCfgArgKeyRawZ(argList, cfgOptRepoRetentionArchiveType, 2, "diff"); + hrnCfgArgRawZ(argList, cfgOptRepo, "2"); strLstAdd(argList, strNewFmt("--pg1-path=%s/pg", testPath())); harnessCfgLoad(cfgCmdBackup, argList); TEST_RESULT_VOID(cmdExpire(), "via backup command: expire last backup in archive sub path and remove sub path"); TEST_RESULT_BOOL( - storagePathExistsP(storageTest, strNewFmt("%s/%s", strZ(archiveStanzaPath), "9.4-1/0000000100000000")), false, - "archive sub path removed"); + storagePathExistsP(storageTest, STRDEF("repo2/archive/db/9.4-1/0000000100000000")), false, + "archive sub path removed repo2"); + TEST_RESULT_BOOL( + storagePathExistsP(storageTest, strNewFmt("%s/9.4-1/0000000100000000", strZ(archiveStanzaPath))), true, + "archive sub path repo1 not removed"); + + String *backupLabel = strNew("20181119-152138F"); + TEST_ASSIGN( + infoBackup, infoBackupLoadFile(storageTest, STRDEF("repo2/backup/db/backup.info"), cipherTypeNone, NULL), + "get backup.info repo2"); + TEST_RESULT_BOOL(strLstExists(infoBackupDataLabelList(infoBackup, NULL), backupLabel), false, "backup removed from repo2"); + TEST_ASSIGN(infoBackup, infoBackupLoadFile(storageTest, backupInfoFileName, cipherTypeNone, NULL), "get backup.info repo1"); + TEST_RESULT_BOOL(strLstExists(infoBackupDataLabelList(infoBackup, NULL), backupLabel), true, "backup exists repo1"); + harnessLogResult( "P00 INFO: expire full backup 20181119-152138F\n" "P00 INFO: remove expired backup 20181119-152138F"); //-------------------------------------------------------------------------------------------------------------------------- - TEST_TITLE("expire command - no dry run"); + TEST_TITLE("expire command requires repo option"); argList = strLstDup(argListBase); + hrnCfgArgKeyRawFmt(argList, cfgOptRepoPath, 2, "%s/repo2", testPath()); + hrnCfgArgKeyRawZ(argList, cfgOptRepoRetentionFull, 2, "3"); + + TEST_ERROR_FMT( + harnessCfgLoad(cfgCmdExpire, argList), OptionRequiredError, "expire command requires option: repo\n" + "HINT: this command requires a specific repository to operate on"); + + //-------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("expire command - no dry run"); + + // Add to previous list and specify repo strLstAddZ(argList, "--repo1-retention-full=2"); strLstAddZ(argList, "--repo1-retention-diff=3"); strLstAddZ(argList, "--repo1-retention-archive=2"); strLstAddZ(argList, "--repo1-retention-archive-type=diff"); + hrnCfgArgRawZ(argList, cfgOptRepo, "1"); harnessCfgLoad(cfgCmdExpire, argList); - // Restore info files from a previous test - storageCopy( - storageNewReadP(storageTest, strNewFmt("%s%s", strZ(backupInfoFileName), ".save")), - storageNewWriteP(storageTest, backupInfoFileName)); - storageCopy( - storageNewReadP(storageTest, strNewFmt("%s%s", strZ(archiveInfoFileName), ".save")), - storageNewWriteP(storageTest, archiveInfoFileName)); - - // Write out manifest and archive that will be removed - storagePutP( - storageNewWriteP(storageTest, strNewFmt("%s/20181119-152138F/" BACKUP_MANIFEST_FILE, strZ(backupStanzaPath))), - BUFSTRDEF("tmp")); - archiveGenerate(storageTest, archiveStanzaPath, 2, 2, "9.4-1", "0000000100000000"); - TEST_RESULT_VOID(cmdExpire(), "expire last backup in archive sub path and remove 
sub path"); TEST_RESULT_BOOL( storagePathExistsP(storageTest, strNewFmt("%s/%s", strZ(archiveStanzaPath), "9.4-1/0000000100000000")), false, diff --git a/test/src/module/command/infoTest.c b/test/src/module/command/infoTest.c index 56933324a..fa3a8b377 100644 --- a/test/src/module/command/infoTest.c +++ b/test/src/module/command/infoTest.c @@ -3,9 +3,12 @@ Test Info Command ***********************************************************************************************************************************/ #include "storage/posix/storage.h" +#include "common/crypto/cipherBlock.h" #include "common/harnessConfig.h" #include "common/harnessInfo.h" #include "common/harnessFork.h" +#include "common/io/bufferRead.h" +#include "common/io/bufferWrite.h" /*********************************************************************************************************************************** Test Run @@ -53,8 +56,19 @@ testRun(void) "{" "\"archive\":[]," "\"backup\":[]," + "\"cipher\":\"none\"," "\"db\":[]," "\"name\":\"stanza1\"," + "\"repo\":[" + "{" + "\"cipher\":\"none\"," + "\"key\":1," + "\"status\":{" + "\"code\":1," + "\"message\":\"missing stanza path\"" + "}" + "}" + "]," "\"status\":{" "\"code\":1," "\"lock\":{\"backup\":{\"held\":false}}," @@ -97,6 +111,16 @@ testRun(void) "\"cipher\":\"none\"," "\"db\":[]," "\"name\":\"stanza1\"," + "\"repo\":[" + "{" + "\"cipher\":\"none\"," + "\"key\":1," + "\"status\":{" + "\"code\":3," + "\"message\":\"missing stanza data\"" + "}" + "}" + "]," "\"status\":{" "\"code\":3," "\"lock\":{\"backup\":{\"held\":false}}," @@ -110,9 +134,6 @@ testRun(void) //-------------------------------------------------------------------------------------------------------------------------- String *content = strNew ( - "[cipher]\n" - "cipher-pass=\"12345\"\n" - "\n" "[db]\n" "db-catalog-version=201409291\n" "db-control-version=942\n" @@ -151,7 +172,7 @@ testRun(void) content = strNew ( "[db]\n" - "db-id=1\n" + "db-id=3\n" "db-system-id=6569239123849665679\n" "db-version=\"9.4\"\n" "\n" @@ -167,6 +188,10 @@ testRun(void) harnessInfoChecksum(content)), "put archive info to file"); + // Create a WAL directory in 9.3-2 but since there are no WAL files or backups it will not show + String *archiveDb2_1 = strNewFmt("%s/9.3-2/0000000100000000", strZ(archiveStanza1Path)); + TEST_RESULT_VOID(storagePathCreateP(storageLocalWrite(), archiveDb2_1), "create empty db2 archive WAL1 directory"); + // archive section will cross reference backup db-id 2 to archive db-id 3 but db section will only use the db-ids from // backup.info. Execute while a backup lock is held. 
HARNESS_FORK_BEGIN() @@ -194,7 +219,8 @@ testRun(void) "\"archive\":[" "{" "\"database\":{" - "\"id\":2" + "\"id\":2," + "\"repo-key\":1" "}," "\"id\":\"9.4-3\"," "\"max\":null," @@ -202,21 +228,33 @@ testRun(void) "}" "]," "\"backup\":[]," - "\"cipher\":\"aes-256-cbc\"," + "\"cipher\":\"none\"," "\"db\":[" "{" "\"id\":1," + "\"repo-key\":1," "\"system-id\":6569239123849665666," "\"version\":\"9.3\"" "}," "{" "\"id\":2," + "\"repo-key\":1," "\"system-id\":6569239123849665679," "\"version\":\"9.4\"" "}" "]," - "\"name\":\"stanza1\"," - "\"status\":{" + "\"name\":\"stanza1\"," + "\"repo\":[" + "{" + "\"cipher\":\"none\"," + "\"key\":1," + "\"status\":{" + "\"code\":2," + "\"message\":\"no valid backups\"" + "}" + "}" + "]," + "\"status\":{" "\"code\":2," "\"lock\":{\"backup\":{\"held\":true}}," "\"message\":\"no valid backups\"" @@ -230,10 +268,10 @@ testRun(void) infoRender(), "stanza: stanza1\n" " status: error (no valid backups, backup/expire running)\n" - " cipher: aes-256-cbc\n" + " cipher: none\n" "\n" " db (current)\n" - " wal archive min/max (9.4-3): none present\n", + " wal archive min/max (9.4): none present\n", "text - single stanza, no valid backups, backup/expire lock detected"); } @@ -243,29 +281,44 @@ testRun(void) // Add WAL segments //-------------------------------------------------------------------------------------------------------------------------- - String *archiveDb3 = strNewFmt("%s/9.4-3/0000000100000000", strZ(archiveStanza1Path)); + String *archiveDb3 = strNewFmt("%s/9.4-3/0000000300000000", strZ(archiveStanza1Path)); TEST_RESULT_VOID(storagePathCreateP(storageLocalWrite(), archiveDb3), "create db3 archive WAL1 directory"); String *archiveDb3Wal = strNewFmt( - "%s/000000010000000000000004-47dff2b7552a9d66e4bae1a762488a6885e7082c.gz", strZ(archiveDb3)); + "%s/000000030000000000000001-47dff2b7552a9d66e4bae1a762488a6885e7082c.gz", strZ(archiveDb3)); TEST_RESULT_VOID(storagePutP(storageNewWriteP(storageLocalWrite(), archiveDb3Wal), bufNew(0)), "touch WAL3 file"); StringList *argList2 = strLstDup(argListText); strLstAddZ(argList2, "--stanza=stanza1"); + hrnCfgArgKeyRawFmt(argList2, cfgOptRepoPath, 2, "%s/repo2", testPath()); + hrnCfgArgRawZ(argList2, cfgOptRepo, "1"); harnessCfgLoad(cfgCmdInfo, argList2); TEST_RESULT_STR_Z( infoRender(), "stanza: stanza1\n" " status: error (no valid backups)\n" - " cipher: aes-256-cbc\n" + " cipher: none\n" "\n" " db (current)\n" - " wal archive min/max (9.4-3): 000000010000000000000004/000000010000000000000004\n", - "text - single stanza, one wal segment"); + " wal archive min/max (9.4): 000000030000000000000001/000000030000000000000001\n", + "text - multi-repo, single stanza, one wal segment"); TEST_RESULT_VOID(storageRemoveP(storageLocalWrite(), archiveDb3Wal, .errorOnMissing = true), "remove WAL file"); + //-------------------------------------------------------------------------------------------------------------------------- + argList2 = strLstDup(argListText); + strLstAddZ(argList2, "--stanza=stanza1"); + hrnCfgArgKeyRawFmt(argList2, cfgOptRepoPath, 2, "%s/repo2", testPath()); + hrnCfgArgRawZ(argList2, cfgOptRepo, "2"); + harnessCfgLoad(cfgCmdInfo, argList2); + + TEST_RESULT_STR_Z( + infoRender(), + "stanza: stanza1\n" + " status: error (missing stanza path)\n", + "text - multi-repo, requested stanza missing on selected repo"); + // Coverage for stanzaStatus branches //-------------------------------------------------------------------------------------------------------------------------- String *archiveDb1_1 = 
strNewFmt("%s/9.4-1/0000000100000000", strZ(archiveStanza1Path)); @@ -286,6 +339,15 @@ testRun(void) String *archiveDb1_3 = strNewFmt("%s/9.4-1/0000000300000000", strZ(archiveStanza1Path)); TEST_RESULT_VOID(storagePathCreateP(storageLocalWrite(), archiveDb1_3), "create db1 archive WAL3 directory"); + // Db1 and Db3 have same system-id and db-version so consider them the same for WAL reporting + TEST_RESULT_VOID( + storagePutP(storageNewWriteP(storageLocalWrite(), archiveDb3Wal), bufNew(0)), "create db3 archive WAL3 file"); + + // Create a WAL file in 9.3-2 so that a prior will show + TEST_RESULT_INT(system( + strZ(strNewFmt("touch %s", strZ(strNewFmt("%s/000000010000000000000001-ac61b8f1ec7b1e6c3eaee9345214595eb7daa9a1.gz", + strZ(archiveDb2_1)))))), 0, "touch WAL1 file in prior"); + harnessCfgLoad(cfgCmdInfo, argList); content = strNew ( @@ -304,6 +366,13 @@ testRun(void) "\"backup-type\":\"full\",\"db-id\":1,\"option-archive-check\":true,\"option-archive-copy\":false," "\"option-backup-standby\":false,\"option-checksum-page\":true,\"option-compress\":true,\"option-hardlink\":false," "\"option-online\":true}\n" + "20201116-154900F={\"backrest-format\":5,\"backrest-version\":\"2.30\"," + "\"backup-archive-start\":\"000000030000000000000001\",\"backup-archive-stop\":\"000000030000000000000001\"," + "\"backup-info-repo-size\":3159776,\"backup-info-repo-size-delta\":3159,\"backup-info-size\":26897033," + "\"backup-info-size-delta\":26897033,\"backup-timestamp-start\":1605541676,\"backup-timestamp-stop\":1605541680," + "\"backup-type\":\"full\",\"db-id\":3,\"option-archive-check\":true,\"option-archive-copy\":false," + "\"option-backup-standby\":false,\"option-checksum-page\":true,\"option-compress\":true,\"option-hardlink\":false," + "\"option-online\":true}\n" "\n" "[db:history]\n" "1={\"db-catalog-version\":201409291,\"db-control-version\":942,\"db-system-id\":6569239123849665679," @@ -346,7 +415,8 @@ testRun(void) "\"archive\":[" "{" "\"database\":{" - "\"id\":1" + "\"id\":1," + "\"repo-key\":1" "}," "\"id\":\"9.4-1\"," "\"max\":\"000000020000000000000003\"," @@ -354,11 +424,21 @@ testRun(void) "}," "{" "\"database\":{" - "\"id\":3" + "\"id\":2," + "\"repo-key\":1" + "}," + "\"id\":\"9.3-2\"," + "\"max\":\"000000010000000000000001\"," + "\"min\":\"000000010000000000000001\"" + "}," + "{" + "\"database\":{" + "\"id\":3," + "\"repo-key\":1" "}," "\"id\":\"9.4-3\"," - "\"max\":null," - "\"min\":null" + "\"max\":\"000000030000000000000001\"," + "\"min\":\"000000030000000000000001\"" "}" "]," "\"backup\":[" @@ -372,7 +452,8 @@ testRun(void) "\"version\":\"2.04\"" "}," "\"database\":{" - "\"id\":1" + "\"id\":1," + "\"repo-key\":1" "}," "\"info\":{" "\"delta\":26897030," @@ -390,28 +471,71 @@ testRun(void) "\"stop\":1542383289" "}," "\"type\":\"full\"" + "}," + "{" + "\"archive\":{" + "\"start\":\"000000030000000000000001\"," + "\"stop\":\"000000030000000000000001\"" + "}," + "\"backrest\":{" + "\"format\":5," + "\"version\":\"2.30\"" + "}," + "\"database\":{" + "\"id\":3," + "\"repo-key\":1" + "}," + "\"info\":{" + "\"delta\":26897033," + "\"repository\":{" + "\"delta\":3159," + "\"size\":3159776" + "}," + "\"size\":26897033" + "}," + "\"label\":\"20201116-154900F\"," + "\"prior\":null," + "\"reference\":null," + "\"timestamp\":{" + "\"start\":1605541676," + "\"stop\":1605541680" + "}," + "\"type\":\"full\"" "}" "]," "\"cipher\":\"none\"," "\"db\":[" "{" "\"id\":1," + "\"repo-key\":1," "\"system-id\":6569239123849665679," "\"version\":\"9.4\"" "}," "{" "\"id\":2," + "\"repo-key\":1," 
"\"system-id\":6569239123849665666," "\"version\":\"9.3\"" "}," "{" "\"id\":3," + "\"repo-key\":1," "\"system-id\":6569239123849665679," "\"version\":\"9.4\"" "}" "]," - "\"name\":\"stanza1\"," - "\"status\":{" + "\"name\":\"stanza1\"," + "\"repo\":[" + "{" + "\"cipher\":\"none\"," + "\"key\":1," + "\"status\":{" + "\"code\":0," + "\"message\":\"ok\"" + "}" + "}" + "]," + "\"status\":{" "\"code\":0," "\"lock\":{\"backup\":{\"held\":true}}," "\"message\":\"ok\"" @@ -428,24 +552,37 @@ testRun(void) " cipher: none\n" "\n" " db (prior)\n" - " wal archive min/max (9.4-1): 000000010000000000000002/000000020000000000000003\n" + " wal archive min/max (9.3): 000000010000000000000001/000000010000000000000001\n" + "\n" + " db (current)\n" + " wal archive min/max (9.4): 000000010000000000000002/000000030000000000000001\n" "\n" " full backup: 20181116-154756F\n" " timestamp start/stop: 2018-11-16 15:47:56 / 2018-11-16 15:48:09\n" " wal start/stop: n/a\n" " database size: 25.7MB, backup size: 25.7MB\n" - " repository size: 3MB, repository backup size: 3KB\n" + " repo1: size: 3MB, backup size: 3KB\n" "\n" - " db (current)\n" - " wal archive min/max (9.4-3): none present\n", + " full backup: 20201116-154900F\n" + " timestamp start/stop: 2020-11-16 15:47:56 / 2020-11-16 15:48:00\n" + " wal start/stop: 000000030000000000000001 / 000000030000000000000001\n" + " database size: 25.7MB, backup size: 25.7MB\n" + " repo1: size: 3MB, backup size: 3KB\n", "text - single stanza, valid backup, no priors, no archives in latest DB, backup/expire lock detected"); } HARNESS_FORK_PARENT_END(); } HARNESS_FORK_END(); - // backup.info/archive.info files exist, backups exist, archives exist + // Cleanup + storagePathRemoveP(storageLocalWrite(), strNewFmt("%s/9.3-2", strZ(archiveStanza1Path)), .recurse = true); + storagePathRemoveP(storageLocalWrite(), strNewFmt("%s/9.4-3", strZ(archiveStanza1Path)), .recurse = true); + + // backup.info/archive.info files exist, backups exist, archives exist, multi-repo (mixed) with one stanza existing on both + // repos and the db history is different between the repos //-------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("mixed multi-repo"); + content = strNew ( "[db]\n" @@ -462,7 +599,7 @@ testRun(void) storagePutP( storageNewWriteP(storageLocalWrite(), strNewFmt("%s/archive.info", strZ(archiveStanza1Path))), harnessInfoChecksum(content)), - "put archive info to file - stanza1"); + "put archive info to file - stanza1, repo1"); content = strNew ( @@ -472,26 +609,43 @@ testRun(void) "\"backup-archive-start\":\"000000010000000000000002\",\"backup-archive-stop\":\"000000010000000000000002\"," "\"backup-info-repo-size\":2369186,\"backup-info-repo-size-delta\":2369186," "\"backup-info-size\":20162900,\"backup-info-size-delta\":20162900," - "\"backup-timestamp-start\":1542640898,\"backup-timestamp-stop\":1542640911,\"backup-type\":\"full\"," + "\"backup-timestamp-start\":1542640898,\"backup-timestamp-stop\":1542640899,\"backup-type\":\"full\"," "\"db-id\":1,\"option-archive-check\":true,\"option-archive-copy\":false,\"option-backup-standby\":false," "\"option-checksum-page\":true,\"option-compress\":true,\"option-hardlink\":false,\"option-online\":true}\n" "20181119-152138F_20181119-152152D={" "\"backrest-format\":5,\"backrest-version\":\"2.08dev\",\"backup-archive-start\":\"000000010000000000000003\"," - "\"backup-archive-stop\":\"000000010000000000000003\",\"backup-info-repo-size\":2369186," + 
"\"backup-archive-stop\":\"000000020000000000000003\",\"backup-info-repo-size\":2369186," "\"backup-info-repo-size-delta\":346,\"backup-info-size\":20162900,\"backup-info-size-delta\":8428," "\"backup-prior\":\"20181119-152138F\",\"backup-reference\":[\"20181119-152138F\"]," "\"backup-timestamp-start\":1542640912,\"backup-timestamp-stop\":1542640915,\"backup-type\":\"diff\"," "\"db-id\":1,\"option-archive-check\":true,\"option-archive-copy\":false,\"option-backup-standby\":false," "\"option-checksum-page\":true,\"option-compress\":true,\"option-hardlink\":false,\"option-online\":true}\n" - "20181119-152138F_20181119-152152I={" + "20181119-152138F_20181119-152155I={" "\"backrest-format\":5,\"backrest-version\":\"2.08dev\",\"backup-archive-start\":\"000000010000000000000003\"," "\"backup-info-repo-size\":2369186," "\"backup-info-repo-size-delta\":346,\"backup-info-size\":20162900,\"backup-info-size-delta\":8428," "\"backup-prior\":\"20181119-152138F_20181119-152152D\"," "\"backup-reference\":[\"20181119-152138F\",\"20181119-152138F_20181119-152152D\"]," - "\"backup-timestamp-start\":1542640912,\"backup-timestamp-stop\":1542640915,\"backup-type\":\"incr\"," + "\"backup-timestamp-start\":1542640915,\"backup-timestamp-stop\":1542640917,\"backup-type\":\"incr\"," "\"db-id\":1,\"option-archive-check\":true,\"option-archive-copy\":false,\"option-backup-standby\":false," "\"option-checksum-page\":true,\"option-compress\":true,\"option-hardlink\":false,\"option-online\":true}\n" + "20201116-155000F={" + "\"backrest-format\":5,\"backrest-version\":\"2.30\"," + "\"backup-archive-start\":\"000000010000000000000002\",\"backup-archive-stop\":\"000000010000000000000003\"," + "\"backup-info-repo-size\":3159000,\"backup-info-repo-size-delta\":3100,\"backup-info-size\":26897000," + "\"backup-info-size-delta\":26897020,\"backup-timestamp-start\":1605541800,\"backup-timestamp-stop\":1605541802," + "\"backup-type\":\"full\",\"db-id\":2,\"option-archive-check\":true,\"option-archive-copy\":true," + "\"option-backup-standby\":false,\"option-checksum-page\":false,\"option-compress\":false,\"option-hardlink\":false," + "\"option-online\":true}\n" + "20201116-155000F_20201119-152100I={" + "\"backrest-format\":5,\"backrest-version\":\"2.30\"," + "\"backup-archive-start\":\"000000010000000000000005\",\"backup-archive-stop\":\"000000010000000000000005\"," + "\"backup-info-repo-size\":2369186," + "\"backup-info-repo-size-delta\":346,\"backup-info-size\":20162900,\"backup-info-size-delta\":8428," + "\"backup-prior\":\"20201116-155000F\",\"backup-reference\":[\"20201116-155000F\"]," + "\"backup-timestamp-start\":1605799260,\"backup-timestamp-stop\":1605799263,\"backup-type\":\"incr\"," + "\"db-id\":2,\"option-archive-check\":true,\"option-archive-copy\":false,\"option-backup-standby\":false," + "\"option-checksum-page\":true,\"option-compress\":true,\"option-hardlink\":false,\"option-online\":true}\n" "\n" "[db]\n" "db-catalog-version=201510051\n" @@ -511,10 +665,9 @@ testRun(void) storagePutP( storageNewWriteP(storageLocalWrite(), strNewFmt("%s/backup.info", strZ(backupStanza1Path))), harnessInfoChecksum(content)), - "put backup info to file - stanza1"); + "put backup info to file - stanza1, repo1"); // Manifest with all features - // ------------------------------------------------------------------------------------------------------------------------- #define TEST_MANIFEST_HEADER \ "[backup]\n" \ "backup-archive-start=\"000000030000028500000089\"\n" \ @@ -634,13 +787,13 @@ testRun(void) TEST_RESULT_VOID( 
storagePutP(storageNewWriteP(storageLocalWrite(), - strNewFmt("%s/20181119-152138F_20181119-152152I/" BACKUP_MANIFEST_FILE, strZ(backupStanza1Path))), contentLoad), - "write manifest - stanza1"); + strNewFmt("%s/20181119-152138F_20181119-152155I/" BACKUP_MANIFEST_FILE, strZ(backupStanza1Path))), contentLoad), + "write manifest - stanza1, repo1"); String *archiveStanza2Path = strNewFmt("%s/stanza2", strZ(archivePath)); String *backupStanza2Path = strNewFmt("%s/stanza2", strZ(backupPath)); - TEST_RESULT_VOID(storagePathCreateP(storageLocalWrite(), backupStanza1Path), "backup stanza2 directory"); - TEST_RESULT_VOID(storagePathCreateP(storageLocalWrite(), archiveStanza1Path), "archive stanza2 directory"); + TEST_RESULT_VOID(storagePathCreateP(storageLocalWrite(), backupStanza1Path), "backup path stanza2 directory, repo1"); + TEST_RESULT_VOID(storagePathCreateP(storageLocalWrite(), archiveStanza1Path), "archive path stanza2 directory, repo1"); content = strNew ( @@ -657,7 +810,7 @@ testRun(void) storagePutP( storageNewWriteP(storageLocalWrite(), strNewFmt("%s/archive.info", strZ(archiveStanza2Path))), harnessInfoChecksum(content)), - "put archive info to file - stanza2"); + "put archive info to file - stanza2, repo1"); content = strNew ( @@ -677,221 +830,753 @@ testRun(void) storagePutP( storageNewWriteP(storageLocalWrite(), strNewFmt("%s/backup.info", strZ(backupStanza2Path))), harnessInfoChecksum(content)), - "put backup info to file - stanza2"); + "put backup info to file - stanza2, repo1"); - harnessCfgLoad(cfgCmdInfo, argList); - TEST_RESULT_STR_Z( - infoRender(), - "[" - "{" - "\"archive\":[" - "{" - "\"database\":{" - "\"id\":1" - "}," - "\"id\":\"9.4-1\"," - "\"max\":\"000000020000000000000003\"," - "\"min\":\"000000010000000000000002\"" - "}," - "{" - "\"database\":{" - "\"id\":2" - "}," - "\"id\":\"9.5-2\"," - "\"max\":null," - "\"min\":null" - "}" - "]," - "\"backup\":[" - "{" - "\"archive\":{" - "\"start\":\"000000010000000000000002\"," - "\"stop\":\"000000010000000000000002\"" - "}," - "\"backrest\":{" - "\"format\":5," - "\"version\":\"2.08dev\"" - "}," - "\"database\":{" - "\"id\":1" - "}," - "\"info\":{" - "\"delta\":20162900," - "\"repository\":{" - "\"delta\":2369186," - "\"size\":2369186" - "}," - "\"size\":20162900" - "}," - "\"label\":\"20181119-152138F\"," - "\"prior\":null," - "\"reference\":null," - "\"timestamp\":{" - "\"start\":1542640898," - "\"stop\":1542640911" - "}," - "\"type\":\"full\"" - "}," - "{" - "\"archive\":{" - "\"start\":\"000000010000000000000003\"," - "\"stop\":\"000000010000000000000003\"" - "}," - "\"backrest\":{" - "\"format\":5," - "\"version\":\"2.08dev\"" - "}," - "\"database\":{" - "\"id\":1" - "}," - "\"info\":{" - "\"delta\":8428," - "\"repository\":{" - "\"delta\":346," - "\"size\":2369186" - "}," - "\"size\":20162900" - "}," - "\"label\":\"20181119-152138F_20181119-152152D\"," - "\"prior\":\"20181119-152138F\"," - "\"reference\":[" - "\"20181119-152138F\"" - "]," - "\"timestamp\":{" - "\"start\":1542640912," - "\"stop\":1542640915" - "}," - "\"type\":\"diff\"" - "}," - "{" - "\"archive\":{" - "\"start\":\"000000010000000000000003\"," - "\"stop\":null" - "}," - "\"backrest\":{" - "\"format\":5," - "\"version\":\"2.08dev\"" - "}," - "\"database\":{" - "\"id\":1" - "}," - "\"info\":{" - "\"delta\":8428," - "\"repository\":{" - "\"delta\":346," - "\"size\":2369186" - "}," - "\"size\":20162900" - "}," - "\"label\":\"20181119-152138F_20181119-152152I\"," - "\"prior\":\"20181119-152138F_20181119-152152D\"," - "\"reference\":[" - 
"\"20181119-152138F\"," - "\"20181119-152138F_20181119-152152D\"" - "]," - "\"timestamp\":{" - "\"start\":1542640912," - "\"stop\":1542640915" - "}," - "\"type\":\"incr\"" - "}" - "]," - "\"cipher\":\"none\"," - "\"db\":[" - "{" - "\"id\":1," - "\"system-id\":6625592122879095702," - "\"version\":\"9.4\"" - "}," - "{" - "\"id\":2," - "\"system-id\":6626363367545678089," - "\"version\":\"9.5\"" - "}" - "]," - "\"name\":\"stanza1\"," - "\"status\":{" - "\"code\":0," - "\"lock\":{\"backup\":{\"held\":false}}," - "\"message\":\"ok\"" - "}" - "}," - "{" - "\"archive\":[" - "{" - "\"database\":{" - "\"id\":1" - "}," - "\"id\":\"9.4-1\"," - "\"max\":null," - "\"min\":null" - "}" - "]," - "\"backup\":[]," - "\"cipher\":\"none\"," - "\"db\":[" - "{" - "\"id\":1," - "\"system-id\":6625633699176220261," - "\"version\":\"9.4\"" - "}" - "]," - "\"name\":\"stanza2\"," - "\"status\":{" - "\"code\":2," - "\"lock\":{\"backup\":{\"held\":false}}," - "\"message\":\"no valid backups\"" - "}" - "}" - "]", - "json - multiple stanzas, one with valid backups, archives in latest DB"); + // Create encrypted repo2 + String *repo2archivePath = strNewFmt("%s/repo2/archive", testPath()); + String *repo2backupPath = strNewFmt("%s/repo2/backup", testPath()); + storagePathCreateP(storageLocalWrite(), strNewFmt("%s/stanza1", strZ(repo2archivePath))); + storagePathCreateP(storageLocalWrite(), strNewFmt("%s/stanza1", strZ(repo2backupPath))); - harnessCfgLoad(cfgCmdInfo, argListText); - TEST_RESULT_STR_Z( - infoRender(), - "stanza: stanza1\n" - " status: ok\n" - " cipher: none\n" + // Write encrypted info files + content = strNew + ( + "[db]\n" + "db-id=1\n" + "db-system-id=6626363367545678089\n" + "db-version=\"9.5\"\n" "\n" - " db (prior)\n" - " wal archive min/max (9.4-1): 000000010000000000000002/000000020000000000000003\n" + "[cipher]\n" + "cipher-pass=\"" TEST_CIPHER_PASS_ARCHIVE "\"\n" "\n" - " full backup: 20181119-152138F\n" - " timestamp start/stop: 2018-11-19 15:21:38 / 2018-11-19 15:21:51\n" - " wal start/stop: 000000010000000000000002 / 000000010000000000000002\n" - " database size: 19.2MB, backup size: 19.2MB\n" - " repository size: 2.3MB, repository backup size: 2.3MB\n" + "[db:history]\n" + "1={\"db-id\":6626363367545678089,\"db-version\":\"9.5\"}\n" + ); + + String *filePathName = strNewFmt("%s/stanza1/archive.info", strZ(repo2archivePath)); + StorageWrite *write = storageNewWriteP(storageLocalWrite(), filePathName); + IoFilterGroup *filterGroup = ioWriteFilterGroup(storageWriteIo(write)); + ioFilterGroupAdd(filterGroup, cipherBlockNew(cipherModeEncrypt, cipherTypeAes256Cbc, BUFSTRDEF(TEST_CIPHER_PASS), NULL)); + TEST_RESULT_VOID(storagePutP(write, harnessInfoChecksum(content)), "write encrypted archive.info, repo2"); + + content = strNew + ( + "[db]\n" + "db-catalog-version=201510051\n" + "db-control-version=942\n" + "db-id=1\n" + "db-system-id=6626363367545678089\n" + "db-version=\"9.5\"\n" "\n" - " diff backup: 20181119-152138F_20181119-152152D\n" - " timestamp start/stop: 2018-11-19 15:21:52 / 2018-11-19 15:21:55\n" - " wal start/stop: 000000010000000000000003 / 000000010000000000000003\n" - " database size: 19.2MB, backup size: 8.2KB\n" - " repository size: 2.3MB, repository backup size: 346B\n" - " backup reference list: 20181119-152138F\n" + "[backup:current]\n" + "20201116-200000F={\"backrest-format\":5,\"backrest-version\":\"2.30\"," + "\"backup-archive-start\":\"000000010000000000000004\",\"backup-archive-stop\":\"000000010000000000000004\"," + 
"\"backup-info-repo-size\":3159000,\"backup-info-repo-size-delta\":3100,\"backup-info-size\":26897000," + "\"backup-info-size-delta\":26897020,\"backup-timestamp-start\":1605556800,\"backup-timestamp-stop\":1605556805," + "\"backup-type\":\"full\",\"db-id\":1,\"option-archive-check\":true,\"option-archive-copy\":true," + "\"option-backup-standby\":true,\"option-checksum-page\":false,\"option-compress\":false,\"option-hardlink\":true," + "\"option-online\":true}\n" "\n" - " incr backup: 20181119-152138F_20181119-152152I\n" - " timestamp start/stop: 2018-11-19 15:21:52 / 2018-11-19 15:21:55\n" - " wal start/stop: n/a\n" - " database size: 19.2MB, backup size: 8.2KB\n" - " repository size: 2.3MB, repository backup size: 346B\n" - " backup reference list: 20181119-152138F, 20181119-152138F_20181119-152152D\n" + "[cipher]\n" + "cipher-pass=\"somepass\"\n" "\n" - " db (current)\n" - " wal archive min/max (9.5-2): none present\n" + "[db:history]\n" + "1={\"db-catalog-version\":201510051,\"db-control-version\":942,\"db-system-id\":6626363367545678089," + "\"db-version\":\"9.5\"}\n" + ); + + filePathName = strNewFmt("%s/stanza1/backup.info", strZ(repo2backupPath)); + write = storageNewWriteP(storageLocalWrite(), filePathName); + filterGroup = ioWriteFilterGroup(storageWriteIo(write)); + ioFilterGroupAdd(filterGroup, cipherBlockNew(cipherModeEncrypt, cipherTypeAes256Cbc, BUFSTRDEF(TEST_CIPHER_PASS), NULL)); + TEST_RESULT_VOID(storagePutP(write, harnessInfoChecksum(content)), "write encrypted backup.info, repo2"); + + // Add WAL on repo1 and encrypted repo2 for stanza1 + String *archive1Db1_1 = strNewFmt("%s/9.5-2/0000000100000000", strZ(archiveStanza1Path)); + TEST_RESULT_VOID(storagePathCreateP(storageLocalWrite(), archive1Db1_1), "create db1 archive WAL directory, repo1"); + TEST_RESULT_INT(system( + strZ(strNewFmt("touch %s", strZ(strNewFmt("%s/000000010000000000000002-ac61b8f1ec7b1e6c3eaee9345214595eb7daa9a1.gz", + strZ(archive1Db1_1)))))), 0, "touch WAL file, repo1"); + TEST_RESULT_INT(system( + strZ(strNewFmt("touch %s", strZ(strNewFmt("%s/000000010000000000000003-37dff2b7552a9d66e4bae1a762488a6885e7082c.gz", + strZ(archive1Db1_1)))))), 0, "touch WAL file, repo1"); + TEST_RESULT_INT(system( + strZ(strNewFmt("touch %s", strZ(strNewFmt("%s/000000010000000000000004-ee61b8f1ec7b1e6c3eaee9345214595eb7daa9a1.gz", + strZ(archive1Db1_1)))))), 0, "touch WAL file, repo1"); + TEST_RESULT_INT(system( + strZ(strNewFmt("touch %s", strZ(strNewFmt("%s/000000010000000000000005-abc123f1ec7b1e6c3eaee9345214595eb7daa9a1.gz", + strZ(archive1Db1_1)))))), 0, "touch WAL file, repo1"); + + String *archive2Db1_1 = strNewFmt("%s/stanza1/9.5-1/0000000100000000", strZ(repo2archivePath)); + TEST_RESULT_VOID(storagePathCreateP(storageLocalWrite(), archive2Db1_1), "create db1 archive WAL directory, repo2"); + TEST_RESULT_INT(system( + strZ(strNewFmt("touch %s", strZ(strNewFmt("%s/000000010000000000000003-37dff2b7552a9d66e4bae1a762488a6885e7082c.gz", + strZ(archive2Db1_1)))))), 0, "touch WAL file, repo2"); + TEST_RESULT_INT(system( + strZ(strNewFmt("touch %s", strZ(strNewFmt("%s/000000010000000000000004-ff61b8f1ec7b1e6c3eaee9345214595eb7daa9a1.gz", + strZ(archive2Db1_1)))))), 0, "touch WAL file, repo2"); + + // Add a manifest on the encrypted repo2 + #define TEST_MANIFEST_HEADER2 \ + "[backup]\n" \ + "backup-archive-start=\"000000010000000000000004\"\n" \ + "backup-archive-stop=\"000000010000000000000004\"\n" \ + "backup-label=\"20201116-200000F\"\n" \ + "backup-timestamp-copy-start=1605556800\n" \ + 
"backup-timestamp-start=1605556800\n" \ + "backup-timestamp-stop=1605556802\n" \ + "backup-type=\"full\"\n" \ + "\n" \ + "[backup:db]\n" \ + "db-catalog-version=201510051\n" \ + "db-control-version=942\n" \ + "db-id=1\n" \ + "db-system-id=6626363367545678089\n" \ + "db-version=\"9.5\"\n" \ + "\n" \ + "[backup:option]\n" \ + "option-archive-check=true\n" \ + "option-archive-copy=true\n" \ + "option-backup-standby=true\n" \ + "option-buffer-size=16384\n" \ + "option-checksum-page=false\n" \ + "option-compress=false\n" \ + "option-compress-level=3\n" \ + "option-compress-level-network=3\n" \ + "option-delta=false\n" \ + "option-hardlink=true\n" \ + "option-online=true\n" \ + "option-process-max=32\n" \ + + contentLoad = harnessInfoChecksumZ + ( + TEST_MANIFEST_HEADER2 + TEST_MANIFEST_TARGET "\n" - "stanza: stanza2\n" - " status: error (no valid backups)\n" - " cipher: none\n" + "[cipher]\n" + "cipher-pass=\"someotherpass\"\n" + TEST_MANIFEST_DB + TEST_MANIFEST_FILE + TEST_MANIFEST_FILE_DEFAULT + TEST_MANIFEST_LINK + TEST_MANIFEST_LINK_DEFAULT + TEST_MANIFEST_PATH + TEST_MANIFEST_PATH_DEFAULT + ); + + // Create encrypted manifest file + storagePathCreateP(storageLocalWrite(), strNewFmt("%s/stanza1/20201116-200000F", strZ(repo2backupPath))); + filePathName = strNewFmt("%s/stanza1/20201116-200000F/" BACKUP_MANIFEST_FILE, strZ(repo2backupPath)); + write = storageNewWriteP(storageLocalWrite(), filePathName); + filterGroup = ioWriteFilterGroup(storageWriteIo(write)); + ioFilterGroupAdd(filterGroup, cipherBlockNew(cipherModeEncrypt, cipherTypeAes256Cbc, BUFSTRDEF("somepass"), NULL)); + TEST_RESULT_VOID(storagePutP(write, contentLoad), "write encrypted manifest, repo2"); + + // Create a stanza on repo2 that is not on repo1 + content = strNew + ( + "[db]\n" + "db-id=1\n" + "db-system-id=6626363367545678089\n" + "db-version=\"9.4\"\n" "\n" - " db (current)\n" - " wal archive min/max (9.4-1): none present\n", - "text - multiple stanzas, one with valid backups, archives in latest DB"); + "[cipher]\n" + "cipher-pass=\"" TEST_CIPHER_PASS_ARCHIVE "\"\n" + "\n" + "[db:history]\n" + "1={\"db-id\":6626363367545678089,\"db-version\":\"9.4\"}\n" + ); + + filePathName = strNewFmt("%s/stanza3/archive.info", strZ(repo2archivePath)); + write = storageNewWriteP(storageLocalWrite(), filePathName); + filterGroup = ioWriteFilterGroup(storageWriteIo(write)); + ioFilterGroupAdd(filterGroup, cipherBlockNew(cipherModeEncrypt, cipherTypeAes256Cbc, BUFSTRDEF(TEST_CIPHER_PASS), NULL)); + TEST_RESULT_VOID(storagePutP(write, harnessInfoChecksum(content)), "write encrypted archive.info, repo2, stanza3"); + + content = strNew + ( + "[db]\n" + "db-catalog-version=201409291\n" + "db-control-version=942\n" + "db-id=1\n" + "db-system-id=6626363367545678089\n" + "db-version=\"9.4\"\n" + "\n" + "[backup:current]\n" + "20201110-100000F={\"backrest-format\":5,\"backrest-version\":\"2.25\"," + "\"backup-archive-start\":\"000000010000000000000001\",\"backup-archive-stop\":\"000000010000000000000002\"," + "\"backup-info-repo-size\":3159000,\"backup-info-repo-size-delta\":3100,\"backup-info-size\":26897000," + "\"backup-info-size-delta\":26897020,\"backup-timestamp-start\":1605002400,\"backup-timestamp-stop\":1605002402," + "\"backup-type\":\"full\",\"db-id\":1,\"option-archive-check\":true,\"option-archive-copy\":true," + "\"option-backup-standby\":true,\"option-checksum-page\":false,\"option-compress\":false,\"option-hardlink\":true," + "\"option-online\":true}\n" + "\n" + "[cipher]\n" + "cipher-pass=\"somepass\"\n" + "\n" + 
"[db:history]\n" + "1={\"db-catalog-version\":201409291,\"db-control-version\":942,\"db-system-id\":6626363367545678089," + "\"db-version\":\"9.4\"}\n" + ); + + filePathName = strNewFmt("%s/stanza3/backup.info", strZ(repo2backupPath)); + write = storageNewWriteP(storageLocalWrite(), filePathName); + filterGroup = ioWriteFilterGroup(storageWriteIo(write)); + ioFilterGroupAdd(filterGroup, cipherBlockNew(cipherModeEncrypt, cipherTypeAes256Cbc, BUFSTRDEF(TEST_CIPHER_PASS), NULL)); + TEST_RESULT_VOID(storagePutP(write, harnessInfoChecksum(content)), "write encrypted backup.info, repo2, stanza3"); + + archive2Db1_1 = strNewFmt("%s/stanza3/9.4-1/0000000100000000", strZ(repo2archivePath)); + TEST_RESULT_VOID(storagePathCreateP(storageLocalWrite(), archive2Db1_1), "create db1 archive WAL directory, repo2"); + TEST_RESULT_INT(system( + strZ(strNewFmt("touch %s", strZ(strNewFmt("%s/000000010000000000000001-11dff2b7552a9d66e4bae1a762488a6885e7082c.gz", + strZ(archive2Db1_1)))))), 0, "touch WAL file, repo2"); + TEST_RESULT_INT(system( + strZ(strNewFmt("touch %s", strZ(strNewFmt("%s/000000010000000000000002-2261b8f1ec7b1e6c3eaee9345214595eb7daa9a1.gz", + strZ(archive2Db1_1)))))), 0, "touch WAL file, repo2"); + + // Set up the configuration + StringList *argListMultiRepo = strLstNew(); + hrnCfgArgRawZ(argListMultiRepo, cfgOptRepoPath, TEST_PATH_REPO); + hrnCfgArgKeyRawFmt(argListMultiRepo, cfgOptRepoPath, 2, "%s/repo2", testPath()); + hrnCfgArgKeyRawZ(argListMultiRepo, cfgOptRepoCipherType, 2, CIPHER_TYPE_AES_256_CBC); + hrnCfgEnvKeyRawZ(cfgOptRepoCipherPass, 2, TEST_CIPHER_PASS); + + StringList *argListMultiRepoJson = strLstDup(argListMultiRepo); + hrnCfgArgRawZ(argListMultiRepoJson, cfgOptOutput, "json"); + + HARNESS_FORK_BEGIN() + { + HARNESS_FORK_CHILD_BEGIN(0, false) + { + TEST_RESULT_INT_NE( + lockAcquire(cfgOptionStr(cfgOptLockPath), strNew("stanza2"), STRDEF("999-ffffffff"), lockTypeBackup, 0, true), + -1, "create backup/expire lock"); + + sleepMSec(1000); + lockRelease(true); + } + HARNESS_FORK_CHILD_END(); + + HARNESS_FORK_PARENT_BEGIN() + { + sleepMSec(250); + + harnessCfgLoad(cfgCmdInfo, argListMultiRepoJson); + TEST_RESULT_STR_Z( + infoRender(), + "[" + "{" + "\"archive\":[" + "{" + "\"database\":{" + "\"id\":1," + "\"repo-key\":1" + "}," + "\"id\":\"9.4-1\"," + "\"max\":\"000000020000000000000003\"," + "\"min\":\"000000010000000000000002\"" + "}," + "{" + "\"database\":{" + "\"id\":2," + "\"repo-key\":1" + "}," + "\"id\":\"9.5-2\"," + "\"max\":\"000000010000000000000005\"," + "\"min\":\"000000010000000000000002\"" + "}," + "{" + "\"database\":{" + "\"id\":1," + "\"repo-key\":2" + "}," + "\"id\":\"9.5-1\"," + "\"max\":\"000000010000000000000004\"," + "\"min\":\"000000010000000000000003\"" + "}" + "]," + "\"backup\":[" + "{" + "\"archive\":{" + "\"start\":\"000000010000000000000002\"," + "\"stop\":\"000000010000000000000002\"" + "}," + "\"backrest\":{" + "\"format\":5," + "\"version\":\"2.08dev\"" + "}," + "\"database\":{" + "\"id\":1," + "\"repo-key\":1" + "}," + "\"info\":{" + "\"delta\":20162900," + "\"repository\":{" + "\"delta\":2369186," + "\"size\":2369186" + "}," + "\"size\":20162900" + "}," + "\"label\":\"20181119-152138F\"," + "\"prior\":null," + "\"reference\":null," + "\"timestamp\":{" + "\"start\":1542640898," + "\"stop\":1542640899" + "}," + "\"type\":\"full\"" + "}," + "{" + "\"archive\":{" + "\"start\":\"000000010000000000000003\"," + "\"stop\":\"000000020000000000000003\"" + "}," + "\"backrest\":{" + "\"format\":5," + "\"version\":\"2.08dev\"" + "}," + "\"database\":{" + 
"\"id\":1," + "\"repo-key\":1" + "}," + "\"info\":{" + "\"delta\":8428," + "\"repository\":{" + "\"delta\":346," + "\"size\":2369186" + "}," + "\"size\":20162900" + "}," + "\"label\":\"20181119-152138F_20181119-152152D\"," + "\"prior\":\"20181119-152138F\"," + "\"reference\":[" + "\"20181119-152138F\"" + "]," + "\"timestamp\":{" + "\"start\":1542640912," + "\"stop\":1542640915" + "}," + "\"type\":\"diff\"" + "}," + "{" + "\"archive\":{" + "\"start\":\"000000010000000000000003\"," + "\"stop\":null" + "}," + "\"backrest\":{" + "\"format\":5," + "\"version\":\"2.08dev\"" + "}," + "\"database\":{" + "\"id\":1," + "\"repo-key\":1" + "}," + "\"info\":{" + "\"delta\":8428," + "\"repository\":{" + "\"delta\":346," + "\"size\":2369186" + "}," + "\"size\":20162900" + "}," + "\"label\":\"20181119-152138F_20181119-152155I\"," + "\"prior\":\"20181119-152138F_20181119-152152D\"," + "\"reference\":[" + "\"20181119-152138F\"," + "\"20181119-152138F_20181119-152152D\"" + "]," + "\"timestamp\":{" + "\"start\":1542640915," + "\"stop\":1542640917" + "}," + "\"type\":\"incr\"" + "}," + "{" + "\"archive\":{" + "\"start\":\"000000010000000000000002\"," + "\"stop\":\"000000010000000000000003\"" + "}," + "\"backrest\":{" + "\"format\":5," + "\"version\":\"2.30\"" + "}," + "\"database\":{" + "\"id\":2," + "\"repo-key\":1" + "}," + "\"info\":{" + "\"delta\":26897020," + "\"repository\":{" + "\"delta\":3100," + "\"size\":3159000" + "}," + "\"size\":26897000" + "}," + "\"label\":\"20201116-155000F\"," + "\"prior\":null," + "\"reference\":null," + "\"timestamp\":{" + "\"start\":1605541800," + "\"stop\":1605541802" + "}," + "\"type\":\"full\"" + "}," + "{" + "\"archive\":{" + "\"start\":\"000000010000000000000004\"," + "\"stop\":\"000000010000000000000004\"" + "}," + "\"backrest\":{" + "\"format\":5," + "\"version\":\"2.30\"" + "}," + "\"database\":{" + "\"id\":1," + "\"repo-key\":2" + "}," + "\"info\":{" + "\"delta\":26897020," + "\"repository\":{" + "\"delta\":3100," + "\"size\":3159000" + "}," + "\"size\":26897000" + "}," + "\"label\":\"20201116-200000F\"," + "\"prior\":null," + "\"reference\":null," + "\"timestamp\":{" + "\"start\":1605556800," + "\"stop\":1605556805" + "}," + "\"type\":\"full\"" + "}," + "{" + "\"archive\":{" + "\"start\":\"000000010000000000000005\"," + "\"stop\":\"000000010000000000000005\"" + "}," + "\"backrest\":{" + "\"format\":5," + "\"version\":\"2.30\"" + "}," + "\"database\":{" + "\"id\":2," + "\"repo-key\":1" + "}," + "\"info\":{" + "\"delta\":8428," + "\"repository\":{" + "\"delta\":346," + "\"size\":2369186" + "}," + "\"size\":20162900" + "}," + "\"label\":\"20201116-155000F_20201119-152100I\"," + "\"prior\":\"20201116-155000F\"," + "\"reference\":[" + "\"20201116-155000F\"" + "]," + "\"timestamp\":{" + "\"start\":1605799260," + "\"stop\":1605799263" + "}," + "\"type\":\"incr\"" + "}" + "]," + "\"cipher\":\"mixed\"," + "\"db\":[" + "{" + "\"id\":1," + "\"repo-key\":1," + "\"system-id\":6625592122879095702," + "\"version\":\"9.4\"" + "}," + "{" + "\"id\":2," + "\"repo-key\":1," + "\"system-id\":6626363367545678089," + "\"version\":\"9.5\"" + "}," + "{" + "\"id\":1," + "\"repo-key\":2," + "\"system-id\":6626363367545678089," + "\"version\":\"9.5\"" + "}" + "]," + "\"name\":\"stanza1\"," + "\"repo\":[" + "{" + "\"cipher\":\"none\"," + "\"key\":1," + "\"status\":{" + "\"code\":0," + "\"message\":\"ok\"" + "}" + "}," + "{" + "\"cipher\":\"aes-256-cbc\"," + "\"key\":2," + "\"status\":{" + "\"code\":0," + "\"message\":\"ok\"" + "}" + "}" + "]," + "\"status\":{" + "\"code\":0," + 
"\"lock\":{\"backup\":{\"held\":false}}," + "\"message\":\"ok\"" + "}" + "}," + "{" + "\"archive\":[" + "{" + "\"database\":{" + "\"id\":1," + "\"repo-key\":1" + "}," + "\"id\":\"9.4-1\"," + "\"max\":null," + "\"min\":null" + "}" + "]," + "\"backup\":[]," + "\"cipher\":\"mixed\"," + "\"db\":[" + "{" + "\"id\":1," + "\"repo-key\":1," + "\"system-id\":6625633699176220261," + "\"version\":\"9.4\"" + "}" + "]," + "\"name\":\"stanza2\"," + "\"repo\":[" + "{" + "\"cipher\":\"none\"," + "\"key\":1," + "\"status\":{" + "\"code\":2," + "\"message\":\"no valid backups\"" + "}" + "}," + "{" + "\"cipher\":\"aes-256-cbc\"," + "\"key\":2," + "\"status\":{" + "\"code\":1," + "\"message\":\"missing stanza path\"" + "}" + "}" + "]," + "\"status\":{" + "\"code\":4," + "\"lock\":{\"backup\":{\"held\":true}}," + "\"message\":\"different across repos\"" + "}" + "}," + "{" + "\"archive\":[" + "{" + "\"database\":{" + "\"id\":1," + "\"repo-key\":2" + "}," + "\"id\":\"9.4-1\"," + "\"max\":\"000000010000000000000002\"," + "\"min\":\"000000010000000000000001\"" + "}" + "]," + "\"backup\":[" + "{" + "\"archive\":{" + "\"start\":\"000000010000000000000001\"," + "\"stop\":\"000000010000000000000002\"" + "}," + "\"backrest\":{" + "\"format\":5," + "\"version\":\"2.25\"" + "}," + "\"database\":{" + "\"id\":1," + "\"repo-key\":2" + "}," + "\"info\":{" + "\"delta\":26897020," + "\"repository\":{" + "\"delta\":3100," + "\"size\":3159000" + "}," + "\"size\":26897000" + "}," + "\"label\":\"20201110-100000F\"," + "\"prior\":null," + "\"reference\":null," + "\"timestamp\":{" + "\"start\":1605002400," + "\"stop\":1605002402" + "}," + "\"type\":\"full\"" + "}" + "]," + "\"cipher\":\"mixed\"," + "\"db\":[" + "{" + "\"id\":1," + "\"repo-key\":2," + "\"system-id\":6626363367545678089," + "\"version\":\"9.4\"" + "}" + "]," + "\"name\":\"stanza3\"," + "\"repo\":[" + "{" + "\"cipher\":\"none\"," + "\"key\":1," + "\"status\":{" + "\"code\":1," + "\"message\":\"missing stanza path\"" + "}" + "}," + "{" + "\"cipher\":\"aes-256-cbc\"," + "\"key\":2," + "\"status\":{" + "\"code\":0," + "\"message\":\"ok\"" + "}" + "}" + "]," + "\"status\":{" + "\"code\":4," + "\"lock\":{\"backup\":{\"held\":false}}," + "\"message\":\"different across repos\"" + "}" + "}" + "]", + "json - multiple stanzas, some with valid backups, archives in latest DB, backup lock held on one stanza"); + } + HARNESS_FORK_PARENT_END(); + } + HARNESS_FORK_END(); + + HARNESS_FORK_BEGIN() + { + HARNESS_FORK_CHILD_BEGIN(0, false) + { + TEST_RESULT_INT_NE( + lockAcquire(cfgOptionStr(cfgOptLockPath), strNew("stanza2"), STRDEF("999-ffffffff"), lockTypeBackup, 0, true), + -1, "create backup/expire lock"); + + sleepMSec(1000); + lockRelease(true); + } + HARNESS_FORK_CHILD_END(); + + HARNESS_FORK_PARENT_BEGIN() + { + sleepMSec(250); + + harnessCfgLoad(cfgCmdInfo, argListMultiRepo); + TEST_RESULT_STR_Z( + infoRender(), + "stanza: stanza1\n" + " status: ok\n" + " cipher: mixed\n" + " repo1: none\n" + " repo2: aes-256-cbc\n" + "\n" + " db (prior)\n" + " wal archive min/max (9.4): 000000010000000000000002/000000020000000000000003\n" + "\n" + " full backup: 20181119-152138F\n" + " timestamp start/stop: 2018-11-19 15:21:38 / 2018-11-19 15:21:39\n" + " wal start/stop: 000000010000000000000002 / 000000010000000000000002\n" + " database size: 19.2MB, backup size: 19.2MB\n" + " repo1: size: 2.3MB, backup size: 2.3MB\n" + "\n" + " diff backup: 20181119-152138F_20181119-152152D\n" + " timestamp start/stop: 2018-11-19 15:21:52 / 2018-11-19 15:21:55\n" + " wal start/stop: 000000010000000000000003 
/ 000000020000000000000003\n" + " database size: 19.2MB, backup size: 8.2KB\n" + " repo1: size: 2.3MB, backup size: 346B\n" + " backup reference list: 20181119-152138F\n" + "\n" + " incr backup: 20181119-152138F_20181119-152155I\n" + " timestamp start/stop: 2018-11-19 15:21:55 / 2018-11-19 15:21:57\n" + " wal start/stop: n/a\n" + " database size: 19.2MB, backup size: 8.2KB\n" + " repo1: size: 2.3MB, backup size: 346B\n" + " backup reference list: 20181119-152138F, 20181119-152138F_20181119-152152D\n" + "\n" + " db (current)\n" + " wal archive min/max (9.5): 000000010000000000000002/000000010000000000000005\n" + "\n" + " full backup: 20201116-155000F\n" + " timestamp start/stop: 2020-11-16 15:50:00 / 2020-11-16 15:50:02\n" + " wal start/stop: 000000010000000000000002 / 000000010000000000000003\n" + " database size: 25.7MB, backup size: 25.7MB\n" + " repo1: size: 3MB, backup size: 3KB\n" + "\n" + " full backup: 20201116-200000F\n" + " timestamp start/stop: 2020-11-16 20:00:00 / 2020-11-16 20:00:05\n" + " wal start/stop: 000000010000000000000004 / 000000010000000000000004\n" + " database size: 25.7MB, backup size: 25.7MB\n" + " repo2: size: 3MB, backup size: 3KB\n" + "\n" + " incr backup: 20201116-155000F_20201119-152100I\n" + " timestamp start/stop: 2020-11-19 15:21:00 / 2020-11-19 15:21:03\n" + " wal start/stop: 000000010000000000000005 / 000000010000000000000005\n" + " database size: 19.2MB, backup size: 8.2KB\n" + " repo1: size: 2.3MB, backup size: 346B\n" + " backup reference list: 20201116-155000F\n" + "\n" + "stanza: stanza2\n" + " status: mixed (backup/expire running)\n" + " repo1: error (no valid backups)\n" + " repo2: error (missing stanza path)\n" + " cipher: mixed\n" + " repo1: none\n" + " repo2: aes-256-cbc\n" + "\n" + " db (current)\n" + " wal archive min/max (9.4): none present\n" + "\n" + "stanza: stanza3\n" + " status: mixed\n" + " repo1: error (missing stanza path)\n" + " repo2: ok\n" + " cipher: mixed\n" + " repo1: none\n" + " repo2: aes-256-cbc\n" + "\n" + " db (current)\n" + " wal archive min/max (9.4): 000000010000000000000001/000000010000000000000002\n" + "\n" + " full backup: 20201110-100000F\n" + " timestamp start/stop: 2020-11-10 10:00:00 / 2020-11-10 10:00:02\n" + " wal start/stop: 000000010000000000000001 / 000000010000000000000002\n" + " database size: 25.7MB, backup size: 25.7MB\n" + " repo2: size: 3MB, backup size: 3KB\n", + "text - multiple stanzas, multi-repo with valid backups, backup lock held on one stanza"); + } + HARNESS_FORK_PARENT_END(); + } + HARNESS_FORK_END(); // Backup set requested, with 1 checksum error //-------------------------------------------------------------------------------------------------------------------------- - argList2 = strLstDup(argListText); + argList2 = strLstDup(argListMultiRepo); strLstAddZ(argList2, "--stanza=stanza1"); - strLstAddZ(argList2, "--set=20181119-152138F_20181119-152152I"); + strLstAddZ(argList2, "--set=20181119-152138F_20181119-152155I"); + harnessCfgLoad(cfgCmdInfo, argList2); + + TEST_ERROR_FMT(infoRender(), OptionRequiredError, "option '" CFGOPT_REPO "' is required when specifying a backup set"); + + // Specify the repo + strLstAddZ(argList2, "--repo=1"); harnessCfgLoad(cfgCmdInfo, argList2); TEST_RESULT_STR_Z( @@ -901,13 +1586,13 @@ testRun(void) " cipher: none\n" "\n" " db (prior)\n" - " wal archive min/max (9.4-1): 000000010000000000000002/000000020000000000000003\n" + " wal archive min/max (9.4): 000000010000000000000002/000000020000000000000003\n" "\n" - " incr backup: 
20181119-152138F_20181119-152152I\n" - " timestamp start/stop: 2018-11-19 15:21:52 / 2018-11-19 15:21:55\n" + " incr backup: 20181119-152138F_20181119-152155I\n" + " timestamp start/stop: 2018-11-19 15:21:55 / 2018-11-19 15:21:57\n" " wal start/stop: n/a\n" " database size: 19.2MB, backup size: 8.2KB\n" - " repository size: 2.3MB, repository backup size: 346B\n" + " repo1: size: 2.3MB, backup size: 346B\n" " backup reference list: 20181119-152138F, 20181119-152138F_20181119-152152D\n" " database list: mail (16456), postgres (12173)\n" " symlinks:\n" @@ -919,6 +1604,39 @@ testRun(void) " page checksum error: base/16384/17000\n", "text - backup set requested"); + // Confirm ability to read encrypted repo manifest + //-------------------------------------------------------------------------------------------------------------------------- + argList2 = strLstDup(argListMultiRepo); + strLstAddZ(argList2, "--stanza=stanza1"); + strLstAddZ(argList2, "--set=20201116-200000F"); + strLstAddZ(argList2, "--repo=2"); + harnessCfgLoad(cfgCmdInfo, argList2); + + TEST_RESULT_STR_Z( + infoRender(), + "stanza: stanza1\n" + " status: ok\n" + " cipher: aes-256-cbc\n" + "\n" + " db (current)\n" + " wal archive min/max (9.5): 000000010000000000000003/000000010000000000000004\n" + "\n" + " full backup: 20201116-200000F\n" + " timestamp start/stop: 2020-11-16 20:00:00 / 2020-11-16 20:00:05\n" + " wal start/stop: 000000010000000000000004 / 000000010000000000000004\n" + " database size: 25.7MB, backup size: 25.7MB\n" + " repo2: size: 3MB, backup size: 3KB\n" + " database list: mail (16456), postgres (12173)\n" + " symlinks:\n" + " pg_hba.conf => ../pg_config/pg_hba.conf\n" + " pg_stat => ../pg_stat\n" + " tablespaces:\n" + " ts1 (1) => /tblspc/ts1\n" + " ts12 (12) => /tblspc/ts12\n" + " page checksum error: base/16384/17000\n", + "text - multi-repo, backup set requested, repo2"); + + //-------------------------------------------------------------------------------------------------------------------------- strLstAddZ(argList2, "--output=json"); harnessCfgLoad(cfgCmdInfo, argList2); @@ -928,7 +1646,8 @@ testRun(void) //-------------------------------------------------------------------------------------------------------------------------- argList2 = strLstDup(argListText); strLstAddZ(argList2, "--stanza=stanza1"); - strLstAddZ(argList2, "--set=20181119-152138F_20181119-152152I"); + strLstAddZ(argList2, "--set=20181119-152138F_20181119-152155I"); + strLstAddZ(argList2, "--repo=1"); harnessCfgLoad(cfgCmdInfo, argList2); #define TEST_MANIFEST_TARGET_NO_LINK \ @@ -969,7 +1688,7 @@ testRun(void) TEST_RESULT_VOID( storagePutP( storageNewWriteP( - storageRepoWrite(), strNew(STORAGE_REPO_BACKUP "/20181119-152138F_20181119-152152I/" BACKUP_MANIFEST_FILE)), + storageRepoWrite(), strNew(STORAGE_REPO_BACKUP "/20181119-152138F_20181119-152155I/" BACKUP_MANIFEST_FILE)), contentLoad), "write manifest"); @@ -980,13 +1699,13 @@ testRun(void) " cipher: none\n" "\n" " db (prior)\n" - " wal archive min/max (9.4-1): 000000010000000000000002/000000020000000000000003\n" + " wal archive min/max (9.4): 000000010000000000000002/000000020000000000000003\n" "\n" - " incr backup: 20181119-152138F_20181119-152152I\n" - " timestamp start/stop: 2018-11-19 15:21:52 / 2018-11-19 15:21:55\n" + " incr backup: 20181119-152138F_20181119-152155I\n" + " timestamp start/stop: 2018-11-19 15:21:55 / 2018-11-19 15:21:57\n" " wal start/stop: n/a\n" " database size: 19.2MB, backup size: 8.2KB\n" - " repository size: 2.3MB, repository backup size: 346B\n" + " 
repo1: size: 2.3MB, backup size: 346B\n" " backup reference list: 20181119-152138F, 20181119-152138F_20181119-152152D\n" " database list: mail (16456), postgres (12173)\n" " page checksum error: base/16384/17000, base/32768/33000\n", @@ -996,7 +1715,8 @@ testRun(void) //-------------------------------------------------------------------------------------------------------------------------- argList2 = strLstDup(argListText); strLstAddZ(argList2, "--stanza=stanza1"); - strLstAddZ(argList2, "--set=20181119-152138F_20181119-152152I"); + strLstAddZ(argList2, "--set=20181119-152138F_20181119-152155I"); + strLstAddZ(argList2, "--repo=1"); harnessCfgLoad(cfgCmdInfo, argList2); #define TEST_MANIFEST_NO_DB \ @@ -1038,7 +1758,7 @@ testRun(void) TEST_RESULT_VOID( storagePutP( storageNewWriteP( - storageRepoWrite(), strNew(STORAGE_REPO_BACKUP "/20181119-152138F_20181119-152152I/" BACKUP_MANIFEST_FILE)), + storageRepoWrite(), strNew(STORAGE_REPO_BACKUP "/20181119-152138F_20181119-152155I/" BACKUP_MANIFEST_FILE)), contentLoad), "write manifest"); @@ -1049,21 +1769,22 @@ testRun(void) " cipher: none\n" "\n" " db (prior)\n" - " wal archive min/max (9.4-1): 000000010000000000000002/000000020000000000000003\n" + " wal archive min/max (9.4): 000000010000000000000002/000000020000000000000003\n" "\n" - " incr backup: 20181119-152138F_20181119-152152I\n" - " timestamp start/stop: 2018-11-19 15:21:52 / 2018-11-19 15:21:55\n" + " incr backup: 20181119-152138F_20181119-152155I\n" + " timestamp start/stop: 2018-11-19 15:21:55 / 2018-11-19 15:21:57\n" " wal start/stop: n/a\n" " database size: 19.2MB, backup size: 8.2KB\n" - " repository size: 2.3MB, repository backup size: 346B\n" + " repo1: size: 2.3MB, backup size: 346B\n" " backup reference list: 20181119-152138F, 20181119-152138F_20181119-152152D\n" " database list: none\n", "text - backup set requested, no db and no checksum error"); // Stanza found //-------------------------------------------------------------------------------------------------------------------------- - strLstAddZ(argList, "--stanza=stanza2"); - harnessCfgLoad(cfgCmdInfo, argList); + argList2 = strLstDup(argList); + strLstAddZ(argList2, "--stanza=stanza2"); + harnessCfgLoad(cfgCmdInfo, argList2); TEST_RESULT_STR_Z( infoRender(), "[" @@ -1071,7 +1792,8 @@ testRun(void) "\"archive\":[" "{" "\"database\":{" - "\"id\":1" + "\"id\":1," + "\"repo-key\":1" "}," "\"id\":\"9.4-1\"," "\"max\":null," @@ -1083,34 +1805,498 @@ testRun(void) "\"db\":[" "{" "\"id\":1," + "\"repo-key\":1," "\"system-id\":6625633699176220261," "\"version\":\"9.4\"" "}" "]," - "\"name\":\"stanza2\"," - "\"status\":{" - "\"code\":2," + "\"name\":\"stanza2\"," + "\"repo\":[" + "{" + "\"cipher\":\"none\"," + "\"key\":1," + "\"status\":{" + "\"code\":2," + "\"message\":\"no valid backups\"" + "}" + "}," + "{" + "\"cipher\":\"none\"," + "\"key\":2," + "\"status\":{" + "\"code\":1," + "\"message\":\"missing stanza path\"" + "}" + "}" + "]," + "\"status\":{" + "\"code\":4," "\"lock\":{\"backup\":{\"held\":false}}," - "\"message\":\"no valid backups\"" + "\"message\":\"different across repos\"" "}" "}" "]", - "json - multiple stanzas - selected found"); + "json - multiple stanzas - selected found, repo1"); - strLstAddZ(argListText, "--stanza=stanza2"); - harnessCfgLoad(cfgCmdInfo, argListText); + argList2 = strLstDup(argListText); + strLstAddZ(argList2, "--stanza=stanza2"); + harnessCfgLoad(cfgCmdInfo, argList2); TEST_RESULT_STR_Z( infoRender(), "stanza: stanza2\n" - " status: error (no valid backups)\n" + " status: mixed\n" + " 
repo1: error (no valid backups)\n" + " repo2: error (missing stanza path)\n" " cipher: none\n" "\n" " db (current)\n" - " wal archive min/max (9.4-1): none present\n", - "text - multiple stanzas - selected found"); + " wal archive min/max (9.4): none present\n", + "text - multiple stanzas - selected found, repo1"); + + // Remove backups from repo2 for stanza1 so multi-repos are scanned but backups are on only 1 repo + //-------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("multi-repo, backups only on one"); + + content = strNew + ( + "[db]\n" + "db-catalog-version=201510051\n" + "db-control-version=942\n" + "db-id=1\n" + "db-system-id=6626363367545678089\n" + "db-version=\"9.5\"\n" + "\n" + "[cipher]\n" + "cipher-pass=\"somepass\"\n" + "\n" + "[db:history]\n" + "1={\"db-catalog-version\":201510051,\"db-control-version\":942,\"db-system-id\":6626363367545678089," + "\"db-version\":\"9.5\"}\n" + ); + + filePathName = strNewFmt("%s/stanza1/backup.info", strZ(repo2backupPath)); + write = storageNewWriteP(storageLocalWrite(), filePathName); + filterGroup = ioWriteFilterGroup(storageWriteIo(write)); + ioFilterGroupAdd(filterGroup, cipherBlockNew(cipherModeEncrypt, cipherTypeAes256Cbc, BUFSTRDEF(TEST_CIPHER_PASS), NULL)); + TEST_RESULT_VOID(storagePutP(write, harnessInfoChecksum(content)), "backup.info without current, repo2, stanza1"); + + argList2 = strLstDup(argListMultiRepo); + hrnCfgArgRawZ(argList2, cfgOptStanza, "stanza1"); + harnessCfgLoad(cfgCmdInfo, argList2); + TEST_RESULT_STR_Z( + infoRender(), + "stanza: stanza1\n" + " status: mixed\n" + " repo1: ok\n" + " repo2: error (no valid backups)\n" + " cipher: mixed\n" + " repo1: none\n" + " repo2: aes-256-cbc\n" + "\n" + " db (prior)\n" + " wal archive min/max (9.4): 000000010000000000000002/000000020000000000000003\n" + "\n" + " full backup: 20181119-152138F\n" + " timestamp start/stop: 2018-11-19 15:21:38 / 2018-11-19 15:21:39\n" + " wal start/stop: 000000010000000000000002 / 000000010000000000000002\n" + " database size: 19.2MB, backup size: 19.2MB\n" + " repo1: size: 2.3MB, backup size: 2.3MB\n" + "\n" + " diff backup: 20181119-152138F_20181119-152152D\n" + " timestamp start/stop: 2018-11-19 15:21:52 / 2018-11-19 15:21:55\n" + " wal start/stop: 000000010000000000000003 / 000000020000000000000003\n" + " database size: 19.2MB, backup size: 8.2KB\n" + " repo1: size: 2.3MB, backup size: 346B\n" + " backup reference list: 20181119-152138F\n" + "\n" + " incr backup: 20181119-152138F_20181119-152155I\n" + " timestamp start/stop: 2018-11-19 15:21:55 / 2018-11-19 15:21:57\n" + " wal start/stop: n/a\n" + " database size: 19.2MB, backup size: 8.2KB\n" + " repo1: size: 2.3MB, backup size: 346B\n" + " backup reference list: 20181119-152138F, 20181119-152138F_20181119-152152D\n" + "\n" + " db (current)\n" + " wal archive min/max (9.5): 000000010000000000000002/000000010000000000000005\n" + "\n" + " full backup: 20201116-155000F\n" + " timestamp start/stop: 2020-11-16 15:50:00 / 2020-11-16 15:50:02\n" + " wal start/stop: 000000010000000000000002 / 000000010000000000000003\n" + " database size: 25.7MB, backup size: 25.7MB\n" + " repo1: size: 3MB, backup size: 3KB\n" + "\n" + " incr backup: 20201116-155000F_20201119-152100I\n" + " timestamp start/stop: 2020-11-19 15:21:00 / 2020-11-19 15:21:03\n" + " wal start/stop: 000000010000000000000005 / 000000010000000000000005\n" + " database size: 19.2MB, backup size: 8.2KB\n" + " repo1: size: 2.3MB, backup size: 346B\n" + " 
backup reference list: 20201116-155000F\n", + "text - multi-repo, valid backups only on repo1"); + + // Remove archives for prior backup so archiveMin prior DB == NULL but backupList > 0 (edge case) + //-------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("multi-repo, prior backup: no archives but backups (code coverage)"); + + TEST_RESULT_VOID( + storagePathRemoveP(storageLocalWrite(), strNewFmt("%s/9.4-1", strZ(archiveStanza1Path)), .recurse = true), + "remove archives on db prior"); + + TEST_RESULT_STR_Z( + infoRender(), + "stanza: stanza1\n" + " status: mixed\n" + " repo1: ok\n" + " repo2: error (no valid backups)\n" + " cipher: mixed\n" + " repo1: none\n" + " repo2: aes-256-cbc\n" + "\n" + " db (prior)\n" + " wal archive min/max (9.4): none present\n" + "\n" + " full backup: 20181119-152138F\n" + " timestamp start/stop: 2018-11-19 15:21:38 / 2018-11-19 15:21:39\n" + " wal start/stop: 000000010000000000000002 / 000000010000000000000002\n" + " database size: 19.2MB, backup size: 19.2MB\n" + " repo1: size: 2.3MB, backup size: 2.3MB\n" + "\n" + " diff backup: 20181119-152138F_20181119-152152D\n" + " timestamp start/stop: 2018-11-19 15:21:52 / 2018-11-19 15:21:55\n" + " wal start/stop: 000000010000000000000003 / 000000020000000000000003\n" + " database size: 19.2MB, backup size: 8.2KB\n" + " repo1: size: 2.3MB, backup size: 346B\n" + " backup reference list: 20181119-152138F\n" + "\n" + " incr backup: 20181119-152138F_20181119-152155I\n" + " timestamp start/stop: 2018-11-19 15:21:55 / 2018-11-19 15:21:57\n" + " wal start/stop: n/a\n" + " database size: 19.2MB, backup size: 8.2KB\n" + " repo1: size: 2.3MB, backup size: 346B\n" + " backup reference list: 20181119-152138F, 20181119-152138F_20181119-152152D\n" + "\n" + " db (current)\n" + " wal archive min/max (9.5): 000000010000000000000002/000000010000000000000005\n" + "\n" + " full backup: 20201116-155000F\n" + " timestamp start/stop: 2020-11-16 15:50:00 / 2020-11-16 15:50:02\n" + " wal start/stop: 000000010000000000000002 / 000000010000000000000003\n" + " database size: 25.7MB, backup size: 25.7MB\n" + " repo1: size: 3MB, backup size: 3KB\n" + "\n" + " incr backup: 20201116-155000F_20201119-152100I\n" + " timestamp start/stop: 2020-11-19 15:21:00 / 2020-11-19 15:21:03\n" + " wal start/stop: 000000010000000000000005 / 000000010000000000000005\n" + " database size: 19.2MB, backup size: 8.2KB\n" + " repo1: size: 2.3MB, backup size: 346B\n" + " backup reference list: 20201116-155000F\n", + "text - multi-repo, prior backup: no archives but backups (code coverage)"); + + //-------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("multi-repo, stanza requested does not exist, but other stanzas do"); + + argList2 = strLstDup(argListMultiRepo); + hrnCfgArgRawZ(argList2, cfgOptStanza, "stanza4"); + harnessCfgLoad(cfgCmdInfo, argList2); + + TEST_RESULT_STR_Z( + infoRender(), + "stanza: stanza4\n" + " status: error (missing stanza path)\n", + "multi-repo, stanza requested does not exist, but other stanzas do"); + + // Add stanza3 to repo1 but with a current PG that is different than repo2 + //-------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("multi-repo, current database different across repos"); + + content = strNew + ( + "[db]\n" + "db-id=2\n" + "db-system-id=6626363367545678888\n" + "db-version=\"9.5\"\n" 
+ "\n" + "[db:history]\n" + "1={\"db-id\":6626363367545678089,\"db-version\":\"9.4\"}\n" + "2={\"db-id\":6626363367545678888,\"db-version\":\"9.5\"}\n" + ); + + filePathName = strNewFmt("%s/stanza3/archive.info", strZ(archivePath)); + TEST_RESULT_VOID( + storagePutP(storageNewWriteP(storageLocalWrite(), filePathName), harnessInfoChecksum(content)), + "put archive info to file - stanza3, repo1 stanza upgraded"); + + content = strNew + ( + "[db]\n" + "db-catalog-version=201409291\n" + "db-control-version=942\n" + "db-id=2\n" + "db-system-id=6626363367545678888\n" + "db-version=\"9.5\"\n" + "\n" + "[backup:current]\n" + "20201212-192538F={\"backrest-format\":5,\"backrest-version\":\"2.25\"," + "\"backup-archive-start\":\"000000010000000000000002\",\"backup-archive-stop\":\"000000010000000000000003\"," + "\"backup-info-repo-size\":3159000,\"backup-info-repo-size-delta\":3100,\"backup-info-size\":26897000," + "\"backup-info-size-delta\":26897020,\"backup-timestamp-start\":1607801138,\"backup-timestamp-stop\":1607801140," + "\"backup-type\":\"full\",\"db-id\":1,\"option-archive-check\":true,\"option-archive-copy\":true," + "\"option-backup-standby\":true,\"option-checksum-page\":false,\"option-compress\":false,\"option-hardlink\":true," + "\"option-online\":true}\n" + "20210112-192538F={\"backrest-format\":5,\"backrest-version\":\"2.25\"," + "\"backup-archive-start\":\"000000010000000000000006\",\"backup-archive-stop\":\"000000010000000000000006\"," + "\"backup-info-repo-size\":3159000,\"backup-info-repo-size-delta\":3100,\"backup-info-size\":26897000," + "\"backup-info-size-delta\":26897020,\"backup-timestamp-start\":1610479538,\"backup-timestamp-stop\":1610479540," + "\"backup-type\":\"full\",\"db-id\":2,\"option-archive-check\":true,\"option-archive-copy\":true," + "\"option-backup-standby\":true,\"option-checksum-page\":false,\"option-compress\":false,\"option-hardlink\":true," + "\"option-online\":true}\n" + "\n" + "[db:history]\n" + "1={\"db-catalog-version\":201409291,\"db-control-version\":942,\"db-system-id\":6626363367545678089," + "\"db-version\":\"9.4\"}\n" + "2={\"db-catalog-version\":201409291,\"db-control-version\":942,\"db-system-id\":6626363367545678888," + "\"db-version\":\"9.5\"}\n" + ); + + filePathName = strNewFmt("%s/stanza3/backup.info", strZ(backupPath)); + TEST_RESULT_VOID( + storagePutP(storageNewWriteP(storageLocalWrite(), filePathName), harnessInfoChecksum(content)), + "put backup info to file - stanza3, repo1 stanza upgraded"); + + String *archiveStanza3 = strNewFmt("%s/stanza3/9.4-1/0000000100000000", strZ(archivePath)); + TEST_RESULT_VOID(storagePathCreateP(storageLocalWrite(), archiveStanza3), "create stanza3 db1 WAL directory, repo1"); + filePathName = strNewFmt( + "%s/000000010000000000000002-47dff2b7552a9d66e4bae1a762488a6885e7082c.gz", strZ(archiveStanza3)); + TEST_RESULT_VOID(storagePutP(storageNewWriteP(storageLocalWrite(), filePathName), bufNew(0)), "touch WAL stanza3, db1"); + filePathName = strNewFmt( + "%s/000000010000000000000003-47dff2b7552a9d66e4bae1a762488a6885e7082c.gz", strZ(archiveStanza3)); + TEST_RESULT_VOID(storagePutP(storageNewWriteP(storageLocalWrite(), filePathName), bufNew(0)), "touch WAL stanza3, db1"); + + archiveStanza3 = strNewFmt("%s/stanza3/9.5-2/0000000100000000", strZ(archivePath)); + TEST_RESULT_VOID(storagePathCreateP(storageLocalWrite(), archiveStanza3), "create stanza3 db2 WAL directory, repo1"); + filePathName = strNewFmt( + "%s/000000010000000000000006-47dff2b7552a9d66e4bae1a762488a6885e7082c.gz", strZ(archiveStanza3)); + 
TEST_RESULT_VOID(storagePutP(storageNewWriteP(storageLocalWrite(), filePathName), bufNew(0)), "touch WAL stanza3, db2"); + + argList2 = strLstDup(argListMultiRepo); + hrnCfgArgRawZ(argList2, cfgOptStanza, "stanza3"); + harnessCfgLoad(cfgCmdInfo, argList2); + + TEST_RESULT_STR_Z( + infoRender(), + "stanza: stanza3\n" + " status: error (database mismatch across repos)\n" + " repo1: ok\n" + " repo2: ok\n" + " cipher: mixed\n" + " repo1: none\n" + " repo2: aes-256-cbc\n" + "\n" + " db (prior)\n" + " wal archive min/max (9.4): 000000010000000000000001/000000010000000000000003\n" + "\n" + " full backup: 20201110-100000F\n" + " timestamp start/stop: 2020-11-10 10:00:00 / 2020-11-10 10:00:02\n" + " wal start/stop: 000000010000000000000001 / 000000010000000000000002\n" + " database size: 25.7MB, backup size: 25.7MB\n" + " repo2: size: 3MB, backup size: 3KB\n" + "\n" + " full backup: 20201212-192538F\n" + " timestamp start/stop: 2020-12-12 19:25:38 / 2020-12-12 19:25:40\n" + " wal start/stop: 000000010000000000000002 / 000000010000000000000003\n" + " database size: 25.7MB, backup size: 25.7MB\n" + " repo1: size: 3MB, backup size: 3KB\n" + "\n" + " db (current)\n" + " wal archive min/max (9.5): 000000010000000000000006/000000010000000000000006\n" + "\n" + " full backup: 20210112-192538F\n" + " timestamp start/stop: 2021-01-12 19:25:38 / 2021-01-12 19:25:40\n" + " wal start/stop: 000000010000000000000006 / 000000010000000000000006\n" + " database size: 25.7MB, backup size: 25.7MB\n" + " repo1: size: 3MB, backup size: 3KB\n", + "text - multi-repo, database mismatch, repo2 stanza-upgrade needed"); + + hrnCfgArgRawZ(argList2, cfgOptOutput, "json"); + harnessCfgLoad(cfgCmdInfo, argList2); + + TEST_RESULT_STR_Z( + infoRender(), + "[" + "{" + "\"archive\":[" + "{" + "\"database\":{" + "\"id\":1," + "\"repo-key\":1" + "}," + "\"id\":\"9.4-1\"," + "\"max\":\"000000010000000000000003\"," + "\"min\":\"000000010000000000000002\"" + "}," + "{" + "\"database\":{" + "\"id\":2," + "\"repo-key\":1" + "}," + "\"id\":\"9.5-2\"," + "\"max\":\"000000010000000000000006\"," + "\"min\":\"000000010000000000000006\"" + "}," + "{" + "\"database\":{" + "\"id\":1," + "\"repo-key\":2" + "}," + "\"id\":\"9.4-1\"," + "\"max\":\"000000010000000000000002\"," + "\"min\":\"000000010000000000000001\"" + "}" + "]," + "\"backup\":[" + "{" + "\"archive\":{" + "\"start\":\"000000010000000000000001\"," + "\"stop\":\"000000010000000000000002\"" + "}," + "\"backrest\":{" + "\"format\":5," + "\"version\":\"2.25\"" + "}," + "\"database\":{" + "\"id\":1," + "\"repo-key\":2" + "}," + "\"info\":{" + "\"delta\":26897020," + "\"repository\":{" + "\"delta\":3100," + "\"size\":3159000" + "}," + "\"size\":26897000" + "}," + "\"label\":\"20201110-100000F\"," + "\"prior\":null," + "\"reference\":null," + "\"timestamp\":{" + "\"start\":1605002400," + "\"stop\":1605002402" + "}," + "\"type\":\"full\"" + "}," + "{" + "\"archive\":{" + "\"start\":\"000000010000000000000002\"," + "\"stop\":\"000000010000000000000003\"" + "}," + "\"backrest\":{" + "\"format\":5," + "\"version\":\"2.25\"" + "}," + "\"database\":{" + "\"id\":1," + "\"repo-key\":1" + "}," + "\"info\":{" + "\"delta\":26897020," + "\"repository\":{" + "\"delta\":3100," + "\"size\":3159000" + "}," + "\"size\":26897000" + "}," + "\"label\":\"20201212-192538F\"," + "\"prior\":null," + "\"reference\":null," + "\"timestamp\":{" + "\"start\":1607801138," + "\"stop\":1607801140" + "}," + "\"type\":\"full\"" + "}," + "{" + "\"archive\":{" + "\"start\":\"000000010000000000000006\"," + 
"\"stop\":\"000000010000000000000006\"" + "}," + "\"backrest\":{" + "\"format\":5," + "\"version\":\"2.25\"" + "}," + "\"database\":{" + "\"id\":2," + "\"repo-key\":1" + "}," + "\"info\":{" + "\"delta\":26897020," + "\"repository\":{" + "\"delta\":3100," + "\"size\":3159000" + "}," + "\"size\":26897000" + "}," + "\"label\":\"20210112-192538F\"," + "\"prior\":null," + "\"reference\":null," + "\"timestamp\":{" + "\"start\":1610479538," + "\"stop\":1610479540" + "}," + "\"type\":\"full\"" + "}" + "]," + "\"cipher\":\"mixed\"," + "\"db\":[" + "{" + "\"id\":1," + "\"repo-key\":1," + "\"system-id\":6626363367545678089," + "\"version\":\"9.4\"" + "}," + "{" + "\"id\":2," + "\"repo-key\":1," + "\"system-id\":6626363367545678888," + "\"version\":\"9.5\"" + "}," + "{" + "\"id\":1," + "\"repo-key\":2," + "\"system-id\":6626363367545678089," + "\"version\":\"9.4\"" + "}" + "]," + "\"name\":\"stanza3\"," + "\"repo\":[" + "{" + "\"cipher\":\"none\"," + "\"key\":1," + "\"status\":{" + "\"code\":0," + "\"message\":\"ok\"" + "}" + "}," + "{" + "\"cipher\":\"aes-256-cbc\"," + "\"key\":2," + "\"status\":{" + "\"code\":0," + "\"message\":\"ok\"" + "}" + "}" + "]," + "\"status\":{" + "\"code\":5," + "\"lock\":{\"backup\":{\"held\":false}}," + "\"message\":\"database mismatch across repos\"" + "}" + "}" + "]", + "json - multi-repo, database mismatch, repo2 stanza-upgrade needed"); // Crypto error //-------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("encryption error"); + content = strNew ( "[global]\n" @@ -1132,61 +2318,248 @@ testRun(void) "HINT: backup.info cannot be opened and is required to perform a backup.\n" "HINT: has a stanza-create been performed?\n" "HINT: use option --stanza if encryption settings are different for the stanza than the global settings.", - strZ(backupStanza2Path), strZ(backupStanza2Path), strZ(strNewFmt("%s/backup.info.copy", strZ(backupStanza2Path)))); + strZ(backupStanza1Path), strZ(backupStanza1Path), strZ(strNewFmt("%s/backup.info.copy", strZ(backupStanza1Path)))); + + // Unset environment key + hrnCfgEnvKeyRemoveRaw(cfgOptRepoCipherPass, 2); } //****************************************************************************************************************************** - if (testBegin("formatTextDb()")) + if (testBegin("database mismatch - special cases")) { // These tests cover branches not covered in other tests - KeyValue *stanzaInfo = kvNew(); - VariantList *dbSection = varLstNew(); - Variant *pgInfo = varNewKv(kvNew()); - kvPut(varKv(pgInfo), DB_KEY_ID_VAR, varNewUInt(1)); - kvPut(varKv(pgInfo), DB_KEY_SYSTEM_ID_VAR, varNewUInt64(6625633699176220261)); - kvPut(varKv(pgInfo), DB_KEY_VERSION_VAR, VARSTR(pgVersionToStr(90500))); + TEST_TITLE("multi-repo, database mismatch, pg system-id only"); - varLstAdd(dbSection, pgInfo); + storagePathCreateP(storageLocalWrite(), archivePath); + storagePathCreateP(storageLocalWrite(), backupPath); + String *archivePath2 = strNewFmt("%s/repo2/%s", testPath(), "archive"); + String *backupPath2 = strNewFmt("%s/repo2/%s", testPath(), "backup"); + storagePathCreateP(storageLocalWrite(), archivePath2); + storagePathCreateP(storageLocalWrite(), backupPath2); - // Add the database history, backup and archive sections to the stanza info - kvPut(stanzaInfo, STANZA_KEY_DB_VAR, varNewVarLst(dbSection)); + String *content = strNew + ( + "[db]\n" + "db-catalog-version=201409291\n" + "db-control-version=942\n" + "db-id=1\n" + "db-system-id=6569239123849665679\n" + 
"db-version=\"9.4\"\n" + "\n" + "[backup:current]\n" + "20201116-155000F={" + "\"backrest-format\":5,\"backrest-version\":\"2.30\"," + "\"backup-archive-start\":\"000000010000000000000002\",\"backup-archive-stop\":\"000000010000000000000003\"," + "\"backup-info-repo-size\":3159000,\"backup-info-repo-size-delta\":3100,\"backup-info-size\":26897000," + "\"backup-info-size-delta\":26897020,\"backup-timestamp-start\":1605541800,\"backup-timestamp-stop\":1605541802," + "\"backup-type\":\"full\",\"db-id\":1,\"option-archive-check\":true,\"option-archive-copy\":true," + "\"option-backup-standby\":false,\"option-checksum-page\":false,\"option-compress\":false,\"option-hardlink\":false," + "\"option-online\":true}\n" + "\n" + "[db:history]\n" + "1={\"db-catalog-version\":201409291,\"db-control-version\":942,\"db-system-id\":6569239123849665679," + "\"db-version\":\"9.4\"}\n" + ); - VariantList *backupSection = varLstNew(); - Variant *backupInfo = varNewKv(kvNew()); + TEST_RESULT_VOID( + storagePutP( + storageNewWriteP(storageLocalWrite(), strNewFmt("%s/backup.info", strZ(backupStanza1Path))), + harnessInfoChecksum(content)), + "put backup info to file, repo1"); - kvPut(varKv(backupInfo), BACKUP_KEY_LABEL_VAR, VARSTRDEF("20181119-152138F")); - kvPut(varKv(backupInfo), BACKUP_KEY_TYPE_VAR, VARSTRDEF("full")); - kvPutKv(varKv(backupInfo), KEY_ARCHIVE_VAR); - KeyValue *infoInfo = kvPutKv(varKv(backupInfo), BACKUP_KEY_INFO_VAR); - kvPut(infoInfo, KEY_SIZE_VAR, varNewUInt64(0)); - kvPut(infoInfo, KEY_DELTA_VAR, varNewUInt64(0)); - KeyValue *repoInfo = kvPutKv(infoInfo, INFO_KEY_REPOSITORY_VAR); - kvAdd(repoInfo, KEY_SIZE_VAR, varNewUInt64(0)); - kvAdd(repoInfo, KEY_DELTA_VAR, varNewUInt64(0)); - KeyValue *databaseInfo = kvPutKv(varKv(backupInfo), KEY_DATABASE_VAR); - kvAdd(databaseInfo, DB_KEY_ID_VAR, varNewUInt(1)); - KeyValue *timeInfo = kvPutKv(varKv(backupInfo), BACKUP_KEY_TIMESTAMP_VAR); - kvAdd(timeInfo, KEY_START_VAR, varNewUInt64(1542383276)); - kvAdd(timeInfo, KEY_STOP_VAR, varNewUInt64(1542383289)); + content = strNew + ( + "[db]\n" + "db-id=1\n" + "db-system-id=6569239123849665679\n" + "db-version=\"9.4\"\n" + "\n" + "[db:history]\n" + "1={\"db-id\":6569239123849665679,\"db-version\":\"9.4\"}\n" + ); - varLstAdd(backupSection, backupInfo); + TEST_RESULT_VOID( + storagePutP( + storageNewWriteP(storageLocalWrite(), strNewFmt("%s/archive.info", strZ(archiveStanza1Path))), + harnessInfoChecksum(content)), + "put archive info to file, repo1"); - kvPut(stanzaInfo, STANZA_KEY_BACKUP_VAR, varNewVarLst(backupSection)); - kvPut(stanzaInfo, KEY_ARCHIVE_VAR, varNewVarLst(varLstNew())); + String *walPath = strNewFmt("%s/9.4-1/0000000100000000", strZ(archiveStanza1Path)); + TEST_RESULT_VOID(storagePathCreateP(storageLocalWrite(), walPath), "create stanza1, repo1, archive directory"); + TEST_RESULT_INT(system( + strZ(strNewFmt("touch %s", strZ(strNewFmt("%s/000000010000000000000002-22dff2b7552a9d66e4bae1a762488a6885e7082c.gz", + strZ(walPath)))))), 0, "touch WAL file, stanza1, repo1"); + TEST_RESULT_INT(system( + strZ(strNewFmt("touch %s", strZ(strNewFmt("%s/000000010000000000000003-37dff2b7552a9d66e4bae1a762488a6885e7082c.gz", + strZ(walPath)))))), 0, "touch WAL file, stanza1, repo1"); - String *result = strNew(""); - formatTextDb(stanzaInfo, result, NULL); + content = strNew + ( + "[db]\n" + "db-catalog-version=201409291\n" + "db-control-version=942\n" + "db-id=1\n" + "db-system-id=6569239123849665679\n" + "db-version=\"9.5\"\n" + "\n" + "[backup:current]\n" + "20201116-155010F={" + 
"\"backrest-format\":5,\"backrest-version\":\"2.30\"," + "\"backup-archive-start\":\"000000010000000000000001\",\"backup-archive-stop\":\"000000010000000000000002\"," + "\"backup-info-repo-size\":3159000,\"backup-info-repo-size-delta\":3100,\"backup-info-size\":26897000," + "\"backup-info-size-delta\":26897020,\"backup-timestamp-start\":1605541810,\"backup-timestamp-stop\":1605541812," + "\"backup-type\":\"full\",\"db-id\":1,\"option-archive-check\":true,\"option-archive-copy\":true," + "\"option-backup-standby\":false,\"option-checksum-page\":false,\"option-compress\":false,\"option-hardlink\":false," + "\"option-online\":true}\n" + "\n" + "[db:history]\n" + "1={\"db-catalog-version\":201409291,\"db-control-version\":942,\"db-system-id\":6569239123849665679," + "\"db-version\":\"9.5\"}\n" + ); + TEST_RESULT_VOID( + storagePutP( + storageNewWriteP(storageLocalWrite(), strNewFmt("%s/stanza1/backup.info", strZ(backupPath2))), + harnessInfoChecksum(content)), + "put backup info to file, repo2, same system-id, different version"); + + content = strNew + ( + "[db]\n" + "db-id=1\n" + "db-system-id=6569239123849665679\n" + "db-version=\"9.5\"\n" + "\n" + "[db:history]\n" + "1={\"db-id\":6569239123849665679,\"db-version\":\"9.5\"}\n" + ); + + TEST_RESULT_VOID( + storagePutP( + storageNewWriteP(storageLocalWrite(), strNewFmt("%s/stanza1/archive.info", strZ(archivePath2))), + harnessInfoChecksum(content)), + "put archive info to file, repo2, same system-id, different version"); + + walPath = strNewFmt("%s/stanza1/9.5-1/0000000100000000", strZ(archivePath2)); + TEST_RESULT_VOID(storagePathCreateP(storageLocalWrite(), walPath), "create stanza1, repo2, archive directory"); + TEST_RESULT_INT(system( + strZ(strNewFmt("touch %s", strZ(strNewFmt("%s/000000010000000000000001-11dff2b7552a9d66e4bae1a762488a6885e7082c.gz", + strZ(walPath)))))), 0, "touch WAL file, stanza1, repo2"); + TEST_RESULT_INT(system( + strZ(strNewFmt("touch %s", strZ(strNewFmt("%s/000000010000000000000002-222ff2b7552a9d66e4bae1a762488a6885e7082c.gz", + strZ(walPath)))))), 0, "touch WAL file, stanza1, repo2"); + + StringList *argList2 = strLstNew(); + hrnCfgArgRawZ(argList2, cfgOptRepoPath, TEST_PATH_REPO); + hrnCfgArgRawZ(argList2, cfgOptStanza, "stanza1"); + hrnCfgArgKeyRawFmt(argList2, cfgOptRepoPath, 2, "%s/repo2", testPath()); + harnessCfgLoad(cfgCmdInfo, argList2); + + // Note that although the time on the backup in repo2 > repo1, repo1 current db is not the same because of the version so + // the repo1, since read first, will be considered the current PG TEST_RESULT_STR_Z( - result, + infoRender(), + "stanza: stanza1\n" + " status: error (database mismatch across repos)\n" + " repo1: ok\n" + " repo2: ok\n" + " cipher: none\n" + "\n" + " db (prior)\n" + " wal archive min/max (9.5): 000000010000000000000001/000000010000000000000002\n" + "\n" + " full backup: 20201116-155010F\n" + " timestamp start/stop: 2020-11-16 15:50:10 / 2020-11-16 15:50:12\n" + " wal start/stop: 000000010000000000000001 / 000000010000000000000002\n" + " database size: 25.7MB, backup size: 25.7MB\n" + " repo2: size: 3MB, backup size: 3KB\n" "\n" " db (current)\n" - " full backup: 20181119-152138F\n" - " timestamp start/stop: 2018-11-16 15:47:56 / 2018-11-16 15:48:09\n" - " wal start/stop: n/a\n" - " database size: 0B, backup size: 0B\n" - " repository size: 0B, repository backup size: 0B\n", - "formatTextDb only backup section (code coverage only)"); + " wal archive min/max (9.4): 000000010000000000000002/000000010000000000000003\n" + "\n" + " full backup: 
20201116-155000F\n" + " timestamp start/stop: 2020-11-16 15:50:00 / 2020-11-16 15:50:02\n" + " wal start/stop: 000000010000000000000002 / 000000010000000000000003\n" + " database size: 25.7MB, backup size: 25.7MB\n" + " repo1: size: 3MB, backup size: 3KB\n", + "text - db mismatch, diff system-id across repos, repo1 considered current db since read first"); + + //-------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("multi-repo, database mismatch, pg version only"); + + content = strNew + ( + "[db]\n" + "db-catalog-version=201409291\n" + "db-control-version=942\n" + "db-id=1\n" + "db-system-id=6569239123849665888\n" + "db-version=\"9.4\"\n" + "\n" + "[backup:current]\n" + "20201116-155010F={" + "\"backrest-format\":5,\"backrest-version\":\"2.30\"," + "\"backup-archive-start\":\"000000010000000000000001\",\"backup-archive-stop\":\"000000010000000000000002\"," + "\"backup-info-repo-size\":3159000,\"backup-info-repo-size-delta\":3100,\"backup-info-size\":26897000," + "\"backup-info-size-delta\":26897020,\"backup-timestamp-start\":1605541810,\"backup-timestamp-stop\":1605541812," + "\"backup-type\":\"full\",\"db-id\":1,\"option-archive-check\":true,\"option-archive-copy\":true," + "\"option-backup-standby\":false,\"option-checksum-page\":false,\"option-compress\":false,\"option-hardlink\":false," + "\"option-online\":true}\n" + "\n" + "[db:history]\n" + "1={\"db-catalog-version\":201409291,\"db-control-version\":942,\"db-system-id\":6569239123849665888," + "\"db-version\":\"9.4\"}\n" + ); + + TEST_RESULT_VOID( + storagePutP( + storageNewWriteP(storageLocalWrite(), strNewFmt("%s/stanza1/backup.info", strZ(backupPath2))), + harnessInfoChecksum(content)), + "put backup info to file, repo2, different system-id, same version"); + + content = strNew + ( + "[db]\n" + "db-id=1\n" + "db-system-id=6569239123849665888\n" + "db-version=\"9.4\"\n" + "\n" + "[db:history]\n" + "1={\"db-id\":6569239123849665888,\"db-version\":\"9.4\"}\n" + ); + + TEST_RESULT_VOID( + storagePutP( + storageNewWriteP(storageLocalWrite(), strNewFmt("%s/stanza1/archive.info", strZ(archivePath2))), + harnessInfoChecksum(content)), + "put archive info to file, repo2, different system-id, same version"); + + TEST_RESULT_STR_Z( + infoRender(), + "stanza: stanza1\n" + " status: error (database mismatch across repos)\n" + " repo1: ok\n" + " repo2: ok\n" + " cipher: none\n" + "\n" + " db (prior)\n" + " wal archive min/max (9.4): none present\n" + "\n" + " full backup: 20201116-155010F\n" + " timestamp start/stop: 2020-11-16 15:50:10 / 2020-11-16 15:50:12\n" + " wal start/stop: 000000010000000000000001 / 000000010000000000000002\n" + " database size: 25.7MB, backup size: 25.7MB\n" + " repo2: size: 3MB, backup size: 3KB\n" + "\n" + " db (current)\n" + " wal archive min/max (9.4): 000000010000000000000002/000000010000000000000003\n" + "\n" + " full backup: 20201116-155000F\n" + " timestamp start/stop: 2020-11-16 15:50:00 / 2020-11-16 15:50:02\n" + " wal start/stop: 000000010000000000000002 / 000000010000000000000003\n" + " database size: 25.7MB, backup size: 25.7MB\n" + " repo1: size: 3MB, backup size: 3KB\n", + "text - db mismatch, diff version across repos, repo1 considered current db since read first"); } //****************************************************************************************************************************** @@ -1222,10 +2595,18 @@ testRun(void) TEST_ERROR_FMT( harnessCfgLoad(cfgCmdInfo, argList), OptionInvalidError, "option 'set' not 
valid without option 'stanza'"); - //-------------------------------------------------------------------------------------------------------------------------- + // Option --repo not requied when only 1 repo configured strLstAddZ(argList, "--stanza=stanza1"); harnessCfgLoad(cfgCmdInfo, argList); + TEST_ERROR_FMT( + cmdInfo(), FileMissingError, "manifest does not exist for backup 'bogus'\n" + "HINT: is the backup listed when running the info command with --stanza option only?"); + + // Option --repo when only 1 repo configured but will search the repo provided + strLstAddZ(argList, "--repo=1"); + harnessCfgLoad(cfgCmdInfo, argList); + TEST_ERROR_FMT( cmdInfo(), FileMissingError, "manifest does not exist for backup 'bogus'\n" "HINT: is the backup listed when running the info command with --stanza option only?"); diff --git a/test/src/module/command/remoteTest.c b/test/src/module/command/remoteTest.c index 1b931746e..cffcbac43 100644 --- a/test/src/module/command/remoteTest.c +++ b/test/src/module/command/remoteTest.c @@ -139,6 +139,7 @@ testRun(void) strLstAddZ(argList, "--stanza=test"); strLstAddZ(argList, "--process=0"); strLstAddZ(argList, "--" CFGOPT_REMOTE_TYPE "=" PROTOCOL_REMOTE_TYPE_REPO); + hrnCfgArgRawZ(argList, cfgOptRepo, "1"); harnessCfgLoadRole(cfgCmdArchivePush, cfgCmdRoleRemote, argList); cmdRemote(HARNESS_FORK_CHILD_READ(), HARNESS_FORK_CHILD_WRITE()); diff --git a/test/src/module/command/restoreTest.c b/test/src/module/command/restoreTest.c index 39e9c627c..4de1f37f2 100644 --- a/test/src/module/command/restoreTest.c +++ b/test/src/module/command/restoreTest.c @@ -2354,7 +2354,9 @@ testRun(void) argList = strLstNew(); strLstAddZ(argList, "--stanza=test1"); - strLstAdd(argList, strNewFmt("--repo1-path=%s", strZ(repoPath))); + hrnCfgArgKeyRawZ(argList, cfgOptRepoPath, 1, "/repo-bogus"); + hrnCfgArgKeyRaw(argList, cfgOptRepoPath, 2, repoPath); + hrnCfgArgRawZ(argList, cfgOptRepo, "2"); strLstAdd(argList, strNewFmt("--pg1-path=%s", strZ(pgPath))); strLstAddZ(argList, "--delta"); strLstAddZ(argList, "--type=preserve"); diff --git a/test/src/module/command/stanzaTest.c b/test/src/module/command/stanzaTest.c index f5483b159..edf0ecabd 100644 --- a/test/src/module/command/stanzaTest.c +++ b/test/src/module/command/stanzaTest.c @@ -33,11 +33,12 @@ testRun(void) strLstAdd(argListBase, strNewFmt("--repo1-path=%s/repo", testPath())); // ***************************************************************************************************************************** - if (testBegin("cmdStanzaCreate(), checkStanzaInfo()")) + if (testBegin("cmdStanzaCreate(), checkStanzaInfo(), cmdStanzaDelete()")) { // Load Parameters StringList *argList = strLstDup(argListBase); - strLstAddZ(argList, "--repo1-host=/repo/not/local"); + strLstAddZ(argList, "--repo1-host=/repo"); + strLstAddZ(argList, "--repo2-host=/repo/not/local"); harnessCfgLoad(cfgCmdStanzaCreate, argList); TEST_ERROR_FMT( @@ -45,7 +46,13 @@ testRun(void) //-------------------------------------------------------------------------------------------------------------------------- argList = strLstDup(argListBase); - harnessCfgLoad(cfgCmdStanzaCreate, argList); + hrnCfgArgKeyRawFmt(argList, cfgOptRepoPath, 2, "%s/repo2", testPath()); + hrnCfgArgRawZ(argList, cfgOptRepo, "2"); + TEST_ERROR_FMT( + harnessCfgLoad(cfgCmdStanzaCreate, argList), OptionInvalidError, "option 'repo' not valid for command 'stanza-create'"); + + //-------------------------------------------------------------------------------------------------------------------------- + 
harnessCfgLoad(cfgCmdStanzaCreate, argListBase); // Create the stop file TEST_RESULT_VOID( @@ -65,7 +72,8 @@ testRun(void) storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strZ(stanza))), pgControlTestToBuffer((PgControl){.version = PG_VERSION_96, .systemId = 6569239123849665679})); - TEST_RESULT_VOID(cmdStanzaCreate(), "stanza create - no files exist"); + TEST_RESULT_VOID(cmdStanzaCreate(), "stanza create - one repo, no files exist"); + harnessLogResult("P00 INFO: stanza-create for stanza 'db' on repo1"); String *contentArchive = strNew ( @@ -119,85 +127,256 @@ testRun(void) true, " test and stanza backup info files are equal"); //-------------------------------------------------------------------------------------------------------------------------- - TEST_RESULT_VOID(cmdStanzaCreate(), "stanza create - files already exist and both are valid"); - harnessLogResult("P00 INFO: stanza 'db' already exists and is valid"); + TEST_TITLE("cmdStanzaCreate success - multi-repo and encryption"); - // Remove backup.info + argList = strLstDup(argListBase); + hrnCfgArgKeyRawFmt(argList, cfgOptRepoPath, 2, "%s/repo2", testPath()); + hrnCfgArgKeyRawZ(argList, cfgOptRepoCipherType, 2, CIPHER_TYPE_AES_256_CBC); + hrnCfgEnvKeyRawZ(cfgOptRepoCipherPass, 2, "12345678"); + hrnCfgArgKeyRawFmt(argList, cfgOptRepoPath, 3, "%s/repo3", testPath()); + hrnCfgArgKeyRawFmt(argList, cfgOptRepoPath, 4, "%s/repo4", testPath()); + hrnCfgArgKeyRawZ(argList, cfgOptRepoCipherType, 4, CIPHER_TYPE_AES_256_CBC); + hrnCfgEnvKeyRawZ(cfgOptRepoCipherPass, 4, "87654321"); + harnessCfgLoad(cfgCmdStanzaCreate, argList); + + TEST_RESULT_VOID(cmdStanzaCreate(), "stanza create - files already exist on repo1 and both are valid"); + harnessLogResult( + "P00 INFO: stanza-create for stanza 'db' on repo1\n" + "P00 INFO: stanza 'db' already exists on repo1 and is valid\n" + "P00 INFO: stanza-create for stanza 'db' on repo2\n" + "P00 INFO: stanza-create for stanza 'db' on repo3\n" + "P00 INFO: stanza-create for stanza 'db' on repo4"); + + String *archiveInfoFileNameRepo2 = strNewFmt("repo2/archive/%s/archive.info", strZ(stanza)); + String *backupInfoFileNameRepo2 = strNewFmt("repo2/backup/%s/backup.info", strZ(stanza)); + String *archiveInfoFileNameRepo3 = strNewFmt("repo3/archive/%s/archive.info", strZ(stanza)); + String *backupInfoFileNameRepo3 = strNewFmt("repo3/backup/%s/backup.info", strZ(stanza)); + String *archiveInfoFileNameRepo4 = strNewFmt("repo4/archive/%s/archive.info", strZ(stanza)); + String *backupInfoFileNameRepo4 = strNewFmt("repo4/backup/%s/backup.info", strZ(stanza)); + + InfoArchive *infoArchive = NULL; + TEST_ASSIGN( + infoArchive, infoArchiveLoadFile(storageTest, archiveInfoFileNameRepo2, cipherTypeAes256Cbc, strNew("12345678")), + " load archive info"); + TEST_RESULT_PTR_NE(infoArchiveCipherPass(infoArchive), NULL, " cipher sub set"); + + InfoBackup *infoBackup = NULL; + TEST_ASSIGN( + infoBackup, infoBackupLoadFile(storageTest, backupInfoFileNameRepo2, cipherTypeAes256Cbc, strNew("12345678")), + " load backup info"); + TEST_RESULT_PTR_NE(infoBackupCipherPass(infoBackup), NULL, " cipher sub set"); + + TEST_RESULT_BOOL( + strEq(infoArchiveCipherPass(infoArchive), infoBackupCipherPass(infoBackup)), false, + " cipher sub different for archive and backup"); + + // Confirm non-encrypted repo created successfully + TEST_ASSIGN( + infoArchive, infoArchiveLoadFile(storageTest, archiveInfoFileNameRepo3, cipherTypeNone, NULL), " load archive info"); + 
TEST_RESULT_PTR(infoArchiveCipherPass(infoArchive), NULL, " archive cipher sub not set on non-encrypted repo"); + + TEST_ASSIGN( + infoBackup, infoBackupLoadFile(storageTest, backupInfoFileNameRepo3, cipherTypeNone, NULL)," load backup info"); + TEST_RESULT_PTR(infoBackupCipherPass(infoBackup), NULL, " backup cipher sub not set on non-encrypted repo"); + + // Confirm other repo encrypted with different password + TEST_ASSIGN( + infoArchive, infoArchiveLoadFile(storageTest, archiveInfoFileNameRepo4, cipherTypeAes256Cbc, strNew("87654321")), + " load archive info"); + TEST_RESULT_PTR_NE(infoArchiveCipherPass(infoArchive), NULL, " cipher sub set"); + + TEST_ASSIGN( + infoBackup, infoBackupLoadFile(storageTest, backupInfoFileNameRepo4, cipherTypeAes256Cbc, strNew("87654321")), + " load backup info"); + TEST_RESULT_PTR_NE(infoBackupCipherPass(infoBackup), NULL, " cipher sub set"); + + //-------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("cmdStanzaCreate missing files - multi-repo and encryption"); + + // Remove backup.info on repo1 TEST_RESULT_VOID(storageRemoveP(storageTest, backupInfoFileName, .errorOnMissing = true), "backup.info removed"); - TEST_RESULT_VOID(cmdStanzaCreate(), " stanza create - success with archive.info files and only backup.info.copy"); + + // Remove archive.info on repo2 + TEST_RESULT_VOID(storageRemoveP(storageTest, archiveInfoFileNameRepo2, .errorOnMissing = true), "archive.info removed"); + + // Remove info files on repo3 + TEST_RESULT_VOID(storageRemoveP(storageTest, archiveInfoFileNameRepo3, .errorOnMissing = true), "archive.info removed"); + TEST_RESULT_VOID(storageRemoveP(storageTest, backupInfoFileNameRepo3, .errorOnMissing = true), "backup.info removed"); + + // Remove copy files repo4 + TEST_RESULT_VOID( + storageRemoveP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(archiveInfoFileNameRepo4)), .errorOnMissing = true), + "archive.info.copy removed"); + TEST_RESULT_VOID( + storageRemoveP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(backupInfoFileNameRepo4)), .errorOnMissing = true), + "backup.info.copy removed"); + + TEST_RESULT_VOID(cmdStanzaCreate(), "stanza create - success with missing files"); + harnessLogResult( + "P00 INFO: stanza-create for stanza 'db' on repo1\n" + "P00 INFO: stanza-create for stanza 'db' on repo2\n" + "P00 INFO: stanza-create for stanza 'db' on repo3\n" + "P00 INFO: stanza-create for stanza 'db' on repo4"); + TEST_RESULT_BOOL( bufEq( storageGetP(storageNewReadP(storageTest, backupInfoFileName)), storageGetP(storageNewReadP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(backupInfoFileName))))), - true, "backup.info recreated from backup.info.copy"); - - // Remove archive.info - TEST_RESULT_VOID(storageRemoveP(storageTest, archiveInfoFileName, .errorOnMissing = true), "archive.info removed"); - TEST_RESULT_VOID(cmdStanzaCreate(), " stanza create - success with backup.info files and only archive.info.copy"); + true, "backup.info recreated repo1 from backup.info.copy"); TEST_RESULT_BOOL( bufEq( - storageGetP(storageNewReadP(storageTest, archiveInfoFileName)), - storageGetP(storageNewReadP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(archiveInfoFileName))))), - true, "archive.info recreated from archive.info.copy"); - - // Remove info files - TEST_RESULT_VOID(storageRemoveP(storageTest, archiveInfoFileName, .errorOnMissing = true), "archive.info removed"); - TEST_RESULT_VOID(storageRemoveP(storageTest, backupInfoFileName, .errorOnMissing = true), 
"backup.info removed"); - TEST_RESULT_VOID(cmdStanzaCreate(), "stanza create - success with copy files only"); + storageGetP(storageNewReadP(storageTest, archiveInfoFileNameRepo2)), + storageGetP(storageNewReadP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(archiveInfoFileNameRepo2))))), + true, "archive.info repo2 recreated from archive.info.copy"); TEST_RESULT_BOOL( (bufEq( - storageGetP(storageNewReadP(storageTest, backupInfoFileName)), - storageGetP(storageNewReadP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(backupInfoFileName))))) && + storageGetP(storageNewReadP(storageTest, backupInfoFileNameRepo3)), + storageGetP(storageNewReadP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(backupInfoFileNameRepo3))))) && bufEq( - storageGetP(storageNewReadP(storageTest, archiveInfoFileName)), - storageGetP(storageNewReadP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(archiveInfoFileName)))))), - true, "info files recreated from copy files"); - - // Remove copy files - TEST_RESULT_VOID( - storageRemoveP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(archiveInfoFileName)), .errorOnMissing = true), - "archive.info.copy removed"); - TEST_RESULT_VOID( - storageRemoveP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(backupInfoFileName)), .errorOnMissing = true), - "backup.info.copy removed"); - TEST_RESULT_VOID(cmdStanzaCreate(), "stanza create - success with info files only"); + storageGetP(storageNewReadP(storageTest, archiveInfoFileNameRepo3)), + storageGetP(storageNewReadP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(archiveInfoFileNameRepo3)))))), + true, "info files recreated repo3 from copy files"); TEST_RESULT_BOOL( (bufEq( - storageGetP(storageNewReadP(storageTest, backupInfoFileName)), - storageGetP(storageNewReadP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(backupInfoFileName))))) && + storageGetP(storageNewReadP(storageTest, backupInfoFileNameRepo4)), + storageGetP(storageNewReadP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(backupInfoFileNameRepo4))))) && bufEq( - storageGetP(storageNewReadP(storageTest, archiveInfoFileName)), - storageGetP(storageNewReadP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(archiveInfoFileName)))))), - true, "info files recreated from info files"); + storageGetP(storageNewReadP(storageTest, archiveInfoFileNameRepo4)), + storageGetP(storageNewReadP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(archiveInfoFileNameRepo4)))))), + true, "info files recreated repo4 from info files"); - // Errors //-------------------------------------------------------------------------------------------------------------------------- - // Archive files removed - backup.info and backup.info.copy exist + TEST_TITLE("cmdStanzaDelete - multi-repo and encryption, delete"); + + StringList *argListCmd = strLstNew(); + hrnCfgArgKeyRawFmt(argListCmd, cfgOptRepoPath, 1, "%s/repo", testPath()); + hrnCfgArgKeyRawFmt(argListCmd, cfgOptRepoPath, 2, "%s/repo2", testPath()); + hrnCfgArgKeyRawFmt(argListCmd, cfgOptRepoPath, 3, "%s/repo3", testPath()); + hrnCfgArgKeyRawFmt(argListCmd, cfgOptRepoPath, 4, "%s/repo4", testPath()); + hrnCfgArgRawFmt(argListCmd, cfgOptStanza, "%s", strZ(stanza)); + hrnCfgArgKeyRawFmt(argListCmd, cfgOptPgPath, 1, "%s/%s", testPath(), strZ(stanza)); + + TEST_ERROR_FMT( + harnessCfgLoad(cfgCmdStanzaDelete, argListCmd), OptionRequiredError, "stanza-delete command requires option: repo\n" + "HINT: this command requires a specific repository to operate on"); + + // Add the repo option + StringList *argListDelete = strLstDup(argListCmd); + 
hrnCfgArgRawZ(argListDelete, cfgOptRepo, "4"); + harnessCfgLoad(cfgCmdStanzaDelete, argListDelete); + + TEST_ERROR_FMT( + cmdStanzaDelete(), FileMissingError, "stop file does not exist for stanza 'db'\n" + "HINT: has the pgbackrest stop command been run on this server for this stanza?"); + + // Create the stop file + TEST_RESULT_VOID( + storagePutP( + storageNewWriteP(storageLocalWrite(), lockStopFileName(cfgOptionStr(cfgOptStanza))), BUFSTRDEF("")), + "create stop file"); + + TEST_RESULT_VOID(cmdStanzaDelete(), "stanza delete - repo4"); + TEST_RESULT_BOOL( + storagePathExistsP(storageTest, strNewFmt("repo4/archive/%s", strZ(stanza))), false, " stanza deleted"); + TEST_RESULT_BOOL( + storageExistsP(storageLocal(), lockStopFileName(cfgOptionStr(cfgOptStanza))), false, " stop file removed"); + + // Remove the cipher pass environment variable otherwise stanza-create will recreate the stanza + hrnCfgEnvKeyRemoveRaw(cfgOptRepoCipherPass, 4); + + // Stanza with directories only + argListDelete = strLstDup(argListCmd); + hrnCfgArgRawZ(argListDelete, cfgOptRepo, "3"); + harnessCfgLoad(cfgCmdStanzaDelete, argListDelete); + + TEST_RESULT_VOID( + storagePathCreateP(storageTest, strNewFmt("repo3/archive/%s/9.6-1/1234567812345678", strZ(stanza))), + "create archive sub directory"); + TEST_RESULT_VOID( + storagePathCreateP(storageTest, strNewFmt("repo3/backup/%s/20190708-154306F", strZ(stanza))), + "create backup sub directory"); + TEST_RESULT_VOID( + storagePutP( + storageNewWriteP(storageLocalWrite(), lockStopFileName(cfgOptionStr(cfgOptStanza))), BUFSTRDEF("")), + "create stop file"); + TEST_RESULT_VOID(cmdStanzaDelete(), " stanza delete - repo3 - sub directories only"); + TEST_RESULT_BOOL( + storagePathExistsP(storageTest, strNewFmt("repo3/archive/%s", strZ(stanza))), false, " stanza archive deleted"); + TEST_RESULT_BOOL( + storagePathExistsP(storageTest, strNewFmt("repo3/backup/%s", strZ(stanza))), false, " stanza backup deleted"); + + //-------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("cmdStanzaCreate errors"); + + argList = strLstDup(argListBase); + hrnCfgArgKeyRawFmt(argList, cfgOptRepoPath, 2, "%s/repo2", testPath()); + hrnCfgArgKeyRawZ(argList, cfgOptRepoCipherType, 2, CIPHER_TYPE_AES_256_CBC); + hrnCfgEnvKeyRawZ(cfgOptRepoCipherPass, 2, "12345678"); + harnessCfgLoad(cfgCmdStanzaCreate, argList); + + // Backup files removed - archive.info and archive.info.copy exist repo2 + TEST_RESULT_VOID( + storageRemoveP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(backupInfoFileNameRepo2)), .errorOnMissing = true), + "backup.info.copy removed repo2"); + TEST_RESULT_VOID(storageRemoveP(storageTest, backupInfoFileNameRepo2, .errorOnMissing = true), + "backup.info removed repo2"); + TEST_ERROR_FMT( + cmdStanzaCreate(), FileMissingError, "archive.info exists but backup.info is missing on repo2\n" + "HINT: this may be a symptom of repository corruption!"); + harnessLogResult( + "P00 INFO: stanza-create for stanza 'db' on repo1\n" + "P00 INFO: stanza 'db' already exists on repo1 and is valid\n" + "P00 INFO: stanza-create for stanza 'db' on repo2"); + + // Archive files removed - backup.info and backup.info.copy exist repo1 TEST_RESULT_VOID( storageRemoveP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(archiveInfoFileName)), .errorOnMissing = true), - "archive.info.copy removed"); - TEST_RESULT_VOID(storageRemoveP(storageTest, archiveInfoFileName, .errorOnMissing = true), "archive.info removed"); + "archive.info.copy 
removed repo1"); + TEST_RESULT_VOID(storageRemoveP(storageTest, archiveInfoFileName, .errorOnMissing = true), + "archive.info removed repo1"); TEST_ERROR_FMT( - cmdStanzaCreate(), FileMissingError, "backup.info exists but archive.info is missing\n" + cmdStanzaCreate(), FileMissingError, "backup.info exists but archive.info is missing on repo1\n" "HINT: this may be a symptom of repository corruption!"); + harnessLogResult("P00 INFO: stanza-create for stanza 'db' on repo1"); - // Archive files removed - backup.info.copy exists + // Delete the last repo so only 1 remains + argListDelete = strLstDup(argListCmd); + hrnCfgArgRawZ(argListDelete, cfgOptRepo, "2"); + harnessCfgLoad(cfgCmdStanzaDelete, argListDelete); + + // Create the stop file TEST_RESULT_VOID( - storageRemoveP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(backupInfoFileName)), .errorOnMissing = true), - "backup.info.copy removed"); - TEST_ERROR_FMT( - cmdStanzaCreate(), FileMissingError, "backup.info exists but archive.info is missing\n" - "HINT: this may be a symptom of repository corruption!"); + storagePutP( + storageNewWriteP(storageLocalWrite(), lockStopFileName(cfgOptionStr(cfgOptStanza))), BUFSTRDEF("")), + "create stop file"); + + TEST_RESULT_VOID(cmdStanzaDelete(), " stanza delete - only 1 remains"); + + // Remove the cipher pass environment variable otherwise stanza-create will recreate the stanza + hrnCfgEnvKeyRemoveRaw(cfgOptRepoCipherPass, 2); + + argList = strLstDup(argListBase); + harnessCfgLoad(cfgCmdStanzaCreate, argList); // Archive files removed - backup.info exists + TEST_RESULT_VOID( + storageRemoveP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(backupInfoFileName)), .errorOnMissing = true), + "backup.info.copy removed"); + TEST_ERROR_FMT( + cmdStanzaCreate(), FileMissingError, "backup.info exists but archive.info is missing on repo1\n" + "HINT: this may be a symptom of repository corruption!"); + harnessLogResult("P00 INFO: stanza-create for stanza 'db' on repo1"); + + // Archive files removed - backup.info.copy exists TEST_RESULT_VOID( storageMoveP(storageTest, storageNewReadP(storageTest, backupInfoFileName), storageNewWriteP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(backupInfoFileName)))), "backup.info moved to backup.info.copy"); TEST_ERROR_FMT( - cmdStanzaCreate(), FileMissingError, "backup.info exists but archive.info is missing\n" + cmdStanzaCreate(), FileMissingError, "backup.info exists but archive.info is missing on repo1\n" "HINT: this may be a symptom of repository corruption!"); + harnessLogResult("P00 INFO: stanza-create for stanza 'db' on repo1"); // Backup files removed - archive.info file exists TEST_RESULT_VOID( @@ -208,8 +387,9 @@ testRun(void) storageRemoveP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(backupInfoFileName)), .errorOnMissing = true), "backup.info.copy removed"); TEST_ERROR_FMT( - cmdStanzaCreate(), FileMissingError, "archive.info exists but backup.info is missing\n" + cmdStanzaCreate(), FileMissingError, "archive.info exists but backup.info is missing on repo1\n" "HINT: this may be a symptom of repository corruption!"); + harnessLogResult("P00 INFO: stanza-create for stanza 'db' on repo1"); // Backup files removed - archive.info.copy file exists TEST_RESULT_VOID( @@ -218,17 +398,9 @@ testRun(void) storageNewWriteP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(archiveInfoFileName)))), "archive.info moved to archive.info.copy"); TEST_ERROR_FMT( - cmdStanzaCreate(), FileMissingError, "archive.info exists but backup.info is missing\n" - "HINT: this may be a 
symptom of repository corruption!"); - - // Backup files removed - archive.info and archive.info.copy exist - TEST_RESULT_VOID( - storagePutP( - storageNewWriteP(storageTest, archiveInfoFileName), harnessInfoChecksum(contentArchive)), - "put archive info to file"); - TEST_ERROR_FMT( - cmdStanzaCreate(), FileMissingError, "archive.info exists but backup.info is missing\n" + cmdStanzaCreate(), FileMissingError, "archive.info exists but backup.info is missing on repo1\n" "HINT: this may be a symptom of repository corruption!"); + harnessLogResult("P00 INFO: stanza-create for stanza 'db' on repo1"); // checkStanzaInfo() - already checked in checkTest so just a sanity check here //-------------------------------------------------------------------------------------------------------------------------- @@ -256,6 +428,7 @@ testRun(void) "archive: id = 1, version = 9.6, system-id = 6569239123849665679\n" "backup : id = 2, version = 9.6, system-id = 6569239123849665679\n" "HINT: this may be a symptom of repository corruption!"); + harnessLogResult("P00 INFO: stanza-create for stanza 'db' on repo1"); //-------------------------------------------------------------------------------------------------------------------------- // Copy files may or may not exist - remove @@ -300,6 +473,7 @@ testRun(void) cmdStanzaCreate(), FileInvalidError, "backup and archive info files exist but do not match the database\n" "HINT: is this the correct stanza?\n" "HINT: did an error occur during stanza-upgrade?"); + harnessLogResult("P00 INFO: stanza-create for stanza 'db' on repo1"); // Create archive.info and backup.info files that match but do not match the current database system-id contentArchive = strNew @@ -339,6 +513,7 @@ testRun(void) cmdStanzaCreate(), FileInvalidError, "backup and archive info files exist but do not match the database\n" "HINT: is this the correct stanza?\n" "HINT: did an error occur during stanza-upgrade?"); + harnessLogResult("P00 INFO: stanza-create for stanza 'db' on repo1"); // Remove the info files and add sub directory to backup TEST_RESULT_VOID(storageRemoveP(storageTest, archiveInfoFileName, .errorOnMissing = true), "archive.info removed"); @@ -348,6 +523,7 @@ testRun(void) storagePathCreateP(storageTest, strNewFmt("%s/backup.history", strZ(backupStanzaPath))), "create directory in backup"); TEST_ERROR_FMT(cmdStanzaCreate(), PathNotEmptyError, "backup directory not empty"); + harnessLogResult("P00 INFO: stanza-create for stanza 'db' on repo1"); // File in archive, directory in backup TEST_RESULT_VOID( @@ -355,18 +531,22 @@ testRun(void) storageNewWriteP(storageTest, strNewFmt("%s/somefile", strZ(archiveStanzaPath))), BUFSTRDEF("some content")), "create file in archive"); TEST_ERROR_FMT(cmdStanzaCreate(), PathNotEmptyError, "backup directory and/or archive directory not empty"); + harnessLogResult("P00 INFO: stanza-create for stanza 'db' on repo1"); // File in archive, backup empty TEST_RESULT_VOID( storagePathRemoveP(storageTest, strNewFmt("%s/backup.history", strZ(backupStanzaPath))), "remove backup subdir"); TEST_ERROR_FMT(cmdStanzaCreate(), PathNotEmptyError, "archive directory not empty"); + harnessLogResult("P00 INFO: stanza-create for stanza 'db' on repo1"); // Repeat last test using --force (deprecated) //-------------------------------------------------------------------------------------------------------------------------- strLstAddZ(argList, "--force"); harnessCfgLoad(cfgCmdStanzaCreate, argList); TEST_ERROR_FMT(cmdStanzaCreate(), PathNotEmptyError, "archive directory not 
empty"); - harnessLogResult("P00 WARN: option --force is no longer supported"); + harnessLogResult( + "P00 WARN: option --force is no longer supported\n" + "P00 INFO: stanza-create for stanza 'db' on repo1"); } // ***************************************************************************************************************************** @@ -396,9 +576,11 @@ testRun(void) }); TEST_RESULT_VOID(cmdStanzaCreate(), "stanza create - db online"); + harnessLogResult("P00 INFO: stanza-create for stanza 'db' on repo1"); TEST_RESULT_BOOL( storageExistsP(storageTest, strNewFmt("repo/archive/%s/archive.info", strZ(stanza))), true, " stanza created"); + harnessCfgLoad(cfgCmdStanzaUpgrade, argList); harnessPqScriptSet((HarnessPq []) { HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_92, strZ(pg1Path), false, NULL, NULL), @@ -406,7 +588,9 @@ testRun(void) }); TEST_RESULT_VOID(cmdStanzaUpgrade(), "stanza upgrade - db online"); - harnessLogResult("P00 INFO: stanza 'db' is already up to date"); + harnessLogResult( + "P00 INFO: stanza-upgrade for stanza 'db' on repo1\n" + "P00 INFO: stanza 'db' on repo1 is already up to date"); // Version mismatch //-------------------------------------------------------------------------------------------------------------------------- @@ -482,41 +666,16 @@ testRun(void) TEST_RESULT_UINT(pgControl.catalogVersion, 201204301, " catalogVersion set"); } - // ***************************************************************************************************************************** - if (testBegin("cmdStanzaCreate() - encryption")) - { - StringList *argList = strLstDup(argListBase); - strLstAddZ(argList, "--repo1-cipher-type=aes-256-cbc"); - setenv("PGBACKREST_REPO1_CIPHER_PASS", "12345678", true); - harnessCfgLoad(cfgCmdStanzaCreate, argList); - - // Create pg_control - storagePutP( - storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strZ(stanza))), - pgControlTestToBuffer((PgControl){.version = PG_VERSION_96, .systemId = 6569239123849665679})); - - TEST_RESULT_VOID(cmdStanzaCreate(), "stanza create - encryption"); - - InfoArchive *infoArchive = NULL; - TEST_ASSIGN( - infoArchive, infoArchiveLoadFile(storageTest, archiveInfoFileName, cipherTypeAes256Cbc, strNew("12345678")), - " load archive info"); - TEST_RESULT_PTR_NE(infoArchiveCipherPass(infoArchive), NULL, " cipher sub set"); - - InfoBackup *infoBackup = NULL; - TEST_ASSIGN( - infoBackup, infoBackupLoadFile(storageTest, backupInfoFileName, cipherTypeAes256Cbc, strNew("12345678")), - " load backup info"); - TEST_RESULT_PTR_NE(infoBackupCipherPass(infoBackup), NULL, " cipher sub set"); - - TEST_RESULT_BOOL( - strEq(infoArchiveCipherPass(infoArchive), infoBackupCipherPass(infoBackup)), false, - " cipher sub different for archive and backup"); - } - // ***************************************************************************************************************************** if (testBegin("cmdStanzaUpgrade()")) { + // Create pg_control + TEST_RESULT_VOID( + storagePutP( + storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strZ(stanza))), + pgControlTestToBuffer((PgControl){.version = PG_VERSION_96, .systemId = 6569239123849665679})), + "create pg_control"); + // Load Parameters StringList *argList = strLstDup(argListBase); strLstAddZ(argList, "--repo1-host=/repo/not/local"); @@ -527,7 +686,14 @@ testRun(void) //-------------------------------------------------------------------------------------------------------------------------- argList 
= strLstDup(argListBase); - harnessCfgLoad(cfgCmdStanzaUpgrade, argList); + hrnCfgArgKeyRawFmt(argList, cfgOptRepoPath, 2, "%s/repo2", testPath()); + hrnCfgArgRawZ(argList, cfgOptRepo, "2"); + TEST_ERROR_FMT( + harnessCfgLoad(cfgCmdStanzaUpgrade, argList), OptionInvalidError, + "option 'repo' not valid for command 'stanza-upgrade'"); + + //-------------------------------------------------------------------------------------------------------------------------- + harnessCfgLoad(cfgCmdStanzaUpgrade, argListBase); // Create the stop file TEST_RESULT_VOID( @@ -539,31 +705,45 @@ testRun(void) storageRemoveP(storageLocalWrite(), lockStopFileName(cfgOptionStr(cfgOptStanza))), " remove the stop file"); //-------------------------------------------------------------------------------------------------------------------------- - // Load Parameters - argList = strLstDup(argListBase); - harnessCfgLoad(cfgCmdStanzaCreate, argList); + TEST_TITLE("cmdStanzaUpgrade - info file mismatches"); - // Create pg_control - storagePutP( - storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strZ(stanza))), - pgControlTestToBuffer((PgControl){.version = PG_VERSION_96, .systemId = 6569239123849665679})); + // Stanza with only archive.info and backup.info but no .copy files + String *contentBackup = strNew + ( + "[db]\n" + "db-catalog-version=201608131\n" + "db-control-version=960\n" + "db-id=1\n" + "db-system-id=6569239123849665679\n" + "db-version=\"9.6\"\n" + "\n" + "[db:history]\n" + "1={\"db-catalog-version\":201608131,\"db-control-version\":960,\"db-system-id\":6569239123849665679," + "\"db-version\":\"9.6\"}\n" + ); + TEST_RESULT_VOID( + storagePutP( + storageNewWriteP(storageTest, backupInfoFileName), harnessInfoChecksum(contentBackup)), + "put backup info to file"); - TEST_RESULT_VOID(cmdStanzaCreate(), "stanza create"); - - //-------------------------------------------------------------------------------------------------------------------------- - argList = strLstDup(argListBase); - harnessCfgLoad(cfgCmdStanzaUpgrade, argList); - - TEST_RESULT_VOID(cmdStanzaUpgrade(), "stanza upgrade - files already exist and both are valid"); - harnessLogResult("P00 INFO: stanza 'db' is already up to date"); - - // Remove the copy files - storageRemoveP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(archiveInfoFileName)), .errorOnMissing = true); - storageRemoveP(storageTest, strNewFmt("%s" INFO_COPY_EXT, strZ(backupInfoFileName)), .errorOnMissing = true); + String *contentArchive = strNew + ( + "[db]\n" + "db-id=1\n" + "db-system-id=6569239123849665679\n" + "db-version=\"9.6\"\n" + "\n" + "[db:history]\n" + "1={\"db-id\":6569239123849665679,\"db-version\":\"9.6\"}\n" + ); + TEST_RESULT_VOID( + storagePutP( + storageNewWriteP(storageTest, archiveInfoFileName), harnessInfoChecksum(contentArchive)), + "put archive info file"); // backup info up to date but archive info db-id mismatch //-------------------------------------------------------------------------------------------------------------------------- - String *contentArchive = strNew + contentArchive = strNew ( "[db]\n" "db-id=2\n" @@ -583,10 +763,11 @@ testRun(void) "archive: id = 2, version = 9.6, system-id = 6569239123849665679\n" "backup : id = 1, version = 9.6, system-id = 6569239123849665679\n" "HINT: this may be a symptom of repository corruption!"); + harnessLogResult("P00 INFO: stanza-upgrade for stanza 'db' on repo1"); // backup info up to date but archive info version is not 
//-------------------------------------------------------------------------------------------------------------------------- - String *contentBackup = strNew + contentBackup = strNew ( "[db]\n" "db-catalog-version=201608131\n" @@ -622,6 +803,7 @@ testRun(void) "put archive info to file"); TEST_RESULT_VOID(cmdStanzaUpgrade(), "stanza upgrade - archive.info file upgraded - version"); + harnessLogResult("P00 INFO: stanza-upgrade for stanza 'db' on repo1"); contentArchive = strNew ( "[db]\n" @@ -667,6 +849,7 @@ testRun(void) "put backup info to file"); TEST_RESULT_VOID(cmdStanzaUpgrade(), "stanza upgrade - backup.info file upgraded - version"); + harnessLogResult("P00 INFO: stanza-upgrade for stanza 'db' on repo1"); contentBackup = strNew ( "[db]\n" @@ -734,6 +917,7 @@ testRun(void) "put archive info to file"); TEST_RESULT_VOID(cmdStanzaUpgrade(), "stanza upgrade - archive.info file upgraded - system-id"); + harnessLogResult("P00 INFO: stanza-upgrade for stanza 'db' on repo1"); contentArchive = strNew ( "[db]\n" @@ -779,6 +963,7 @@ testRun(void) "put backup info to file"); TEST_RESULT_VOID(cmdStanzaUpgrade(), "stanza upgrade - backup.info file upgraded - system-id"); + harnessLogResult("P00 INFO: stanza-upgrade for stanza 'db' on repo1"); contentBackup = strNew ( "[db]\n" @@ -841,6 +1026,7 @@ testRun(void) pgControlTestToBuffer((PgControl){.version = PG_VERSION_96, .systemId = 6569239123849665679})); TEST_RESULT_VOID(cmdStanzaCreate(), "create a stanza that will not be deleted"); + harnessLogResult("P00 INFO: stanza-create for stanza 'otherstanza' on repo1"); argList = strLstDup(argListCmd); strLstAdd(argList, strNewFmt("--stanza=%s", strZ(stanza))); @@ -855,62 +1041,6 @@ testRun(void) TEST_RESULT_BOOL(stanzaDelete(storageRepoWrite(), NULL, strLstNew()), true, " archiveList=NULL, backupList=0"); TEST_RESULT_BOOL(stanzaDelete(storageRepoWrite(), strLstNew(), strLstNew()), true, " archiveList=0, backupList=0"); - // create and delete a stanza - //-------------------------------------------------------------------------------------------------------------------------- - argList = strLstDup(argListCmd); - strLstAdd(argList, strNewFmt("--stanza=%s", strZ(stanza))); - strLstAdd(argList,strNewFmt("--pg1-path=%s/%s", testPath(), strZ(stanza))); - strLstAddZ(argList, "--no-online"); - harnessCfgLoad(cfgCmdStanzaCreate, argList); - - // Create pg_control for stanza-create - storagePutP( - storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strZ(stanza))), - pgControlTestToBuffer((PgControl){.version = PG_VERSION_96, .systemId = 6569239123849665679})); - - TEST_RESULT_VOID(cmdStanzaCreate(), "create a stanza to be deleted"); - TEST_RESULT_BOOL( - storageExistsP(storageTest, strNewFmt("repo/archive/%s/archive.info", strZ(stanza))), true, " stanza created"); - - argList = strLstDup(argListCmd); - strLstAdd(argList, strNewFmt("--stanza=%s", strZ(stanza))); - strLstAdd(argList,strNewFmt("--pg1-path=%s/%s", testPath(), strZ(stanza))); - harnessCfgLoad(cfgCmdStanzaDelete, argList); - - TEST_ERROR_FMT( - cmdStanzaDelete(), FileMissingError, "stop file does not exist for stanza 'db'\n" - "HINT: has the pgbackrest stop command been run on this server for this stanza?"); - - // Create the stop file - TEST_RESULT_VOID( - storagePutP( - storageNewWriteP(storageLocalWrite(), lockStopFileName(cfgOptionStr(cfgOptStanza))), BUFSTRDEF("")), - "create stop file"); - - TEST_RESULT_VOID(cmdStanzaDelete(), "stanza delete"); - TEST_RESULT_BOOL( - storagePathExistsP(storageTest, 
strNewFmt("repo/archive/%s", strZ(stanza))), false, " stanza deleted"); - TEST_RESULT_BOOL( - storageExistsP(storageLocal(), lockStopFileName(cfgOptionStr(cfgOptStanza))), false, " stop file removed"); - - // Create stanza with directories only - //-------------------------------------------------------------------------------------------------------------------------- - TEST_RESULT_VOID( - storagePathCreateP(storageTest, strNewFmt("repo/archive/%s/9.6-1/1234567812345678", strZ(stanza))), - "create archive sub directory"); - TEST_RESULT_VOID( - storagePathCreateP(storageTest, strNewFmt("repo/backup/%s/20190708-154306F", strZ(stanza))), - "create backup sub directory"); - TEST_RESULT_VOID( - storagePutP( - storageNewWriteP(storageLocalWrite(), lockStopFileName(cfgOptionStr(cfgOptStanza))), BUFSTRDEF("")), - "create stop file"); - TEST_RESULT_VOID(cmdStanzaDelete(), " stanza delete - sub directories only"); - TEST_RESULT_BOOL( - storagePathExistsP(storageTest, strNewFmt("repo/archive/%s", strZ(stanza))), false, " stanza archive deleted"); - TEST_RESULT_BOOL( - storagePathExistsP(storageTest, strNewFmt("repo/backup/%s", strZ(stanza))), false, " stanza backup deleted"); - // Create stanza archive only //-------------------------------------------------------------------------------------------------------------------------- TEST_RESULT_VOID( @@ -1008,7 +1138,21 @@ testRun(void) "create pid file"); TEST_ERROR_FMT( cmdStanzaDelete(), PgRunningError, PG_FILE_POSTMASTERPID " exists - looks like " PG_NAME " is running. " - "To delete stanza 'db', shut down " PG_NAME " for stanza 'db' and try again, or use --force."); + "To delete stanza 'db' on repo1, shut down " PG_NAME " for stanza 'db' and try again, or use --force."); + + // Specify repo option + StringList *argListDel = strLstDup(argList); + hrnCfgArgKeyRawFmt(argListDel, cfgOptRepoPath, 2, "%s/repo2", testPath()); + hrnCfgArgRawZ(argListDel, cfgOptRepo, "2"); + harnessCfgLoad(cfgCmdStanzaDelete, argListDel); + + TEST_RESULT_VOID( + storagePutP( + storageNewWriteP(storageTest, strNewFmt("repo2/backup/%s/backup.info", strZ(stanza))), BUFSTRDEF("")), + "create backup.info"); + TEST_ERROR_FMT( + cmdStanzaDelete(), PgRunningError, PG_FILE_POSTMASTERPID " exists - looks like " PG_NAME " is running. 
" + "To delete stanza 'db' on repo2, shut down " PG_NAME " for stanza 'db' and try again, or use --force."); //-------------------------------------------------------------------------------------------------------------------------- TEST_TITLE("force delete when pg appears to be running"); diff --git a/test/src/module/command/verifyTest.c b/test/src/module/command/verifyTest.c index 00a03aa60..7f552a688 100644 --- a/test/src/module/command/verifyTest.c +++ b/test/src/module/command/verifyTest.c @@ -674,8 +674,17 @@ testRun(void) // ***************************************************************************************************************************** if (testBegin("cmdVerify(), verifyProcess()")) { - // Load Parameters + //-------------------------------------------------------------------------------------------------------------------------- StringList *argList = strLstDup(argListBase); + hrnCfgArgKeyRawFmt(argList, cfgOptRepoPath, 4, "%s/repo4", testPath()); + + TEST_ERROR_FMT( + harnessCfgLoad(cfgCmdVerify, argList), OptionRequiredError, "verify command requires option: repo\n" + "HINT: this command requires a specific repository to operate on"); + + //-------------------------------------------------------------------------------------------------------------------------- + // Load Parameters with multi-repo + hrnCfgArgRawZ(argList, cfgOptRepo, "1"); harnessCfgLoad(cfgCmdVerify, argList); // Store valid archive/backup info files @@ -808,6 +817,10 @@ testRun(void) //-------------------------------------------------------------------------------------------------------------------------- TEST_TITLE("valid info files, start next timeline"); + // Load Parameters - single default repo + argList = strLstDup(argListBase); + harnessCfgLoad(cfgCmdVerify, argList); + TEST_RESULT_VOID( storagePutP( storageNewWriteP( @@ -844,6 +857,13 @@ testRun(void) //-------------------------------------------------------------------------------------------------------------------------- TEST_TITLE("valid info files, unreadable WAL file"); + // Load Parameters - single non-default repo + argList = strLstNew(); + hrnCfgArgKeyRawFmt(argList, cfgOptRepoPath, 2, "%s/repo", testPath()); + hrnCfgArgRawFmt(argList, cfgOptStanza, "%s", strZ(stanza)); + hrnCfgArgRawZ(argList, cfgOptRepo, "2"); + harnessCfgLoad(cfgCmdVerify, argList); + TEST_RESULT_VOID( storagePutP( storageNewWriteP( diff --git a/test/src/module/config/loadTest.c b/test/src/module/config/loadTest.c index eebc14e0f..32805ccd0 100644 --- a/test/src/module/config/loadTest.c +++ b/test/src/module/config/loadTest.c @@ -7,6 +7,8 @@ Test Configuration Load #include "version.h" #include "common/harnessConfig.h" +#include "storage/cifs/storage.h" +#include "storage/posix/storage.h" /*********************************************************************************************************************************** Test run @@ -32,22 +34,102 @@ testRun(void) // ***************************************************************************************************************************** if (testBegin("cfgLoadUpdateOption()")) { - TEST_TITLE("repo-host-cmd is defaulted when null"); + TEST_TITLE("error if user passes pg/repo options when they are internal"); StringList *argList = strLstNew(); hrnCfgArgRawZ(argList, cfgOptStanza, "test"); hrnCfgArgRawZ(argList, cfgOptPgPath, "/pg1"); + hrnCfgArgRawZ(argList, cfgOptRepo, "1"); + TEST_ERROR(harnessCfgLoad(cfgCmdCheck, argList), OptionInvalidError, "option 'repo' not valid for command 'check'"); + + // 
------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("error when repo option not set and repo total > 1 or first repo index != 1"); + + argList = strLstNew(); + hrnCfgArgKeyRawZ(argList, cfgOptRepoPath, 1, "/repo1"); + hrnCfgArgKeyRawZ(argList, cfgOptRepoPath, 4, "/repo4"); + hrnCfgArgRawZ(argList, cfgOptStanza, "test"); + hrnCfgArgRawZ(argList, cfgOptPgPath, "/pg1"); + TEST_ERROR( + harnessCfgLoad(cfgCmdStanzaDelete, argList), OptionRequiredError, + "stanza-delete command requires option: repo\n" + "HINT: this command requires a specific repository to operate on"); + + argList = strLstNew(); + hrnCfgArgKeyRawZ(argList, cfgOptRepoPath, 2, "/repo2"); + hrnCfgArgRawZ(argList, cfgOptStanza, "test"); + hrnCfgArgRawZ(argList, cfgOptPgPath, "/pg1"); + TEST_ERROR( + harnessCfgLoad(cfgCmdStanzaDelete, argList), OptionRequiredError, + "stanza-delete command requires option: repo\n" + "HINT: this command requires a specific repository to operate on"); + + argList = strLstNew(); + hrnCfgArgKeyRawZ(argList, cfgOptRepoPath, 1, "/repo1"); + hrnCfgArgKeyRawZ(argList, cfgOptRepoPath, 4, "/repo4"); + TEST_RESULT_VOID(harnessCfgLoad(cfgCmdInfo, argList), "load info config -- option repo not required"); + TEST_RESULT_BOOL(cfgCommand() == cfgCmdInfo, true, " command is info"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("local default repo paths must be different"); + + argList = strLstNew(); + hrnCfgArgRawZ(argList, cfgOptStanza, "test"); + hrnCfgArgRawZ(argList, cfgOptRepo, "3"); + hrnCfgArgKeyRawZ(argList, cfgOptRepoRetentionDiff, 4, "4"); + hrnCfgArgKeyRawZ(argList, cfgOptRepoRetentionDiff, 3, "3"); + hrnCfgArgKeyRawZ(argList, cfgOptRepoPath, 1, "/repo1"); + hrnCfgArgKeyRawZ(argList, cfgOptRepoHost, 2, "host2"); + TEST_ERROR( + harnessCfgLoad(cfgCmdExpire, argList), OptionInvalidValueError, + "local repo3 and repo4 paths are both '/var/lib/pgbackrest' but must be different"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("local default repo paths for cifs repo type must be different"); + + argList = strLstNew(); + hrnCfgArgRawZ(argList, cfgOptRepo, "2"); + hrnCfgArgKeyRawZ(argList, cfgOptRepoType, 1, STORAGE_CIFS_TYPE); + hrnCfgArgKeyRawZ(argList, cfgOptRepoType, 2, STORAGE_CIFS_TYPE); + TEST_ERROR( + harnessCfgLoad(cfgCmdInfo, argList), OptionInvalidValueError, + "local repo1 and repo2 paths are both '/var/lib/pgbackrest' but must be different"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("local repo paths same but types different"); + + argList = strLstNew(); + hrnCfgArgRawZ(argList, cfgOptRepo, "1"); + hrnCfgArgKeyRawZ(argList, cfgOptRepoType, 1, STORAGE_POSIX_TYPE); + hrnCfgArgKeyRawZ(argList, cfgOptRepoType, 2, STORAGE_CIFS_TYPE); + hrnCfgArgKeyRawZ(argList, cfgOptRepoType, 3, "s3"); + hrnCfgArgKeyRawZ(argList, cfgOptRepoS3Bucket, 3, "cool-bucket"); + hrnCfgArgKeyRawZ(argList, cfgOptRepoS3Region, 3, "region"); + hrnCfgArgKeyRawZ(argList, cfgOptRepoS3Endpoint, 3, "endpoint"); + hrnCfgEnvKeyRawZ(cfgOptRepoS3Key, 3, "mykey"); + hrnCfgEnvKeyRawZ(cfgOptRepoS3KeySecret, 3, "mysecretkey"); + harnessCfgLoad(cfgCmdInfo, argList); + + hrnCfgEnvKeyRemoveRaw(cfgOptRepoS3Key, 3); + hrnCfgEnvKeyRemoveRaw(cfgOptRepoS3KeySecret, 3); + + // 
------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("repo-host-cmd is defaulted when null"); + + argList = strLstNew(); + hrnCfgArgRawZ(argList, cfgOptStanza, "test"); + hrnCfgArgRawZ(argList, cfgOptPgPath, "/pg1"); harnessCfgLoad(cfgCmdCheck, argList); - cfgOptionSet(cfgOptRepoHost, cfgSourceParam, varNewStrZ("repo-host")); + cfgOptionIdxSet(cfgOptRepoHost, 0, cfgSourceParam, varNewStrZ("repo-host")); TEST_RESULT_VOID(cfgLoadUpdateOption(), "repo remote command is updated"); - TEST_RESULT_STR_Z(cfgOptionStr(cfgOptRepoHostCmd), testProjectExe(), " check repo1-host-cmd"); + TEST_RESULT_STR_Z(cfgOptionIdxStr(cfgOptRepoHostCmd, 0), testProjectExe(), " check repo1-host-cmd"); - cfgOptionSet(cfgOptRepoHostCmd, cfgSourceParam, VARSTRDEF("/other")); + cfgOptionIdxSet(cfgOptRepoHostCmd, 0, cfgSourceParam, VARSTRDEF("/other")); TEST_RESULT_VOID(cfgLoadUpdateOption(), "repo remote command was already set"); - TEST_RESULT_STR_Z(cfgOptionStr(cfgOptRepoHostCmd), "/other", " check repo1-host-cmd"); + TEST_RESULT_STR_Z(cfgOptionIdxStr(cfgOptRepoHostCmd, 0), "/other", " check repo1-host-cmd"); // ------------------------------------------------------------------------------------------------------------------------- TEST_TITLE("pg-host-cmd is defaulted when null"); @@ -151,6 +233,7 @@ testRun(void) argList = strLstNew(); hrnCfgArgRawZ(argList, cfgOptRepoHost, "repo1"); + hrnCfgArgRawZ(argList, cfgOptRepo, "1"); harnessCfgLoad(cfgCmdInfo, argList); // ------------------------------------------------------------------------------------------------------------------------- @@ -258,27 +341,33 @@ testRun(void) TEST_RESULT_BOOL(cfgOptionTest(cfgOptRepoRetentionArchive), false, " repo1-retention-archive not set"); // ------------------------------------------------------------------------------------------------------------------------- - setenv("PGBACKREST_REPO1_S3_KEY", "mykey", true); - setenv("PGBACKREST_REPO1_S3_KEY_SECRET", "mysecretkey", true); - // Invalid bucket name with verification enabled fails argList = strLstNew(); strLstAdd(argList, strNew("--stanza=db")); hrnCfgArgRawZ(argList, cfgOptPgPath, "/path/to/pg"); - strLstAdd(argList, strNew("--repo1-type=s3")); - strLstAdd(argList, strNew("--repo1-s3-bucket=bogus.bucket")); - strLstAdd(argList, strNew("--repo1-s3-region=region")); - strLstAdd(argList, strNew("--repo1-s3-endpoint=endpoint")); - strLstAdd(argList, strNew("--repo1-path=/repo")); + strLstAdd(argList, strNew("--repo2-type=s3")); + strLstAdd(argList, strNew("--repo2-s3-bucket=bogus.bucket")); + strLstAdd(argList, strNew("--repo2-s3-region=region")); + strLstAdd(argList, strNew("--repo2-s3-endpoint=endpoint")); + strLstAdd(argList, strNew("--repo2-path=/repo")); + hrnCfgEnvKeyRawZ(cfgOptRepoS3Key, 2, "mykey"); + hrnCfgEnvKeyRawZ(cfgOptRepoS3KeySecret, 2, "mysecretkey"); + hrnCfgArgRawZ(argList, cfgOptRepo, "2"); TEST_ERROR( harnessCfgLoad(cfgCmdArchiveGet, argList), OptionInvalidValueError, - "'bogus.bucket' is not valid for option 'repo1-s3-bucket'" + "'bogus.bucket' is not valid for option 'repo2-s3-bucket'" "\nHINT: RFC-2818 forbids dots in wildcard matches." "\nHINT: TLS/SSL verification cannot proceed with this bucket name." 
"\nHINT: remove dots from the bucket name."); + hrnCfgEnvKeyRemoveRaw(cfgOptRepoS3Key, 2); + hrnCfgEnvKeyRemoveRaw(cfgOptRepoS3KeySecret, 2); + // Invalid bucket name with verification disabled succeeds + hrnCfgEnvKeyRawZ(cfgOptRepoS3Key, 1, "mykey"); + hrnCfgEnvKeyRawZ(cfgOptRepoS3KeySecret, 1, "mysecretkey"); + argList = strLstNew(); strLstAdd(argList, strNew("--stanza=db")); hrnCfgArgRawZ(argList, cfgOptPgPath, "/path/to/pg"); @@ -396,6 +485,26 @@ testRun(void) storageRepoWrite(), AssertError, "unable to get writable storage in dry-run mode or before dry-run is initialized"); lockRelease(true); + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("error on multi-repo"); + + argList = strLstNew(); + strLstAddZ(argList, PROJECT_BIN); + hrnCfgArgRawZ(argList, cfgOptStanza, "db"); + hrnCfgArgKeyRawZ(argList, cfgOptRepoPath, 1, "/repo1"); + hrnCfgArgKeyRawZ(argList, cfgOptRepoPath, 2, "/repo2"); + strLstAddZ(argList, CFGCMD_EXPIRE); + + TEST_ERROR(cfgLoad(strLstSize(argList), strLstPtr(argList)), OptionInvalidValueError, "only repo1 may be configured"); + + argList = strLstNew(); + strLstAddZ(argList, PROJECT_BIN); + hrnCfgArgRawZ(argList, cfgOptStanza, "db"); + hrnCfgArgKeyRawZ(argList, cfgOptRepoPath, 2, "/repo2"); + strLstAddZ(argList, CFGCMD_EXPIRE); + + TEST_ERROR(cfgLoad(strLstSize(argList), strLstPtr(argList)), OptionInvalidValueError, "only repo1 may be configured"); + // Command does not have umask and disables keep-alives // ------------------------------------------------------------------------------------------------------------------------- argList = strLstNew(); @@ -519,7 +628,6 @@ testRun(void) strLstAdd(argList, strNewFmt("--log-path=%s", testPath())); hrnCfgArgRawZ(argList, cfgOptPgPath, "/path/to"); strLstAdd(argList, strNew("--process=1")); - hrnCfgArgRawZ(argList, cfgOptPg, "1"); strLstAddZ(argList, "--" CFGOPT_REMOTE_TYPE "=" PROTOCOL_REMOTE_TYPE_REPO); strLstAdd(argList, strNew("--log-level-file=warn")); hrnCfgArgRawZ(argList, cfgOptExecId, "1111-fe70d611"); diff --git a/test/src/module/config/parseTest.c b/test/src/module/config/parseTest.c index 2d04a945c..1a0c8cb44 100644 --- a/test/src/module/config/parseTest.c +++ b/test/src/module/config/parseTest.c @@ -1290,6 +1290,7 @@ testRun(void) TEST_RESULT_UINT(cfgOptionGroupIdxDefault(cfgOptGrpPg), 0, " pg1 is default"); TEST_RESULT_UINT(cfgOptionGroupIdxToKey(cfgOptGrpPg, 1), 2, " pg2 is index 2"); TEST_RESULT_STR_Z(cfgOptionStr(cfgOptPgPath), "/path/to/db", " default pg-path"); + TEST_RESULT_BOOL(cfgOptionGroupValid(cfgOptGrpPg), true, " pg group is valid"); TEST_RESULT_UINT(cfgOptionGroupIdxTotal(cfgOptGrpPg), 2, " pg1 and pg2 are set"); TEST_RESULT_BOOL(cfgOptionIdxBool(cfgOptPgLocal, 1), true, " pg2-local is set"); TEST_RESULT_BOOL(cfgOptionIdxTest(cfgOptPgHost, 1), false, " pg2-host is not set (pg2-local override)"); diff --git a/test/src/module/protocol/protocolTest.c b/test/src/module/protocol/protocolTest.c index db6c9060f..e306d365b 100644 --- a/test/src/module/protocol/protocolTest.c +++ b/test/src/module/protocol/protocolTest.c @@ -130,11 +130,19 @@ testRun(void) strLstAddZ(argList, "pgbackrest"); strLstAddZ(argList, "--stanza=test1"); hrnCfgArgRawZ(argList, cfgOptPgPath, "/path/to/pg"); + strLstAddZ(argList, "--repo1-path=/repo-local"); + strLstAddZ(argList, "--repo4-path=/remote-host-new"); + strLstAddZ(argList, "--repo4-host=remote-host-new"); + hrnCfgArgRawZ(argList, cfgOptRepo, "1"); strLstAddZ(argList, "archive-get"); 
harnessCfgLoadRaw(strLstSize(argList), strLstPtr(argList)); TEST_RESULT_BOOL(repoIsLocal(0), true, "repo is local"); TEST_RESULT_VOID(repoIsLocalVerify(), " local verified"); + TEST_RESULT_VOID(repoIsLocalVerifyIdx(0), " local by index verified"); + TEST_ERROR_FMT( + repoIsLocalVerifyIdx(cfgOptionGroupIdxTotal(cfgOptGrpRepo) - 1), HostInvalidError, + "archive-get command must be run on the repository host"); // ------------------------------------------------------------------------------------------------------------------------- argList = strLstNew(); @@ -270,7 +278,7 @@ testRun(void) protocolRemoteParam(protocolStorageTypeRepo, 0), "-o\nLogLevel=error\n-o\nCompression=no\n-o\nPasswordAuthentication=no\nrepo-host-user@repo-host\n" "pgbackrest --exec-id=1-test --log-level-console=off --log-level-file=off --log-level-stderr=error" - " --pg1-path=/path/to/pg --process=0 --remote-type=repo --stanza=test1 archive-get:remote\n", + " --pg1-path=/path/to/pg --process=0 --remote-type=repo --repo=1 --stanza=test1 archive-get:remote\n", "remote protocol params"); // ------------------------------------------------------------------------------------------------------------------------- @@ -279,7 +287,7 @@ testRun(void) strLstAddZ(argList, "--stanza=test1"); strLstAddZ(argList, "--log-subprocess"); hrnCfgArgRawZ(argList, cfgOptPgPath, "/unused"); // Will be passed to remote (required) - hrnCfgArgRawZ(argList, cfgOptPgPort, "777"); // Not be passed to remote (required but has default) + hrnCfgArgRawZ(argList, cfgOptPgPort, "777"); // Not passed to remote (required but has default) strLstAddZ(argList, "--repo1-host=repo-host"); strLstAddZ(argList, "--repo1-host-port=444"); strLstAddZ(argList, "--repo1-host-config=/path/pgbackrest.conf"); @@ -294,7 +302,7 @@ testRun(void) "-o\nLogLevel=error\n-o\nCompression=no\n-o\nPasswordAuthentication=no\n-p\n444\nrepo-host-user@repo-host\n" "pgbackrest --config=/path/pgbackrest.conf --config-include-path=/path/include --config-path=/path/config" " --exec-id=1-test --log-level-console=off --log-level-file=info --log-level-stderr=error --log-subprocess" - " --pg1-path=/unused --process=0 --remote-type=repo --stanza=test1 check:remote\n", + " --pg1-path=/unused --process=0 --remote-type=repo --repo=1 --stanza=test1 check:remote\n", "remote protocol params with replacements"); // ------------------------------------------------------------------------------------------------------------------------- @@ -303,6 +311,7 @@ testRun(void) strLstAddZ(argList, "--stanza=test1"); hrnCfgArgRawZ(argList, cfgOptPgPath, "/path/to/pg"); strLstAddZ(argList, "--process=3"); + hrnCfgArgRawZ(argList, cfgOptRepo, "1"); strLstAddZ(argList, "--" CFGOPT_REMOTE_TYPE "=" PROTOCOL_REMOTE_TYPE_REPO); strLstAddZ(argList, "--repo1-host=repo-host"); strLstAddZ(argList, CFGCMD_ARCHIVE_GET ":" CONFIG_COMMAND_ROLE_LOCAL); @@ -312,7 +321,7 @@ testRun(void) protocolRemoteParam(protocolStorageTypeRepo, 0), "-o\nLogLevel=error\n-o\nCompression=no\n-o\nPasswordAuthentication=no\npgbackrest@repo-host\n" "pgbackrest --exec-id=1-test --log-level-console=off --log-level-file=off --log-level-stderr=error" - " --pg1-path=/path/to/pg --process=3 --remote-type=repo --stanza=test1 archive-get:remote\n", + " --pg1-path=/path/to/pg --process=3 --remote-type=repo --repo=1 --stanza=test1 archive-get:remote\n", "remote protocol params for backup local"); // ------------------------------------------------------------------------------------------------------------------------- @@ -991,20 +1000,31 @@ testRun(void) 
BUFSTRDEF( "[global]\n" "repo1-cipher-type=aes-256-cbc\n" - "repo1-cipher-pass=dcba\n")); + "repo1-cipher-pass=dcba\n" + "repo2-cipher-type=aes-256-cbc\n" + "repo2-cipher-pass=xxxx\n")); argList = strLstNew(); strLstAddZ(argList, "--stanza=db"); + strLstAddZ(argList, "--pg1-path=/pg"); strLstAddZ(argList, "--protocol-timeout=10"); strLstAdd(argList, strNewFmt("--repo1-host-config=%s/pgbackrest.conf", testPath())); strLstAddZ(argList, "--repo1-host=localhost"); strLstAdd(argList, strNewFmt("--repo1-host-user=%s", testUser())); strLstAdd(argList, strNewFmt("--repo1-path=%s", testPath())); - harnessCfgLoad(cfgCmdInfo, argList); + strLstAdd(argList, strNewFmt("--repo2-host-config=%s/pgbackrest.conf", testPath())); + strLstAddZ(argList, "--repo2-host=localhost"); + strLstAdd(argList, strNewFmt("--repo2-host-user=%s", testUser())); + strLstAdd(argList, strNewFmt("--repo2-path=%s2", testPath())); + harnessCfgLoad(cfgCmdCheck, argList); - TEST_RESULT_PTR(cfgOptionStrNull(cfgOptRepoCipherPass), NULL, "check cipher pass before"); - TEST_ASSIGN(client, protocolRemoteGet(protocolStorageTypeRepo, 0), "get remote protocol"); - TEST_RESULT_STR_Z(cfgOptionStr(cfgOptRepoCipherPass), "dcba", "check cipher pass after"); + TEST_RESULT_PTR(cfgOptionIdxStrNull(cfgOptRepoCipherPass, 0), NULL, "check repo1 cipher pass before"); + TEST_ASSIGN(client, protocolRemoteGet(protocolStorageTypeRepo, 0), "get repo1 remote protocol"); + TEST_RESULT_STR_Z(cfgOptionIdxStr(cfgOptRepoCipherPass, 0), "dcba", "check repo1 cipher pass after"); + + TEST_RESULT_PTR(cfgOptionIdxStrNull(cfgOptRepoCipherPass, 1), NULL, "check repo2 cipher pass before"); + TEST_RESULT_VOID(protocolRemoteGet(protocolStorageTypeRepo, 1), "get repo2 remote protocol"); + TEST_RESULT_STR_Z(cfgOptionIdxStr(cfgOptRepoCipherPass, 1), "xxxx", "check repo2 cipher pass after"); TEST_RESULT_VOID(protocolFree(), "free remote protocol objects"); diff --git a/test/src/module/storage/remoteTest.c b/test/src/module/storage/remoteTest.c index bf2501df4..5df559839 100644 --- a/test/src/module/storage/remoteTest.c +++ b/test/src/module/storage/remoteTest.c @@ -29,17 +29,18 @@ testRun(void) strLstAddZ(argList, "--stanza=db"); strLstAddZ(argList, "--protocol-timeout=10"); strLstAddZ(argList, "--buffer-size=16384"); + hrnCfgArgKeyRawFmt(argList, cfgOptPgPath, 1, "%s/pg", testPath()); strLstAddZ(argList, "--repo1-host=localhost"); strLstAdd(argList, strNewFmt("--repo1-host-user=%s", testUser())); strLstAdd(argList, strNewFmt("--repo1-path=%s/repo", testPath())); - harnessCfgLoadRole(cfgCmdArchivePush, cfgCmdRoleLocal, argList); + hrnCfgArgRawZ(argList, cfgOptRepo, "1"); + harnessCfgLoadRole(cfgCmdArchiveGet, cfgCmdRoleLocal, argList); // Set type since we'll be running local and remote tests here cfgOptionSet(cfgOptRemoteType, cfgSourceParam, VARSTRDEF("repo")); - // Set pg settings so we can run both db and backup remotes + // Set pg host so we can run both pg and repo remotes cfgOptionSet(cfgOptPgHost, cfgSourceParam, VARSTRDEF("localhost")); - cfgOptionSet(cfgOptPgPath, cfgSourceParam, VARSTR(strNewFmt("%s/pg", testPath()))); // Start a protocol server to test the remote protocol Buffer *serverRead = bufNew(8192);