1
0
mirror of https://github.com/postgres/postgres.git synced 2025-11-24 00:23:06 +03:00

Skip WAL for new relfilenodes, under wal_level=minimal.

Until now, only selected bulk operations (e.g. COPY) did this.  If a
given relfilenode received both a WAL-skipping COPY and a WAL-logged
operation (e.g. INSERT), recovery could lose tuples from the COPY.  See
src/backend/access/transam/README section "Skipping WAL for New
RelFileNode" for the new coding rules.  Maintainers of table access
methods should examine that section.

To maintain data durability, just before commit, we choose between an
fsync of the relfilenode and copying its contents to WAL.  A new GUC,
wal_skip_threshold, guides that choice.  If this change slows a workload
that creates small, permanent relfilenodes under wal_level=minimal, try
adjusting wal_skip_threshold.  Users setting a timeout on COMMIT may
need to adjust that timeout, and log_min_duration_statement analysis
will reflect time consumption moving to COMMIT from commands like COPY.

Internally, this requires a reliable determination of whether
RollbackAndReleaseCurrentSubTransaction() would unlink a relation's
current relfilenode.  Introduce rd_firstRelfilenodeSubid.  Amend the
specification of rd_createSubid such that the field is zero when a new
rel has an old rd_node.  Make relcache.c retain entries for certain
dropped relations until end of transaction.

Back-patch to 9.5 (all supported versions).  This introduces a new WAL
record type, XLOG_GIST_ASSIGN_LSN, without bumping XLOG_PAGE_MAGIC.  As
always, update standby systems before master systems.  This changes
sizeof(RelationData) and sizeof(IndexStmt), breaking binary
compatibility for affected extensions.  (The most recent commit to
affect the same class of extensions was
089e4d405d0f3b94c74a2c6a54357a84a681754b.)

Kyotaro Horiguchi, reviewed (in earlier, similar versions) by Robert
Haas.  Heikki Linnakangas and Michael Paquier implemented earlier
designs that materially clarified the problem.  Reviewed, in earlier
designs, by Andrew Dunstan, Andres Freund, Alvaro Herrera, Tom Lane,
Fujii Masao, and Simon Riggs.  Reported by Martijn van Oosterhout.

Discussion: https://postgr.es/m/20150702220524.GA9392@svana.org
This commit is contained in:
Noah Misch
2020-03-21 09:38:26 -07:00
parent d3e572855b
commit cb2fd7eac2
51 changed files with 1440 additions and 364 deletions

View File

@@ -29,9 +29,13 @@
#include "miscadmin.h"
#include "storage/freespace.h"
#include "storage/smgr.h"
#include "utils/hsearch.h"
#include "utils/memutils.h"
#include "utils/rel.h"
/* GUC variables */
int wal_skip_threshold = 2048; /* in kilobytes */
/*
* We keep a list of all relations (represented as RelFileNode values)
* that have been created or deleted in the current transaction. When
@@ -61,7 +65,14 @@ typedef struct PendingRelDelete
struct PendingRelDelete *next; /* linked-list link */
} PendingRelDelete;
/*
 * Entry in pendingSyncHash: a relfilenode created in this transaction under
 * wal_level=minimal, whose changes skip WAL and therefore must be synced (or
 * dumped to WAL) just before commit.
 */
typedef struct pendingSync
{
RelFileNode rnode;              /* hash key: the WAL-skipping relfilenode */
bool is_truncated; /* Has the file experienced truncation? */
} pendingSync;
static PendingRelDelete *pendingDeletes = NULL; /* head of linked list */
/* at-commit syncs queued in this transaction; NULL until first entry */
HTAB *pendingSyncHash = NULL;
/*
* RelationCreateStorage
@@ -117,6 +128,32 @@ RelationCreateStorage(RelFileNode rnode, char relpersistence)
pending->next = pendingDeletes;
pendingDeletes = pending;
/* Queue an at-commit sync. */
if (relpersistence == RELPERSISTENCE_PERMANENT && !XLogIsNeeded())
{
pendingSync *pending;
bool found;
/* we sync only permanent relations */
Assert(backend == InvalidBackendId);
if (!pendingSyncHash)
{
HASHCTL ctl;
ctl.keysize = sizeof(RelFileNode);
ctl.entrysize = sizeof(pendingSync);
ctl.hcxt = TopTransactionContext;
pendingSyncHash =
hash_create("pending sync hash",
16, &ctl, HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
}
pending = hash_search(pendingSyncHash, &rnode, HASH_ENTER, &found);
Assert(!found);
pending->is_truncated = false;
}
return srel;
}
@@ -275,6 +312,8 @@ RelationTruncate(Relation rel, BlockNumber nblocks)
}
}
RelationPreTruncate(rel);
/*
* We WAL-log the truncation before actually truncating, which means
* trouble if the truncation fails. If we then crash, the WAL replay
@@ -325,6 +364,28 @@ RelationTruncate(Relation rel, BlockNumber nblocks)
FreeSpaceMapVacuumRange(rel, nblocks, InvalidBlockNumber);
}
/*
 * RelationPreTruncate
 *		Perform AM-independent work before a physical truncation.
 *
 * If an access method's relation_nontransactional_truncate does not call
 * RelationTruncate(), it must call this before decreasing the table size.
 */
void
RelationPreTruncate(Relation rel)
{
	pendingSync *sync_entry;

	/* Nothing to record unless some relfilenode awaits an at-commit sync. */
	if (pendingSyncHash == NULL)
		return;

	RelationOpenSmgr(rel);

	/* If this rel's relfilenode has a pending sync, remember the truncate. */
	sync_entry = hash_search(pendingSyncHash,
							 &(rel->rd_smgr->smgr_rnode.node),
							 HASH_FIND, NULL);
	if (sync_entry != NULL)
		sync_entry->is_truncated = true;
}
/*
* Copy a fork's data, block by block.
*
@@ -355,7 +416,9 @@ RelationCopyStorage(SMgrRelation src, SMgrRelation dst,
/*
* We need to log the copied data in WAL iff WAL archiving/streaming is
* enabled AND it's a permanent relation.
* enabled AND it's a permanent relation. This gives the same answer as
* "RelationNeedsWAL(rel) || copying_initfork", because we know the
* current operation created a new relfilenode.
*/
use_wal = XLogIsNeeded() &&
(relpersistence == RELPERSISTENCE_PERMANENT || copying_initfork);
@@ -397,24 +460,39 @@ RelationCopyStorage(SMgrRelation src, SMgrRelation dst,
}
/*
* If the rel is WAL-logged, must fsync before commit. We use heap_sync
* to ensure that the toast table gets fsync'd too. (For a temp or
* unlogged rel we don't care since the data will be gone after a crash
* anyway.)
*
* It's obvious that we must do this when not WAL-logging the copy. It's
* less obvious that we have to do it even if we did WAL-log the copied
* pages. The reason is that since we're copying outside shared buffers, a
* CHECKPOINT occurring during the copy has no way to flush the previously
* written data to disk (indeed it won't know the new rel even exists). A
* crash later on would replay WAL from the checkpoint, therefore it
* wouldn't replay our earlier WAL entries. If we do not fsync those pages
* here, they might still not be on disk when the crash occurs.
* When we WAL-logged rel pages, we must nonetheless fsync them. The
* reason is that since we're copying outside shared buffers, a CHECKPOINT
* occurring during the copy has no way to flush the previously written
* data to disk (indeed it won't know the new rel even exists). A crash
* later on would replay WAL from the checkpoint, therefore it wouldn't
* replay our earlier WAL entries. If we do not fsync those pages here,
* they might still not be on disk when the crash occurs.
*/
if (relpersistence == RELPERSISTENCE_PERMANENT || copying_initfork)
if (use_wal || copying_initfork)
smgrimmedsync(dst, forkNum);
}
/*
 * RelFileNodeSkippingWAL - check if a BM_PERMANENT relfilenode is using WAL
 *
 * Changes of certain relfilenodes must not write WAL; see "Skipping WAL for
 * New RelFileNode" in src/backend/access/transam/README.  When a Relation is
 * at hand this answer is cheaply available from it; this function serves
 * code paths that have only a RelFileNode.
 */
bool
RelFileNodeSkippingWAL(RelFileNode rnode)
{
	/* Under wal_level >= replica, no permanent relfilenode skips WAL. */
	if (XLogIsNeeded())
		return false;

	/* Only relfilenodes queued for an at-commit sync are skipping WAL. */
	return pendingSyncHash != NULL &&
		hash_search(pendingSyncHash, &rnode, HASH_FIND, NULL) != NULL;
}
/*
* smgrDoPendingDeletes() -- Take care of relation deletes at end of xact.
*
@@ -492,6 +570,144 @@ smgrDoPendingDeletes(bool isCommit)
}
}
/*
 *	smgrDoPendingSyncs() -- Take care of relation syncs at end of xact.
 *
 * For each relfilenode that skipped WAL in this transaction (tracked in
 * pendingSyncHash), either fsync its files or dump their contents to WAL,
 * so the data survives a crash after commit.  On abort, just forget them;
 * smgrDoPendingDeletes() will remove the files.
 */
void
smgrDoPendingSyncs(bool isCommit)
{
	PendingRelDelete *pending;
	int			nrels = 0,
				maxrels = 0;
	SMgrRelation *srels = NULL;
	HASH_SEQ_STATUS scan;
	pendingSync *pendingsync;

	if (XLogIsNeeded())
		return;					/* no relation can use this */

	/* must run at top transaction level, where the syncs were queued */
	Assert(GetCurrentTransactionNestLevel() == 1);

	if (!pendingSyncHash)
		return;					/* no relation needs sync */

	/* Just throw away all pending syncs if any at rollback */
	if (!isCommit)
	{
		pendingSyncHash = NULL;
		return;
	}

	AssertPendingSyncs_RelationCache();

	/* Skip syncing nodes that smgrDoPendingDeletes() will delete. */
	for (pending = pendingDeletes; pending != NULL; pending = pending->next)
	{
		if (!pending->atCommit)
			continue;

		(void) hash_search(pendingSyncHash, (void *) &pending->relnode,
						   HASH_REMOVE, NULL);
	}

	hash_seq_init(&scan, pendingSyncHash);
	while ((pendingsync = (pendingSync *) hash_seq_search(&scan)))
	{
		ForkNumber	fork;
		BlockNumber nblocks[MAX_FORKNUM + 1];

		/*
		 * Accumulate the size in 64 bits: with a 32-bit BlockNumber,
		 * total_blocks * BLCKSZ wraps for relations of 4GB and larger,
		 * which would wrongly send such relations down the WAL-copy path
		 * instead of the fsync path.
		 */
		uint64		total_blocks = 0;
		SMgrRelation srel;

		srel = smgropen(pendingsync->rnode, InvalidBackendId);

		/*
		 * We emit newpage WAL records for smaller relations.
		 *
		 * Small WAL records have a chance to be emitted along with other
		 * backends' WAL records.  We emit WAL records instead of syncing for
		 * files that are smaller than a certain threshold, expecting faster
		 * commit.  The threshold is defined by the GUC wal_skip_threshold.
		 * (If the file was truncated, size is irrelevant; see below.)
		 */
		if (!pendingsync->is_truncated)
		{
			for (fork = 0; fork <= MAX_FORKNUM; fork++)
			{
				if (smgrexists(srel, fork))
				{
					BlockNumber n = smgrnblocks(srel, fork);

					/* we shouldn't come here for unlogged relations */
					Assert(fork != INIT_FORKNUM);

					nblocks[fork] = n;
					total_blocks += n;
				}
				else
					nblocks[fork] = InvalidBlockNumber;
			}
		}

		/*
		 * Sync file or emit WAL records for its contents.
		 *
		 * Although we emit WAL record if the file is small enough, do file
		 * sync regardless of the size if the file has experienced a
		 * truncation.  It is because the file would be followed by trailing
		 * garbage blocks after a crash recovery if, while a past longer file
		 * had been flushed out, we omitted syncing-out of the file and
		 * emitted WAL instead.  You might think that we could choose WAL if
		 * the current main fork is longer than ever, but there's a case where
		 * main fork is longer than ever but FSM fork gets shorter.
		 */
		if (pendingsync->is_truncated ||
			total_blocks * BLCKSZ / 1024 >= (uint64) wal_skip_threshold)
		{
			/* allocate the initial array, or extend it, if needed */
			if (maxrels == 0)
			{
				maxrels = 8;
				srels = palloc(sizeof(SMgrRelation) * maxrels);
			}
			else if (maxrels <= nrels)
			{
				maxrels *= 2;
				srels = repalloc(srels, sizeof(SMgrRelation) * maxrels);
			}

			srels[nrels++] = srel;
		}
		else
		{
			/* Emit WAL records for all blocks.  The file is small enough. */
			for (fork = 0; fork <= MAX_FORKNUM; fork++)
			{
				/*
				 * Keep the block count in BlockNumber; narrowing to int
				 * would misbehave for counts above INT_MAX and relies on
				 * implementation-defined conversion of InvalidBlockNumber.
				 */
				BlockNumber n = nblocks[fork];
				Relation	rel;

				if (!BlockNumberIsValid(n))
					continue;

				/*
				 * Emit WAL for the whole file.  Unfortunately we don't know
				 * what kind of a page this is, so we have to log the full
				 * page including any unused space.  ReadBufferExtended()
				 * counts some pgstat events; unfortunately, we discard them.
				 */
				rel = CreateFakeRelcacheEntry(srel->smgr_rnode.node);
				log_newpage_range(rel, fork, 0, n, false);
				FreeFakeRelcacheEntry(rel);
			}
		}
	}

	/* hash lived in TopTransactionContext; transaction end reclaims it */
	pendingSyncHash = NULL;

	if (nrels > 0)
	{
		smgrdosyncall(srels, nrels);
		pfree(srels);
	}
}
/*
* smgrGetPendingDeletes() -- Get a list of non-temp relations to be deleted.
*