Mirror of https://github.com/postgres/postgres.git (synced 2025-08-28 18:48:04 +03:00)
Skip WAL for new relfilenodes, under wal_level=minimal.
Until now, only selected bulk operations (e.g. COPY) did this. If a given relfilenode received both a WAL-skipping COPY and a WAL-logged operation (e.g. INSERT), recovery could lose tuples from the COPY. See src/backend/access/transam/README section "Skipping WAL for New RelFileNode" for the new coding rules. Maintainers of table access methods should examine that section.

To maintain data durability, just before commit, we choose between an fsync of the relfilenode and copying its contents to WAL. A new GUC, wal_skip_threshold, guides that choice. If this change slows a workload that creates small, permanent relfilenodes under wal_level=minimal, try adjusting wal_skip_threshold. Users setting a timeout on COMMIT may need to adjust that timeout, and log_min_duration_statement analysis will reflect time consumption moving to COMMIT from commands like COPY.

Internally, this requires a reliable determination of whether RollbackAndReleaseCurrentSubTransaction() would unlink a relation's current relfilenode. Introduce rd_firstRelfilenodeSubid. Amend the specification of rd_createSubid such that the field is zero when a new rel has an old rd_node. Make relcache.c retain entries for certain dropped relations until end of transaction.

Back-patch to 9.5 (all supported versions). This introduces a new WAL record type, XLOG_GIST_ASSIGN_LSN, without bumping XLOG_PAGE_MAGIC. As always, update standby systems before master systems. This changes sizeof(RelationData) and sizeof(IndexStmt), breaking binary compatibility for affected extensions. (The most recent commit to affect the same class of extensions was 089e4d405d0f3b94c74a2c6a54357a84a681754b.)

Kyotaro Horiguchi, reviewed (in earlier, similar versions) by Robert Haas. Heikki Linnakangas and Michael Paquier implemented earlier designs that materially clarified the problem. Reviewed, in earlier designs, by Andrew Dunstan, Andres Freund, Alvaro Herrera, Tom Lane, Fujii Masao, and Simon Riggs. Reported by Martijn van Oosterhout.

Discussion: https://postgr.es/m/20150702220524.GA9392@svana.org
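The pre-commit choice described in the message above can be pictured with a minimal standalone C sketch. This is not the committed backend code: PendingSync, do_pending_syncs(), the sample file paths, and the hard-coded 2 MB threshold are illustrative stand-ins for the commit's internal bookkeeping around the wal_skip_threshold GUC; only the size comparison (relation size in BLCKSZ blocks versus a threshold in kilobytes) mirrors the behavior the message describes.

    #include <stdio.h>

    #define BLCKSZ 8192             /* PostgreSQL's default block size */

    /* stand-in for the wal_skip_threshold GUC; the unit is kilobytes */
    static int  wal_skip_threshold = 2048;

    /* hypothetical record of one relfilenode whose writes skipped WAL */
    typedef struct PendingSync
    {
        const char *relpath;        /* path of the relation's main fork */
        unsigned    nblocks;        /* size of that fork, in blocks */
    } PendingSync;

    /*
     * Just before commit, pick the cheaper way to make each WAL-skipped
     * relfilenode durable: small files have their pages copied into WAL,
     * large files are fsync'd instead.
     */
    static void
    do_pending_syncs(const PendingSync *syncs, int nsyncs)
    {
        for (int i = 0; i < nsyncs; i++)
        {
            unsigned long long bytes =
                (unsigned long long) syncs[i].nblocks * BLCKSZ;

            if (bytes >= (unsigned long long) wal_skip_threshold * 1024)
                printf("%s: %u blocks -> fsync the file\n",
                       syncs[i].relpath, syncs[i].nblocks);
            else
                printf("%s: %u blocks -> copy its pages to WAL\n",
                       syncs[i].relpath, syncs[i].nblocks);
        }
    }

    int
    main(void)
    {
        PendingSync syncs[] = {
            {"base/16384/24576", 3},        /* tiny COPY target: WAL copy wins */
            {"base/16384/24580", 131072},   /* ~1 GB relation: fsync wins */
        };

        do_pending_syncs(syncs, 2);
        return 0;
    }

In this sketch, relations whose pending contents stay under the threshold are written to WAL at commit, while larger ones are fsynced; which side is cheaper depends on the storage's fsync latency, which is why the message suggests tuning wal_skip_threshold for workloads that create many small permanent relfilenodes.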
src/backend/access/heap/heapam.c

@@ -21,7 +21,6 @@
  *		heap_multi_insert - insert multiple tuples into a relation
  *		heap_delete - delete a tuple from a relation
  *		heap_update - replace a tuple in a relation with another tuple
- *		heap_sync - sync heap, for when no WAL has been written
  *
  * NOTES
  *	  This file contains the heap_ routines which implement
@@ -1939,7 +1938,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 	MarkBufferDirty(buffer);
 
 	/* XLOG stuff */
-	if (!(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation))
+	if (RelationNeedsWAL(relation))
 	{
 		xl_heap_insert xlrec;
 		xl_heap_header xlhdr;
@@ -2122,7 +2121,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
 	/* currently not needed (thus unsupported) for heap_multi_insert() */
 	AssertArg(!(options & HEAP_INSERT_NO_LOGICAL));
 
-	needwal = !(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation);
+	needwal = RelationNeedsWAL(relation);
 	saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
 												   HEAP_DEFAULT_FILLFACTOR);
 
@@ -8920,46 +8919,6 @@ heap2_redo(XLogReaderState *record)
 	}
 }
 
-/*
- * heap_sync - sync a heap, for use when no WAL has been written
- *
- * This forces the heap contents (including TOAST heap if any) down to disk.
- * If we skipped using WAL, and WAL is otherwise needed, we must force the
- * relation down to disk before it's safe to commit the transaction.  This
- * requires writing out any dirty buffers and then doing a forced fsync.
- *
- * Indexes are not touched.  (Currently, index operations associated with
- * the commands that use this are WAL-logged and so do not need fsync.
- * That behavior might change someday, but in any case it's likely that
- * any fsync decisions required would be per-index and hence not appropriate
- * to be done here.)
- */
-void
-heap_sync(Relation rel)
-{
-	/* non-WAL-logged tables never need fsync */
-	if (!RelationNeedsWAL(rel))
-		return;
-
-	/* main heap */
-	FlushRelationBuffers(rel);
-	/* FlushRelationBuffers will have opened rd_smgr */
-	smgrimmedsync(rel->rd_smgr, MAIN_FORKNUM);
-
-	/* FSM is not critical, don't bother syncing it */
-
-	/* toast heap, if any */
-	if (OidIsValid(rel->rd_rel->reltoastrelid))
-	{
-		Relation	toastrel;
-
-		toastrel = table_open(rel->rd_rel->reltoastrelid, AccessShareLock);
-		FlushRelationBuffers(toastrel);
-		smgrimmedsync(toastrel->rd_smgr, MAIN_FORKNUM);
-		table_close(toastrel, AccessShareLock);
-	}
-}
-
 /*
  * Mask a heap page before performing consistency checks on it.
  */
src/backend/access/heap/heapam_handler.c

@@ -555,17 +555,6 @@ tuple_lock_retry:
 	return result;
 }
 
-static void
-heapam_finish_bulk_insert(Relation relation, int options)
-{
-	/*
-	 * If we skipped writing WAL, then we need to sync the heap (but not
-	 * indexes since those use WAL anyway / don't go through tableam)
-	 */
-	if (options & HEAP_INSERT_SKIP_WAL)
-		heap_sync(relation);
-}
-
 
 /* ------------------------------------------------------------------------
  * DDL related callbacks for heap AM.
@@ -698,7 +687,6 @@ heapam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
 	IndexScanDesc indexScan;
 	TableScanDesc tableScan;
 	HeapScanDesc heapScan;
-	bool		use_wal;
 	bool		is_system_catalog;
 	Tuplesortstate *tuplesort;
 	TupleDesc	oldTupDesc = RelationGetDescr(OldHeap);
@@ -713,12 +701,9 @@ heapam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
 	is_system_catalog = IsSystemRelation(OldHeap);
 
 	/*
-	 * We need to log the copied data in WAL iff WAL archiving/streaming is
-	 * enabled AND it's a WAL-logged rel.
+	 * Valid smgr_targblock implies something already wrote to the relation.
+	 * This may be harmless, but this function hasn't planned for it.
 	 */
-	use_wal = XLogIsNeeded() && RelationNeedsWAL(NewHeap);
-
-	/* use_wal off requires smgr_targblock be initially invalid */
 	Assert(RelationGetTargetBlock(NewHeap) == InvalidBlockNumber);
 
 	/* Preallocate values/isnull arrays */
@@ -728,7 +713,7 @@ heapam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
 
 	/* Initialize the rewrite operation */
 	rwstate = begin_heap_rewrite(OldHeap, NewHeap, OldestXmin, *xid_cutoff,
-								 *multi_cutoff, use_wal);
+								 *multi_cutoff);
 
 
 	/* Set up sorting if wanted */
@@ -2525,7 +2510,6 @@ static const TableAmRoutine heapam_methods = {
 	.tuple_delete = heapam_tuple_delete,
 	.tuple_update = heapam_tuple_update,
 	.tuple_lock = heapam_tuple_lock,
-	.finish_bulk_insert = heapam_finish_bulk_insert,
 
 	.tuple_fetch_row_version = heapam_fetch_row_version,
 	.tuple_get_latest_tid = heap_get_latest_tid,
src/backend/access/heap/rewriteheap.c

@@ -136,7 +136,6 @@ typedef struct RewriteStateData
 	Page		rs_buffer;		/* page currently being built */
 	BlockNumber rs_blockno;		/* block where page will go */
 	bool		rs_buffer_valid;	/* T if any tuples in buffer */
-	bool		rs_use_wal;		/* must we WAL-log inserts? */
 	bool		rs_logical_rewrite; /* do we need to do logical rewriting */
 	TransactionId rs_oldest_xmin;	/* oldest xmin used by caller to determine
 									 * tuple visibility */
@@ -230,15 +229,13 @@ static void logical_end_heap_rewrite(RewriteState state);
  * oldest_xmin	xid used by the caller to determine which tuples are dead
  * freeze_xid	xid before which tuples will be frozen
  * cutoff_multi	multixact before which multis will be removed
- * use_wal	should the inserts to the new heap be WAL-logged?
  *
  * Returns an opaque RewriteState, allocated in current memory context,
  * to be used in subsequent calls to the other functions.
  */
 RewriteState
 begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xmin,
-				   TransactionId freeze_xid, MultiXactId cutoff_multi,
-				   bool use_wal)
+				   TransactionId freeze_xid, MultiXactId cutoff_multi)
 {
 	RewriteState state;
 	MemoryContext rw_cxt;
@@ -263,7 +260,6 @@ begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xm
 	/* new_heap needn't be empty, just locked */
 	state->rs_blockno = RelationGetNumberOfBlocks(new_heap);
 	state->rs_buffer_valid = false;
-	state->rs_use_wal = use_wal;
 	state->rs_oldest_xmin = oldest_xmin;
 	state->rs_freeze_xid = freeze_xid;
 	state->rs_cutoff_multi = cutoff_multi;
@@ -322,7 +318,7 @@ end_heap_rewrite(RewriteState state)
 	/* Write the last page, if any */
 	if (state->rs_buffer_valid)
 	{
-		if (state->rs_use_wal)
+		if (RelationNeedsWAL(state->rs_new_rel))
 			log_newpage(&state->rs_new_rel->rd_node,
 						MAIN_FORKNUM,
 						state->rs_blockno,
@@ -337,18 +333,14 @@ end_heap_rewrite(RewriteState state)
 	}
 
 	/*
-	 * If the rel is WAL-logged, must fsync before commit.  We use heap_sync
-	 * to ensure that the toast table gets fsync'd too.
-	 *
-	 * It's obvious that we must do this when not WAL-logging.  It's less
-	 * obvious that we have to do it even if we did WAL-log the pages.  The
+	 * When we WAL-logged rel pages, we must nonetheless fsync them.  The
 	 * reason is the same as in storage.c's RelationCopyStorage(): we're
 	 * writing data that's not in shared buffers, and so a CHECKPOINT
 	 * occurring during the rewriteheap operation won't have fsync'd data we
 	 * wrote before the checkpoint.
 	 */
 	if (RelationNeedsWAL(state->rs_new_rel))
-		heap_sync(state->rs_new_rel);
+		smgrimmedsync(state->rs_new_rel->rd_smgr, MAIN_FORKNUM);
 
 	logical_end_heap_rewrite(state);
 
@@ -646,9 +638,6 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
 	{
 		int			options = HEAP_INSERT_SKIP_FSM;
 
-		if (!state->rs_use_wal)
-			options |= HEAP_INSERT_SKIP_WAL;
-
 		/*
 		 * While rewriting the heap for VACUUM FULL / CLUSTER, make sure data
 		 * for the TOAST table are not logically decoded.  The main heap is
@@ -687,7 +676,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
 	/* Doesn't fit, so write out the existing page */
 
 	/* XLOG stuff */
-	if (state->rs_use_wal)
+	if (RelationNeedsWAL(state->rs_new_rel))
 		log_newpage(&state->rs_new_rel->rd_node,
 					MAIN_FORKNUM,
 					state->rs_blockno,