Revert "Skip WAL for new relfilenodes, under wal_level=minimal."
This reverts commit cb2fd7eac2. Per numerous buildfarm members, it was
incompatible with parallel query, and a test case assumed LP64. Back-patch
to 9.5 (all supported versions).

Discussion: https://postgr.es/m/20200321224920.GB1763544@rfd.leadboat.com
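For orientation, the revert restores the pre-cb2fd7eac2 contract summarized
below. This is a condensed sketch assembled from the hunks that follow, not
code taken verbatim from the tree:

	/* Rewrites: the caller decides up front whether inserts are WAL-logged. */
	RewriteState begin_heap_rewrite(Relation old_heap, Relation new_heap,
									TransactionId oldest_xmin,
									TransactionId freeze_xid,
									MultiXactId cutoff_multi,
									bool use_wal);

	/* Bulk inserts: HEAP_INSERT_SKIP_WAL suppresses per-tuple WAL; the
	 * caller must then fsync the heap (and its TOAST heap, if any)
	 * before commit. */
	void heap_sync(Relation rel);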
src/backend/access/heap/heapam.c

@@ -21,6 +21,7 @@
  *		heap_multi_insert - insert multiple tuples into a relation
  *		heap_delete	- delete a tuple from a relation
  *		heap_update	- replace a tuple in a relation with another tuple
+ *		heap_sync	- sync heap, for when no WAL has been written
  *
  * NOTES
  *	  This file contains the heap_ routines which implement
@@ -1938,7 +1939,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 	MarkBufferDirty(buffer);
 
 	/* XLOG stuff */
-	if (RelationNeedsWAL(relation))
+	if (!(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation))
 	{
 		xl_heap_insert xlrec;
 		xl_heap_header xlhdr;
@@ -2121,7 +2122,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
 	/* currently not needed (thus unsupported) for heap_multi_insert() */
 	AssertArg(!(options & HEAP_INSERT_NO_LOGICAL));
 
-	needwal = RelationNeedsWAL(relation);
+	needwal = !(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation);
 	saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
 												   HEAP_DEFAULT_FILLFACTOR);
 
@@ -8919,6 +8920,46 @@ heap2_redo(XLogReaderState *record)
 	}
 }
 
+/*
+ * heap_sync - sync a heap, for use when no WAL has been written
+ *
+ * This forces the heap contents (including TOAST heap if any) down to disk.
+ * If we skipped using WAL, and WAL is otherwise needed, we must force the
+ * relation down to disk before it's safe to commit the transaction. This
+ * requires writing out any dirty buffers and then doing a forced fsync.
+ *
+ * Indexes are not touched. (Currently, index operations associated with
+ * the commands that use this are WAL-logged and so do not need fsync.
+ * That behavior might change someday, but in any case it's likely that
+ * any fsync decisions required would be per-index and hence not appropriate
+ * to be done here.)
+ */
+void
+heap_sync(Relation rel)
+{
+	/* non-WAL-logged tables never need fsync */
+	if (!RelationNeedsWAL(rel))
+		return;
+
+	/* main heap */
+	FlushRelationBuffers(rel);
+	/* FlushRelationBuffers will have opened rd_smgr */
+	smgrimmedsync(rel->rd_smgr, MAIN_FORKNUM);
+
+	/* FSM is not critical, don't bother syncing it */
+
+	/* toast heap, if any */
+	if (OidIsValid(rel->rd_rel->reltoastrelid))
+	{
+		Relation	toastrel;
+
+		toastrel = table_open(rel->rd_rel->reltoastrelid, AccessShareLock);
+		FlushRelationBuffers(toastrel);
+		smgrimmedsync(toastrel->rd_smgr, MAIN_FORKNUM);
+		table_close(toastrel, AccessShareLock);
+	}
+}
+
 /*
  * Mask a heap page before performing consistency checks on it.
  */
src/backend/access/heap/heapam_handler.c

@@ -555,6 +555,17 @@ tuple_lock_retry:
 	return result;
 }
 
+static void
+heapam_finish_bulk_insert(Relation relation, int options)
+{
+	/*
+	 * If we skipped writing WAL, then we need to sync the heap (but not
+	 * indexes since those use WAL anyway / don't go through tableam)
+	 */
+	if (options & HEAP_INSERT_SKIP_WAL)
+		heap_sync(relation);
+}
+
 
 /* ------------------------------------------------------------------------
  * DDL related callbacks for heap AM.
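Callers reach the restored callback through the table AM wrapper rather than
directly. As a point of reference, the PG12-era inline wrapper in
src/include/access/tableam.h looked roughly like the following (a paraphrase
from that era of the tree, shown for context; it is not part of this diff):

	static inline void
	table_finish_bulk_insert(Relation rel, int options)
	{
		/* optional callback: AMs that buffer nothing may leave it unset */
		if (rel->rd_tableam && rel->rd_tableam->finish_bulk_insert)
			rel->rd_tableam->finish_bulk_insert(rel, options);
	}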
@@ -687,6 +698,7 @@ heapam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
 	IndexScanDesc indexScan;
 	TableScanDesc tableScan;
 	HeapScanDesc heapScan;
+	bool		use_wal;
 	bool		is_system_catalog;
 	Tuplesortstate *tuplesort;
 	TupleDesc	oldTupDesc = RelationGetDescr(OldHeap);
@@ -701,9 +713,12 @@ heapam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
 	is_system_catalog = IsSystemRelation(OldHeap);
 
 	/*
-	 * Valid smgr_targblock implies something already wrote to the relation.
-	 * This may be harmless, but this function hasn't planned for it.
+	 * We need to log the copied data in WAL iff WAL archiving/streaming is
+	 * enabled AND it's a WAL-logged rel.
 	 */
+	use_wal = XLogIsNeeded() && RelationNeedsWAL(NewHeap);
+
+	/* use_wal off requires smgr_targblock be initially invalid */
 	Assert(RelationGetTargetBlock(NewHeap) == InvalidBlockNumber);
 
 	/* Preallocate values/isnull arrays */
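The use_wal test above is where wal_level enters the picture. XLogIsNeeded()
is, in this era of the tree, essentially the following macro (paraphrased
from src/include/access/xlog.h, shown for context; not part of this diff):

	/* WAL beyond what crash recovery needs is required only at
	 * wal_level >= replica; under wal_level=minimal this is false, so a
	 * rewrite of a WAL-logged relation may skip per-page WAL and rely on
	 * the fsync performed by end_heap_rewrite() instead. */
	#define XLogIsNeeded() (wal_level >= WAL_LEVEL_REPLICA)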
@@ -713,7 +728,7 @@ heapam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
 
 	/* Initialize the rewrite operation */
 	rwstate = begin_heap_rewrite(OldHeap, NewHeap, OldestXmin, *xid_cutoff,
-								 *multi_cutoff);
+								 *multi_cutoff, use_wal);
 
 
 	/* Set up sorting if wanted */
@@ -2510,6 +2525,7 @@ static const TableAmRoutine heapam_methods = {
 	.tuple_delete = heapam_tuple_delete,
 	.tuple_update = heapam_tuple_update,
 	.tuple_lock = heapam_tuple_lock,
+	.finish_bulk_insert = heapam_finish_bulk_insert,
 
 	.tuple_fetch_row_version = heapam_fetch_row_version,
 	.tuple_get_latest_tid = heap_get_latest_tid,
src/backend/access/heap/rewriteheap.c

@@ -136,6 +136,7 @@ typedef struct RewriteStateData
 	Page		rs_buffer;		/* page currently being built */
 	BlockNumber rs_blockno;		/* block where page will go */
 	bool		rs_buffer_valid;	/* T if any tuples in buffer */
+	bool		rs_use_wal;		/* must we WAL-log inserts? */
 	bool		rs_logical_rewrite; /* do we need to do logical rewriting */
 	TransactionId rs_oldest_xmin;	/* oldest xmin used by caller to determine
 									 * tuple visibility */
@@ -229,13 +230,15 @@ static void logical_end_heap_rewrite(RewriteState state);
  * oldest_xmin	xid used by the caller to determine which tuples are dead
  * freeze_xid	xid before which tuples will be frozen
  * cutoff_multi	multixact before which multis will be removed
+ * use_wal	should the inserts to the new heap be WAL-logged?
  *
  * Returns an opaque RewriteState, allocated in current memory context,
  * to be used in subsequent calls to the other functions.
  */
 RewriteState
 begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xmin,
-				   TransactionId freeze_xid, MultiXactId cutoff_multi)
+				   TransactionId freeze_xid, MultiXactId cutoff_multi,
+				   bool use_wal)
 {
 	RewriteState state;
 	MemoryContext rw_cxt;
@@ -260,6 +263,7 @@ begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xm
 	/* new_heap needn't be empty, just locked */
 	state->rs_blockno = RelationGetNumberOfBlocks(new_heap);
 	state->rs_buffer_valid = false;
+	state->rs_use_wal = use_wal;
 	state->rs_oldest_xmin = oldest_xmin;
 	state->rs_freeze_xid = freeze_xid;
 	state->rs_cutoff_multi = cutoff_multi;
@@ -318,7 +322,7 @@ end_heap_rewrite(RewriteState state)
 	/* Write the last page, if any */
 	if (state->rs_buffer_valid)
 	{
-		if (RelationNeedsWAL(state->rs_new_rel))
+		if (state->rs_use_wal)
 			log_newpage(&state->rs_new_rel->rd_node,
 						MAIN_FORKNUM,
 						state->rs_blockno,
@@ -333,14 +337,18 @@ end_heap_rewrite(RewriteState state)
 	}
 
 	/*
-	 * When we WAL-logged rel pages, we must nonetheless fsync them. The
+	 * If the rel is WAL-logged, must fsync before commit. We use heap_sync
+	 * to ensure that the toast table gets fsync'd too.
+	 *
+	 * It's obvious that we must do this when not WAL-logging. It's less
+	 * obvious that we have to do it even if we did WAL-log the pages. The
 	 * reason is the same as in storage.c's RelationCopyStorage(): we're
 	 * writing data that's not in shared buffers, and so a CHECKPOINT
 	 * occurring during the rewriteheap operation won't have fsync'd data we
 	 * wrote before the checkpoint.
 	 */
 	if (RelationNeedsWAL(state->rs_new_rel))
-		smgrimmedsync(state->rs_new_rel->rd_smgr, MAIN_FORKNUM);
+		heap_sync(state->rs_new_rel);
 
 	logical_end_heap_rewrite(state);
 
@@ -638,6 +646,9 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
 	{
 		int			options = HEAP_INSERT_SKIP_FSM;
 
+		if (!state->rs_use_wal)
+			options |= HEAP_INSERT_SKIP_WAL;
+
 		/*
 		 * While rewriting the heap for VACUUM FULL / CLUSTER, make sure data
 		 * for the TOAST table are not logically decoded. The main heap is
@@ -676,7 +687,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
 		/* Doesn't fit, so write out the existing page */
 
 		/* XLOG stuff */
-		if (RelationNeedsWAL(state->rs_new_rel))
+		if (state->rs_use_wal)
 			log_newpage(&state->rs_new_rel->rd_node,
 						MAIN_FORKNUM,
 						state->rs_blockno,