
Revert "Skip WAL for new relfilenodes, under wal_level=minimal."

This reverts commit cb2fd7eac2.  Per
numerous buildfarm members, it was incompatible with parallel query, and
a test case assumed LP64.  Back-patch to 9.5 (all supported versions).

Discussion: https://postgr.es/m/20200321224920.GB1763544@rfd.leadboat.com
Author: Noah Misch
Date: 2020-03-22 09:24:09 -07:00
parent d0587f52b3
commit de9396326e
51 changed files with 362 additions and 1438 deletions
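The commit message notes that a test case in the reverted patch assumed LP64, i.e. the data model in which long and pointers are both 64 bits wide (typical of 64-bit Unix systems). As a hypothetical illustration only, not the actual test case, the following C snippet shows the kind of assumption that breaks on non-LP64 platforms:

/*
 * Hypothetical illustration -- not the reverted test case.  Code like this
 * behaves as intended only under LP64 (long is 64 bits); on an LLP64
 * platform such as 64-bit Windows, long is 32 bits and the assignment
 * below loses the high bits.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t big = UINT64_C(1) << 32;   /* 2^32 = 4294967296 */
    long     stored = (long) big;       /* assumes sizeof(long) == 8 */

    printf("%ld\n", stored);            /* 4294967296 on LP64; typically 0 on LLP64 */
    return 0;
}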

src/backend/storage/buffer/bufmgr.c

@@ -66,7 +66,7 @@
 #define BUF_WRITTEN 0x01
 #define BUF_REUSABLE 0x02
 
-#define RELS_BSEARCH_THRESHOLD 20
+#define DROP_RELS_BSEARCH_THRESHOLD 20
 
 typedef struct PrivateRefCountEntry
 {
@@ -105,19 +105,6 @@ typedef struct CkptTsStatus
     int index;
 } CkptTsStatus;
 
-/*
- * Type for array used to sort SMgrRelations
- *
- * FlushRelationsAllBuffers shares the same comparator function with
- * DropRelFileNodesAllBuffers. Pointer to this struct and RelFileNode must be
- * compatible.
- */
-typedef struct SMgrSortArray
-{
-    RelFileNode rnode;          /* This must be the first member */
-    SMgrRelation srel;
-} SMgrSortArray;
-
 /* GUC variables */
 bool zero_damaged_pages = false;
 int bgwriter_lru_maxpages = 100;
@@ -3003,7 +2990,7 @@ DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)
     * an exactly determined value, as it depends on many factors (CPU and RAM
     * speeds, amount of shared buffers etc.).
     */
-    use_bsearch = n > RELS_BSEARCH_THRESHOLD;
+    use_bsearch = n > DROP_RELS_BSEARCH_THRESHOLD;
 
     /* sort the list of rnodes if necessary */
     if (use_bsearch)
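
As an aside, the threshold logic in the hunk above is a generic pattern: with only a few keys a plain linear scan is cheaper, while with many keys it pays to sort once and use binary search for each probe. A minimal, self-contained C sketch of that pattern, illustrative only and not PostgreSQL code, with the threshold of 20 taken from the hunk above:

#include <stdbool.h>
#include <stdlib.h>

#define BSEARCH_THRESHOLD 20    /* heuristic cutoff, as in the hunk above */

static int
cmp_int(const void *a, const void *b)
{
    int ia = *(const int *) a;
    int ib = *(const int *) b;

    return (ia > ib) - (ia < ib);
}

/* Count how many of the "nprobes" probe values appear among the "nkeys" keys. */
static int
count_matches(int *keys, int nkeys, const int *probes, int nprobes)
{
    bool use_bsearch = nkeys > BSEARCH_THRESHOLD;
    int  hits = 0;

    /* With many keys, sort once up front so each probe can use bsearch(). */
    if (use_bsearch)
        qsort(keys, nkeys, sizeof(int), cmp_int);

    for (int i = 0; i < nprobes; i++)
    {
        bool found = false;

        if (!use_bsearch)
        {
            /* Few keys: a linear scan avoids the sorting cost entirely. */
            for (int j = 0; j < nkeys; j++)
            {
                if (keys[j] == probes[i])
                {
                    found = true;
                    break;
                }
            }
        }
        else
            found = bsearch(&probes[i], keys, nkeys, sizeof(int),
                            cmp_int) != NULL;

        if (found)
            hits++;
    }

    return hits;
}

Sorting up front only pays off when the per-probe savings outweigh the one-time qsort() cost, which is why the comment in the diff describes the cutoff as inexact and machine-dependent.
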
@@ -3253,104 +3240,6 @@ FlushRelationBuffers(Relation rel)
     }
 }
 
-/* ---------------------------------------------------------------------
- *      FlushRelationsAllBuffers
- *
- *      This function flushes out of the buffer pool all the pages of all
- *      forks of the specified smgr relations.  It's equivalent to calling
- *      FlushRelationBuffers once per fork per relation.  The relations are
- *      assumed not to use local buffers.
- * --------------------------------------------------------------------
- */
-void
-FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
-{
-    int i;
-    SMgrSortArray *srels;
-    bool use_bsearch;
-
-    if (nrels == 0)
-        return;
-
-    /* fill-in array for qsort */
-    srels = palloc(sizeof(SMgrSortArray) * nrels);
-    for (i = 0; i < nrels; i++)
-    {
-        Assert(!RelFileNodeBackendIsTemp(smgrs[i]->smgr_rnode));
-
-        srels[i].rnode = smgrs[i]->smgr_rnode.node;
-        srels[i].srel = smgrs[i];
-    }
-
-    /*
-     * Save the bsearch overhead for low number of relations to sync. See
-     * DropRelFileNodesAllBuffers for details.
-     */
-    use_bsearch = nrels > RELS_BSEARCH_THRESHOLD;
-
-    /* sort the list of SMgrRelations if necessary */
-    if (use_bsearch)
-        pg_qsort(srels, nrels, sizeof(SMgrSortArray), rnode_comparator);
-
-    /* Make sure we can handle the pin inside the loop */
-    ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
-
-    for (i = 0; i < NBuffers; i++)
-    {
-        SMgrSortArray *srelent = NULL;
-        BufferDesc *bufHdr = GetBufferDescriptor(i);
-        uint32 buf_state;
-
-        /*
-         * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
-         * and saves some cycles.
-         */
-        if (!use_bsearch)
-        {
-            int j;
-
-            for (j = 0; j < nrels; j++)
-            {
-                if (RelFileNodeEquals(bufHdr->tag.rnode, srels[j].rnode))
-                {
-                    srelent = &srels[j];
-                    break;
-                }
-            }
-        }
-        else
-        {
-            srelent = bsearch((const void *) &(bufHdr->tag.rnode),
-                              srels, nrels, sizeof(SMgrSortArray),
-                              rnode_comparator);
-        }
-
-        /* buffer doesn't belong to any of the given relfilenodes; skip it */
-        if (srelent == NULL)
-            continue;
-
-        ReservePrivateRefCountEntry();
-
-        buf_state = LockBufHdr(bufHdr);
-        if (RelFileNodeEquals(bufHdr->tag.rnode, srelent->rnode) &&
-            (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
-        {
-            PinBuffer_Locked(bufHdr);
-            LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
-            FlushBuffer(bufHdr, srelent->srel);
-            LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
-            UnpinBuffer(bufHdr, true);
-        }
-        else
-            UnlockBufHdr(bufHdr, buf_state);
-    }
-
-    pfree(srels);
-}
-
 /* ---------------------------------------------------------------------
  *      FlushDatabaseBuffers
  *
@@ -3552,15 +3441,13 @@ MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
         (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
     {
         /*
-         * If we must not write WAL, due to a relfilenode-specific
-         * condition or being in recovery, don't dirty the page. We can
-         * set the hint, just not dirty the page as a result so the hint
-         * is lost when we evict the page or shutdown.
+         * If we're in recovery we cannot dirty a page because of a hint.
+         * We can set the hint, just not dirty the page as a result so the
+         * hint is lost when we evict the page or shutdown.
          *
          * See src/backend/storage/page/README for longer discussion.
          */
-        if (RecoveryInProgress() ||
-            RelFileNodeSkippingWAL(bufHdr->tag.rnode))
+        if (RecoveryInProgress())
             return;
 
         /*