Generalize concept of temporary relations to "relation persistence".
This commit replaces pg_class.relistemp with pg_class.relpersistence, and also modifies the RangeVar node type to carry relpersistence rather than istemp. It also removes rd_istemp from RelationData and instead performs the correct computation based on relpersistence. For clarity, we add three new macros: RelationNeedsWAL(), RelationUsesLocalBuffers(), and RelationUsesTempNamespace(), so that the purpose of each check that previously depended on rd_istemp is explicit. This is intended as infrastructure for the upcoming unlogged tables patch, as well as for possible future work on global temporary tables.
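For reference, the three new macros are expected to test the new relpersistence field rather than the old boolean flag. A minimal sketch of their likely shape (assuming the RELPERSISTENCE_PERMANENT and RELPERSISTENCE_TEMP constants introduced alongside this change; the authoritative definitions live in src/include/utils/rel.h):

    /* Sketch only: persistence-based replacements for the old rd_istemp tests. */
    #define RelationNeedsWAL(relation) \
        ((relation)->rd_rel->relpersistence == RELPERSISTENCE_PERMANENT)

    #define RelationUsesLocalBuffers(relation) \
        ((relation)->rd_rel->relpersistence == RELPERSISTENCE_TEMP)

    #define RelationUsesTempNamespace(relation) \
        ((relation)->rd_rel->relpersistence == RELPERSISTENCE_TEMP)

Separating the three questions (does the relation need WAL, does it live in local buffers, does it belong to the temp namespace) is what lets a future unlogged relation skip WAL while still using shared buffers and a permanent namespace.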
@@ -304,7 +304,7 @@ ginInsertValue(GinBtree btree, GinBtreeStack *stack, GinStatsData *buildStats)

     MarkBufferDirty(stack->buffer);

-    if (!btree->index->rd_istemp)
+    if (RelationNeedsWAL(btree->index))
     {
         XLogRecPtr  recptr;

@@ -373,7 +373,7 @@ ginInsertValue(GinBtree btree, GinBtreeStack *stack, GinStatsData *buildStats)
     MarkBufferDirty(lbuffer);
     MarkBufferDirty(stack->buffer);

-    if (!btree->index->rd_istemp)
+    if (RelationNeedsWAL(btree->index))
     {
         XLogRecPtr  recptr;

@@ -422,7 +422,7 @@ ginInsertValue(GinBtree btree, GinBtreeStack *stack, GinStatsData *buildStats)
     MarkBufferDirty(rbuffer);
     MarkBufferDirty(stack->buffer);

-    if (!btree->index->rd_istemp)
+    if (RelationNeedsWAL(btree->index))
     {
         XLogRecPtr  recptr;

@@ -103,7 +103,7 @@ writeListPage(Relation index, Buffer buffer,

     MarkBufferDirty(buffer);

-    if (!index->rd_istemp)
+    if (RelationNeedsWAL(index))
     {
         XLogRecData rdata[2];
         ginxlogInsertListPage data;
@@ -384,7 +384,7 @@ ginHeapTupleFastInsert(Relation index, GinState *ginstate,
      */
     MarkBufferDirty(metabuffer);

-    if (!index->rd_istemp)
+    if (RelationNeedsWAL(index))
     {
         XLogRecPtr  recptr;

@@ -564,7 +564,7 @@ shiftList(Relation index, Buffer metabuffer, BlockNumber newHead,
         MarkBufferDirty(buffers[i]);
     }

-    if (!index->rd_istemp)
+    if (RelationNeedsWAL(index))
     {
         XLogRecPtr  recptr;

@@ -55,7 +55,7 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems)

     MarkBufferDirty(buffer);

-    if (!index->rd_istemp)
+    if (RelationNeedsWAL(index))
     {
         XLogRecPtr  recptr;
         XLogRecData rdata[2];
@@ -325,7 +325,7 @@ ginbuild(PG_FUNCTION_ARGS)
     GinInitBuffer(RootBuffer, GIN_LEAF);
     MarkBufferDirty(RootBuffer);

-    if (!index->rd_istemp)
+    if (RelationNeedsWAL(index))
     {
         XLogRecPtr  recptr;
         XLogRecData rdata;
@@ -410,7 +410,7 @@ ginUpdateStats(Relation index, const GinStatsData *stats)

     MarkBufferDirty(metabuffer);

-    if (!index->rd_istemp)
+    if (RelationNeedsWAL(index))
     {
         XLogRecPtr  recptr;
         ginxlogUpdateMeta data;
@@ -93,7 +93,7 @@ xlogVacuumPage(Relation index, Buffer buffer)

     Assert(GinPageIsLeaf(page));

-    if (index->rd_istemp)
+    if (!RelationNeedsWAL(index))
         return;

     data.node = index->rd_node;
@@ -308,7 +308,7 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
     MarkBufferDirty(lBuffer);
     MarkBufferDirty(dBuffer);

-    if (!gvs->index->rd_istemp)
+    if (RelationNeedsWAL(gvs->index))
     {
         XLogRecPtr  recptr;
         XLogRecData rdata[4];
@@ -115,7 +115,7 @@ gistbuild(PG_FUNCTION_ARGS)

     MarkBufferDirty(buffer);

-    if (!index->rd_istemp)
+    if (RelationNeedsWAL(index))
     {
         XLogRecPtr  recptr;
         XLogRecData rdata;
@@ -401,7 +401,7 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate)
         dist->page = BufferGetPage(dist->buffer);
     }

-    if (!state->r->rd_istemp)
+    if (RelationNeedsWAL(state->r))
     {
         XLogRecPtr  recptr;
         XLogRecData *rdata;
@@ -465,7 +465,7 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate)

     MarkBufferDirty(state->stack->buffer);

-    if (!state->r->rd_istemp)
+    if (RelationNeedsWAL(state->r))
     {
         OffsetNumber noffs = 0,
                      offs[1];
@@ -550,7 +550,7 @@ gistfindleaf(GISTInsertState *state, GISTSTATE *giststate)
     opaque = GistPageGetOpaque(state->stack->page);

     state->stack->lsn = PageGetLSN(state->stack->page);
-    Assert(state->r->rd_istemp || !XLogRecPtrIsInvalid(state->stack->lsn));
+    Assert(!RelationNeedsWAL(state->r) || !XLogRecPtrIsInvalid(state->stack->lsn));

     if (state->stack->blkno != GIST_ROOT_BLKNO &&
         XLByteLT(state->stack->parent->lsn, opaque->nsn))
@@ -911,7 +911,7 @@ gistmakedeal(GISTInsertState *state, GISTSTATE *giststate)
     }

     /* say to xlog that insert is completed */
-    if (state->needInsertComplete && !state->r->rd_istemp)
+    if (state->needInsertComplete && RelationNeedsWAL(state->r))
         gistxlogInsertCompletion(state->r->rd_node, &(state->key), 1);
 }

@@ -1011,7 +1011,7 @@ gistnewroot(Relation r, Buffer buffer, IndexTuple *itup, int len, ItemPointer ke

     MarkBufferDirty(buffer);

-    if (!r->rd_istemp)
+    if (RelationNeedsWAL(r))
     {
         XLogRecPtr  recptr;
         XLogRecData *rdata;
@@ -248,7 +248,7 @@ gistbulkdelete(PG_FUNCTION_ARGS)
         PageIndexTupleDelete(page, todelete[i]);
     GistMarkTuplesDeleted(page);

-    if (!rel->rd_istemp)
+    if (RelationNeedsWAL(rel))
     {
         XLogRecData *rdata;
         XLogRecPtr  recptr;
@@ -124,7 +124,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
      *
      * During a rescan, don't make a new strategy object if we don't have to.
      */
-    if (!scan->rs_rd->rd_istemp &&
+    if (!RelationUsesLocalBuffers(scan->rs_rd) &&
         scan->rs_nblocks > NBuffers / 4)
     {
         allow_strat = scan->rs_allow_strat;
@@ -905,7 +905,7 @@ relation_open(Oid relationId, LOCKMODE lockmode)
         elog(ERROR, "could not open relation with OID %u", relationId);

     /* Make note that we've accessed a temporary relation */
-    if (r->rd_istemp)
+    if (RelationUsesLocalBuffers(r))
         MyXactAccessedTempRel = true;

     pgstat_initstats(r);
@@ -951,7 +951,7 @@ try_relation_open(Oid relationId, LOCKMODE lockmode)
         elog(ERROR, "could not open relation with OID %u", relationId);

     /* Make note that we've accessed a temporary relation */
-    if (r->rd_istemp)
+    if (RelationUsesLocalBuffers(r))
         MyXactAccessedTempRel = true;

     pgstat_initstats(r);
@@ -1917,7 +1917,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
     MarkBufferDirty(buffer);

     /* XLOG stuff */
-    if (!(options & HEAP_INSERT_SKIP_WAL) && !relation->rd_istemp)
+    if (!(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation))
     {
         xl_heap_insert xlrec;
         xl_heap_header xlhdr;
@@ -2227,7 +2227,7 @@ l1:
     MarkBufferDirty(buffer);

     /* XLOG stuff */
-    if (!relation->rd_istemp)
+    if (RelationNeedsWAL(relation))
     {
         xl_heap_delete xlrec;
         XLogRecPtr  recptr;
@@ -2780,7 +2780,7 @@ l2:
     MarkBufferDirty(buffer);

     /* XLOG stuff */
-    if (!relation->rd_istemp)
+    if (RelationNeedsWAL(relation))
     {
         XLogRecPtr  recptr = log_heap_update(relation, buffer, oldtup.t_self,
                                              newbuf, heaptup,
@@ -3403,7 +3403,7 @@ l3:
      * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
      * entries for everything anyway.)
      */
-    if (!relation->rd_istemp)
+    if (RelationNeedsWAL(relation))
     {
         xl_heap_lock xlrec;
         XLogRecPtr  recptr;
@@ -3505,7 +3505,7 @@ heap_inplace_update(Relation relation, HeapTuple tuple)
     MarkBufferDirty(buffer);

     /* XLOG stuff */
-    if (!relation->rd_istemp)
+    if (RelationNeedsWAL(relation))
     {
         xl_heap_inplace xlrec;
         XLogRecPtr  recptr;
@@ -3867,8 +3867,8 @@ log_heap_clean(Relation reln, Buffer buffer,
     XLogRecPtr  recptr;
     XLogRecData rdata[4];

-    /* Caller should not call me on a temp relation */
-    Assert(!reln->rd_istemp);
+    /* Caller should not call me on a non-WAL-logged relation */
+    Assert(RelationNeedsWAL(reln));

     xlrec.node = reln->rd_node;
     xlrec.block = BufferGetBlockNumber(buffer);
@@ -3950,8 +3950,8 @@ log_heap_freeze(Relation reln, Buffer buffer,
     XLogRecPtr  recptr;
     XLogRecData rdata[2];

-    /* Caller should not call me on a temp relation */
-    Assert(!reln->rd_istemp);
+    /* Caller should not call me on a non-WAL-logged relation */
+    Assert(RelationNeedsWAL(reln));
     /* nor when there are no tuples to freeze */
     Assert(offcnt > 0);

@@ -3996,8 +3996,8 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
     XLogRecData rdata[4];
     Page        page = BufferGetPage(newbuf);

-    /* Caller should not call me on a temp relation */
-    Assert(!reln->rd_istemp);
+    /* Caller should not call me on a non-WAL-logged relation */
+    Assert(RelationNeedsWAL(reln));

     if (HeapTupleIsHeapOnly(newtup))
         info = XLOG_HEAP_HOT_UPDATE;
@@ -4997,7 +4997,7 @@ heap2_desc(StringInfo buf, uint8 xl_info, char *rec)
  * heap_sync - sync a heap, for use when no WAL has been written
  *
  * This forces the heap contents (including TOAST heap if any) down to disk.
- * If we skipped using WAL, and it's not a temp relation, we must force the
+ * If we skipped using WAL, and WAL is otherwise needed, we must force the
  * relation down to disk before it's safe to commit the transaction. This
  * requires writing out any dirty buffers and then doing a forced fsync.
  *
@@ -5010,8 +5010,8 @@ heap2_desc(StringInfo buf, uint8 xl_info, char *rec)
 void
 heap_sync(Relation rel)
 {
-    /* temp tables never need fsync */
-    if (rel->rd_istemp)
+    /* non-WAL-logged tables never need fsync */
+    if (!RelationNeedsWAL(rel))
         return;

     /* main heap */
@@ -233,7 +233,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
     /*
      * Emit a WAL HEAP_CLEAN record showing what we did
      */
-    if (!relation->rd_istemp)
+    if (RelationNeedsWAL(relation))
     {
         XLogRecPtr  recptr;

@@ -277,8 +277,8 @@ end_heap_rewrite(RewriteState state)
     }

     /*
-     * If the rel isn't temp, must fsync before commit. We use heap_sync to
-     * ensure that the toast table gets fsync'd too.
+     * If the rel is WAL-logged, must fsync before commit. We use heap_sync
+     * to ensure that the toast table gets fsync'd too.
      *
      * It's obvious that we must do this when not WAL-logging. It's less
      * obvious that we have to do it even if we did WAL-log the pages. The
@@ -287,7 +287,7 @@ end_heap_rewrite(RewriteState state)
      * occurring during the rewriteheap operation won't have fsync'd data we
      * wrote before the checkpoint.
      */
-    if (!state->rs_new_rel->rd_istemp)
+    if (RelationNeedsWAL(state->rs_new_rel))
         heap_sync(state->rs_new_rel);

     /* Deleting the context frees everything */
@@ -766,7 +766,7 @@ _bt_insertonpg(Relation rel,
     }

     /* XLOG stuff */
-    if (!rel->rd_istemp)
+    if (RelationNeedsWAL(rel))
     {
         xl_btree_insert xlrec;
         BlockNumber xldownlink;
@@ -1165,7 +1165,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
     }

     /* XLOG stuff */
-    if (!rel->rd_istemp)
+    if (RelationNeedsWAL(rel))
     {
         xl_btree_split xlrec;
         uint8       xlinfo;
@@ -1914,7 +1914,7 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
     MarkBufferDirty(metabuf);

     /* XLOG stuff */
-    if (!rel->rd_istemp)
+    if (RelationNeedsWAL(rel))
     {
         xl_btree_newroot xlrec;
         XLogRecPtr  recptr;
@@ -224,7 +224,7 @@ _bt_getroot(Relation rel, int access)
     MarkBufferDirty(metabuf);

     /* XLOG stuff */
-    if (!rel->rd_istemp)
+    if (RelationNeedsWAL(rel))
     {
         xl_btree_newroot xlrec;
         XLogRecPtr  recptr;
@@ -452,7 +452,7 @@ _bt_checkpage(Relation rel, Buffer buf)
 static void
 _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedXid)
 {
-    if (rel->rd_istemp)
+    if (!RelationNeedsWAL(rel))
         return;

     /* No ereport(ERROR) until changes are logged */
@@ -751,7 +751,7 @@ _bt_delitems_vacuum(Relation rel, Buffer buf,
     MarkBufferDirty(buf);

     /* XLOG stuff */
-    if (!rel->rd_istemp)
+    if (RelationNeedsWAL(rel))
     {
         XLogRecPtr  recptr;
         XLogRecData rdata[2];
@@ -829,7 +829,7 @@ _bt_delitems_delete(Relation rel, Buffer buf,
     MarkBufferDirty(buf);

     /* XLOG stuff */
-    if (!rel->rd_istemp)
+    if (RelationNeedsWAL(rel))
     {
         XLogRecPtr  recptr;
         XLogRecData rdata[3];
@@ -1365,7 +1365,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
     MarkBufferDirty(lbuf);

     /* XLOG stuff */
-    if (!rel->rd_istemp)
+    if (RelationNeedsWAL(rel))
     {
         xl_btree_delete_page xlrec;
         xl_btree_metadata xlmeta;
@@ -211,9 +211,9 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2)

     /*
      * We need to log index creation in WAL iff WAL archiving/streaming is
-     * enabled AND it's not a temp index.
+     * enabled UNLESS the index isn't WAL-logged anyway.
      */
-    wstate.btws_use_wal = XLogIsNeeded() && !wstate.index->rd_istemp;
+    wstate.btws_use_wal = XLogIsNeeded() && RelationNeedsWAL(wstate.index);

     /* reserve the metapage */
     wstate.btws_pages_alloced = BTREE_METAPAGE + 1;
@@ -797,9 +797,9 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
     _bt_uppershutdown(wstate, state);

     /*
-     * If the index isn't temp, we must fsync it down to disk before it's safe
-     * to commit the transaction. (For a temp index we don't care since the
-     * index will be uninteresting after a crash anyway.)
+     * If the index is WAL-logged, we must fsync it down to disk before it's
+     * safe to commit the transaction. (For a non-WAL-logged index we don't
+     * care since the index will be uninteresting after a crash anyway.)
      *
      * It's obvious that we must do this when not WAL-logging the build. It's
      * less obvious that we have to do it even if we did WAL-log the index
@@ -811,7 +811,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
      * fsync those pages here, they might still not be on disk when the crash
      * occurs.
      */
-    if (!wstate->index->rd_istemp)
+    if (RelationNeedsWAL(wstate->index))
     {
         RelationOpenSmgr(wstate->index);
         smgrimmedsync(wstate->index->rd_smgr, MAIN_FORKNUM);