
8.4 pgindent run, with new combined Linux/FreeBSD/MinGW typedef list

provided by Andrew.
Bruce Momjian
2009-06-11 14:49:15 +00:00
parent 4e86efb4e5
commit d747140279
654 changed files with 11900 additions and 11387 deletions

src/backend/access/heap/heapam.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.276 2009/06/10 18:54:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.277 2009/06/11 14:48:53 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -69,7 +69,7 @@
/* GUC variable */
- bool synchronize_seqscans = true;
+ bool synchronize_seqscans = true;
static HeapScanDesc heap_beginscan_internal(Relation relation,
@ -116,9 +116,9 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
* strategy and enable synchronized scanning (see syncscan.c). Although
* the thresholds for these features could be different, we make them the
* same so that there are only two behaviors to tune rather than four.
- * (However, some callers need to be able to disable one or both of
- * these behaviors, independently of the size of the table; also there
- * is a GUC variable that can disable synchronized scanning.)
+ * (However, some callers need to be able to disable one or both of these
+ * behaviors, independently of the size of the table; also there is a GUC
+ * variable that can disable synchronized scanning.)
*
* During a rescan, don't make a new strategy object if we don't have to.
*/
@ -146,8 +146,8 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
if (is_rescan)
{
/*
- * If rescan, keep the previous startblock setting so that rewinding
- * a cursor doesn't generate surprising results. Reset the syncscan
+ * If rescan, keep the previous startblock setting so that rewinding a
+ * cursor doesn't generate surprising results. Reset the syncscan
* setting, though.
*/
scan->rs_syncscan = (allow_sync && synchronize_seqscans);
@ -1793,7 +1793,7 @@ void
FreeBulkInsertState(BulkInsertState bistate)
{
if (bistate->current_buf != InvalidBuffer)
- ReleaseBuffer(bistate->current_buf);
+ ReleaseBuffer(bistate->current_buf);
FreeAccessStrategy(bistate->strategy);
pfree(bistate);
}
@ -1977,7 +1977,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
/* Clear the bit in the visibility map if necessary */
if (all_visible_cleared)
- visibilitymap_clear(relation,
+ visibilitymap_clear(relation,
ItemPointerGetBlockNumber(&(heaptup->t_self)));
/*
@ -3437,8 +3437,8 @@ l3:
LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
/*
- * Don't update the visibility map here. Locking a tuple doesn't
- * change visibility info.
+ * Don't update the visibility map here. Locking a tuple doesn't change
+ * visibility info.
*/
/*
@ -4115,11 +4115,11 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
nowunused, nunused,
clean_move);
- freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
+ freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
/*
- * Note: we don't worry about updating the page's prunability hints.
- * At worst this will cause an extra prune cycle to occur soon.
+ * Note: we don't worry about updating the page's prunability hints. At
+ * worst this will cause an extra prune cycle to occur soon.
*/
PageSetLSN(page, lsn);
@ -4217,17 +4217,18 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
OffsetNumber offnum;
ItemId lp = NULL;
HeapTupleHeader htup;
- BlockNumber blkno;
+ BlockNumber blkno;
blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));
/*
- * The visibility map always needs to be updated, even if the heap page
- * is already up-to-date.
+ * The visibility map always needs to be updated, even if the heap page is
+ * already up-to-date.
*/
if (xlrec->all_visible_cleared)
{
- Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
+ Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
visibilitymap_clear(reln, blkno);
FreeFakeRelcacheEntry(reln);
}
@ -4294,17 +4295,18 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
xl_heap_header xlhdr;
uint32 newlen;
Size freespace;
- BlockNumber blkno;
+ BlockNumber blkno;
blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));
/*
- * The visibility map always needs to be updated, even if the heap page
- * is already up-to-date.
+ * The visibility map always needs to be updated, even if the heap page is
+ * already up-to-date.
*/
if (xlrec->all_visible_cleared)
{
- Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
+ Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
visibilitymap_clear(reln, blkno);
FreeFakeRelcacheEntry(reln);
}
@ -4361,7 +4363,7 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
if (offnum == InvalidOffsetNumber)
elog(PANIC, "heap_insert_redo: failed to add tuple");
- freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
+ freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
PageSetLSN(page, lsn);
PageSetTLI(page, ThisTimeLineID);
@ -4374,8 +4376,8 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
/*
* If the page is running low on free space, update the FSM as well.
- * Arbitrarily, our definition of "low" is less than 20%. We can't do
- * much better than that without knowing the fill-factor for the table.
+ * Arbitrarily, our definition of "low" is less than 20%. We can't do much
+ * better than that without knowing the fill-factor for the table.
*
* XXX: We don't get here if the page was restored from full page image.
* We don't bother to update the FSM in that case, it doesn't need to be
@ -4410,12 +4412,13 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move, bool hot_update)
Size freespace;
/*
- * The visibility map always needs to be updated, even if the heap page
- * is already up-to-date.
+ * The visibility map always needs to be updated, even if the heap page is
+ * already up-to-date.
*/
if (xlrec->all_visible_cleared)
{
- Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
+ Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
visibilitymap_clear(reln,
ItemPointerGetBlockNumber(&xlrec->target.tid));
FreeFakeRelcacheEntry(reln);
@ -4504,12 +4507,13 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move, bool hot_update)
newt:;
/*
- * The visibility map always needs to be updated, even if the heap page
- * is already up-to-date.
+ * The visibility map always needs to be updated, even if the heap page is
+ * already up-to-date.
*/
if (xlrec->new_all_visible_cleared)
{
- Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
+ Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
visibilitymap_clear(reln, ItemPointerGetBlockNumber(&xlrec->newtid));
FreeFakeRelcacheEntry(reln);
}
@ -4595,7 +4599,7 @@ newsame:;
if (xlrec->new_all_visible_cleared)
PageClearAllVisible(page);
- freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
+ freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
PageSetLSN(page, lsn);
PageSetTLI(page, ThisTimeLineID);
@ -4604,8 +4608,8 @@ newsame:;
/*
* If the page is running low on free space, update the FSM as well.
- * Arbitrarily, our definition of "low" is less than 20%. We can't do
- * much better than that without knowing the fill-factor for the table.
+ * Arbitrarily, our definition of "low" is less than 20%. We can't do much
+ * better than that without knowing the fill-factor for the table.
*
* However, don't update the FSM on HOT updates, because after crash
* recovery, either the old or the new tuple will certainly be dead and
@ -4619,7 +4623,7 @@ newsame:;
*/
if (!hot_update && freespace < BLCKSZ / 5)
XLogRecordPageWithFreeSpace(xlrec->target.node,
- ItemPointerGetBlockNumber(&(xlrec->newtid)), freespace);
+ ItemPointerGetBlockNumber(&(xlrec->newtid)), freespace);
}
static void

src/backend/access/heap/hio.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.75 2009/01/01 17:23:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.76 2009/06/11 14:48:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -64,7 +64,7 @@ static Buffer
ReadBufferBI(Relation relation, BlockNumber targetBlock,
BulkInsertState bistate)
{
- Buffer buffer;
+ Buffer buffer;
/* If not bulk-insert, exactly like ReadBuffer */
if (!bistate)
@ -118,7 +118,7 @@ ReadBufferBI(Relation relation, BlockNumber targetBlock,
* happen if space is freed in that page after heap_update finds there's not
* enough there). In that case, the page will be pinned and locked only once.
*
- * We normally use FSM to help us find free space. However,
+ * We normally use FSM to help us find free space. However,
* if HEAP_INSERT_SKIP_FSM is specified, we just append a new empty page to
* the end of the relation if the tuple won't fit on the current target page.
* This can save some cycles when we know the relation is new and doesn't
@ -133,7 +133,7 @@ ReadBufferBI(Relation relation, BlockNumber targetBlock,
* for additional constraints needed for safe usage of this behavior.)
*
* The caller can also provide a BulkInsertState object to optimize many
- * insertions into the same relation. This keeps a pin on the current
+ * insertions into the same relation. This keeps a pin on the current
* insertion target page (to save pin/unpin cycles) and also passes a
* BULKWRITE buffer selection strategy object to the buffer manager.
* Passing NULL for bistate selects the default behavior.
@ -186,7 +186,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* We first try to put the tuple on the same page we last inserted a tuple
- * on, as cached in the BulkInsertState or relcache entry. If that
+ * on, as cached in the BulkInsertState or relcache entry. If that
* doesn't work, we ask the Free Space Map to locate a suitable page.
* Since the FSM's info might be out of date, we have to be prepared to
* loop around and retry multiple times. (To insure this isn't an infinite

src/backend/access/heap/pruneheap.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/pruneheap.c,v 1.17 2009/01/01 17:23:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/pruneheap.c,v 1.18 2009/06/11 14:48:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -30,7 +30,7 @@
typedef struct
{
TransactionId new_prune_xid; /* new prune hint value for page */
- int nredirected; /* numbers of entries in arrays below */
+ int nredirected; /* numbers of entries in arrays below */
int ndead;
int nunused;
/* arrays that accumulate indexes of items to be changed */
@ -159,21 +159,21 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
/*
* Our strategy is to scan the page and make lists of items to change,
- * then apply the changes within a critical section. This keeps as
- * much logic as possible out of the critical section, and also ensures
- * that WAL replay will work the same as the normal case.
+ * then apply the changes within a critical section. This keeps as much
+ * logic as possible out of the critical section, and also ensures that
+ * WAL replay will work the same as the normal case.
*
- * First, inform inval.c that upcoming CacheInvalidateHeapTuple calls
- * are nontransactional.
+ * First, inform inval.c that upcoming CacheInvalidateHeapTuple calls are
+ * nontransactional.
*/
if (redirect_move)
BeginNonTransactionalInvalidation();
/*
- * Initialize the new pd_prune_xid value to zero (indicating no
- * prunable tuples). If we find any tuples which may soon become
- * prunable, we will save the lowest relevant XID in new_prune_xid.
- * Also initialize the rest of our working state.
+ * Initialize the new pd_prune_xid value to zero (indicating no prunable
+ * tuples). If we find any tuples which may soon become prunable, we will
+ * save the lowest relevant XID in new_prune_xid. Also initialize the rest
+ * of our working state.
*/
prstate.new_prune_xid = InvalidTransactionId;
prstate.nredirected = prstate.ndead = prstate.nunused = 0;
@ -204,9 +204,9 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
}
/*
- * Send invalidation messages for any tuples we are about to move.
- * It is safe to do this now, even though we could theoretically still
- * fail before making the actual page update, because a useless cache
+ * Send invalidation messages for any tuples we are about to move. It is
+ * safe to do this now, even though we could theoretically still fail
+ * before making the actual page update, because a useless cache
* invalidation doesn't hurt anything. Also, no one else can reload the
* tuples while we have exclusive buffer lock, so it's not too early to
* send the invals. This avoids sending the invals while inside the
@ -222,9 +222,8 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
if (prstate.nredirected > 0 || prstate.ndead > 0 || prstate.nunused > 0)
{
/*
- * Apply the planned item changes, then repair page fragmentation,
- * and update the page's hint bit about whether it has free line
- * pointers.
+ * Apply the planned item changes, then repair page fragmentation, and
+ * update the page's hint bit about whether it has free line pointers.
*/
heap_page_prune_execute(buffer,
prstate.redirected, prstate.nredirected,
@ -268,8 +267,8 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
{
/*
* If we didn't prune anything, but have found a new value for the
- * pd_prune_xid field, update it and mark the buffer dirty.
- * This is treated as a non-WAL-logged hint.
+ * pd_prune_xid field, update it and mark the buffer dirty. This is
+ * treated as a non-WAL-logged hint.
*
* Also clear the "page is full" flag if it is set, since there's no
* point in repeating the prune/defrag process until something else
@ -334,8 +333,8 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
* OldestXmin is the cutoff XID used to identify dead tuples.
*
* We don't actually change the page here, except perhaps for hint-bit updates
- * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in
- * prstate showing the changes to be made. Items to be redirected are added
+ * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in
+ * prstate showing the changes to be made. Items to be redirected are added
* to the redirected[] array (two entries per redirection); items to be set to
* LP_DEAD state are added to nowdead[]; and items to be set to LP_UNUSED
* state are added to nowunused[].
@ -598,19 +597,19 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
else if (redirect_move && ItemIdIsRedirected(rootlp))
{
/*
- * If we desire to eliminate LP_REDIRECT items by moving tuples,
- * make a redirection entry for each redirected root item; this
- * will cause heap_page_prune_execute to actually do the move.
- * (We get here only when there are no DEAD tuples in the chain;
- * otherwise the redirection entry was made above.)
+ * If we desire to eliminate LP_REDIRECT items by moving tuples, make
+ * a redirection entry for each redirected root item; this will cause
+ * heap_page_prune_execute to actually do the move. (We get here only
+ * when there are no DEAD tuples in the chain; otherwise the
+ * redirection entry was made above.)
*/
heap_prune_record_redirect(prstate, rootoffnum, chainitems[1]);
redirect_target = chainitems[1];
}
/*
- * If we are going to implement a redirect by moving tuples, we have
- * to issue a cache invalidation against the redirection target tuple,
+ * If we are going to implement a redirect by moving tuples, we have to
+ * issue a cache invalidation against the redirection target tuple,
* because its CTID will be effectively changed by the move. Note that
* CacheInvalidateHeapTuple only queues the request, it doesn't send it;
* if we fail before reaching EndNonTransactionalInvalidation, nothing
@ -693,7 +692,7 @@ heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum)
* buffer, and is inside a critical section.
*
* This is split out because it is also used by heap_xlog_clean()
- * to replay the WAL record when needed after a crash. Note that the
+ * to replay the WAL record when needed after a crash. Note that the
* arguments are identical to those of log_heap_clean().
*/
void

src/backend/access/heap/rewriteheap.c

@ -96,7 +96,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/rewriteheap.c,v 1.17 2009/01/01 17:23:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/rewriteheap.c,v 1.18 2009/06/11 14:48:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -577,7 +577,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
heaptup = toast_insert_or_update(state->rs_new_rel, tup, NULL,
HEAP_INSERT_SKIP_FSM |
(state->rs_use_wal ?
- 0 : HEAP_INSERT_SKIP_WAL));
+ 0 : HEAP_INSERT_SKIP_WAL));
else
heaptup = tup;

src/backend/access/heap/tuptoaster.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.92 2009/01/01 17:23:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.93 2009/06/11 14:48:54 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -1073,8 +1073,8 @@ toast_compress_datum(Datum value)
Assert(!VARATT_IS_COMPRESSED(DatumGetPointer(value)));
/*
- * No point in wasting a palloc cycle if value size is out of the
- * allowed range for compression
+ * No point in wasting a palloc cycle if value size is out of the allowed
+ * range for compression
*/
if (valsize < PGLZ_strategy_default->min_input_size ||
valsize > PGLZ_strategy_default->max_input_size)
@ -1087,10 +1087,10 @@ toast_compress_datum(Datum value)
* because it might be satisfied with having saved as little as one byte
* in the compressed data --- which could turn into a net loss once you
* consider header and alignment padding. Worst case, the compressed
- * format might require three padding bytes (plus header, which is included
- * in VARSIZE(tmp)), whereas the uncompressed format would take only one
- * header byte and no padding if the value is short enough. So we insist
- * on a savings of more than 2 bytes to ensure we have a gain.
+ * format might require three padding bytes (plus header, which is
+ * included in VARSIZE(tmp)), whereas the uncompressed format would take
+ * only one header byte and no padding if the value is short enough. So
+ * we insist on a savings of more than 2 bytes to ensure we have a gain.
*/
if (pglz_compress(VARDATA_ANY(DatumGetPointer(value)), valsize,
(PGLZ_Header *) tmp, PGLZ_strategy_default) &&
@ -1130,7 +1130,7 @@ toast_save_datum(Relation rel, Datum value, int options)
struct
{
struct varlena hdr;
- char data[TOAST_MAX_CHUNK_SIZE]; /* make struct big enough */
+ char data[TOAST_MAX_CHUNK_SIZE]; /* make struct big enough */
int32 align_it; /* ensure struct is aligned well enough */
} chunk_data;
int32 chunk_size;
@ -1295,8 +1295,8 @@ toast_delete_datum(Relation rel, Datum value)
/*
* Find all the chunks. (We don't actually care whether we see them in
- * sequence or not, but since we've already locked the index we might
- * as well use systable_beginscan_ordered.)
+ * sequence or not, but since we've already locked the index we might as
+ * well use systable_beginscan_ordered.)
*/
toastscan = systable_beginscan_ordered(toastrel, toastidx,
SnapshotToast, 1, &toastkey);
@ -1598,7 +1598,7 @@ toast_fetch_datum_slice(struct varlena * attr, int32 sliceoffset, int32 length)
*/
nextidx = startchunk;
toastscan = systable_beginscan_ordered(toastrel, toastidx,
- SnapshotToast, nscankeys, toastkey);
+ SnapshotToast, nscankeys, toastkey);
while ((ttup = systable_getnext_ordered(toastscan, ForwardScanDirection)) != NULL)
{
/*

src/backend/access/heap/visibilitymap.c

@ -8,10 +8,10 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/visibilitymap.c,v 1.3 2009/01/01 17:23:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/visibilitymap.c,v 1.4 2009/06/11 14:48:54 momjian Exp $
*
* INTERFACE ROUTINES
- * visibilitymap_clear - clear a bit in the visibility map
+ * visibilitymap_clear - clear a bit in the visibility map
* visibilitymap_pin - pin a map page for setting a bit
* visibilitymap_set - set a bit in a previously pinned page
* visibilitymap_test - test if a bit is set
@ -144,7 +144,7 @@ visibilitymap_clear(Relation rel, BlockNumber heapBlk)
mapBuffer = vm_readbuf(rel, mapBlock, false);
if (!BufferIsValid(mapBuffer))
- return; /* nothing to do */
+ return; /* nothing to do */
LockBuffer(mapBuffer, BUFFER_LOCK_EXCLUSIVE);
map = PageGetContents(BufferGetPage(mapBuffer));
@ -295,10 +295,11 @@ void
visibilitymap_truncate(Relation rel, BlockNumber nheapblocks)
{
BlockNumber newnblocks;
/* last remaining block, byte, and bit */
BlockNumber truncBlock = HEAPBLK_TO_MAPBLOCK(nheapblocks);
- uint32 truncByte = HEAPBLK_TO_MAPBYTE(nheapblocks);
- uint8 truncBit = HEAPBLK_TO_MAPBIT(nheapblocks);
+ uint32 truncByte = HEAPBLK_TO_MAPBYTE(nheapblocks);
+ uint8 truncBit = HEAPBLK_TO_MAPBIT(nheapblocks);
#ifdef TRACE_VISIBILITYMAP
elog(DEBUG1, "vm_truncate %s %d", RelationGetRelationName(rel), nheapblocks);
@ -315,14 +316,14 @@ visibilitymap_truncate(Relation rel, BlockNumber nheapblocks)
* Unless the new size is exactly at a visibility map page boundary, the
* tail bits in the last remaining map page, representing truncated heap
* blocks, need to be cleared. This is not only tidy, but also necessary
- * because we don't get a chance to clear the bits if the heap is
- * extended again.
+ * because we don't get a chance to clear the bits if the heap is extended
+ * again.
*/
if (truncByte != 0 || truncBit != 0)
{
- Buffer mapBuffer;
- Page page;
- char *map;
+ Buffer mapBuffer;
+ Page page;
+ char *map;
newnblocks = truncBlock + 1;
@ -344,11 +345,8 @@ visibilitymap_truncate(Relation rel, BlockNumber nheapblocks)
/*
* Mask out the unwanted bits of the last remaining byte.
*
- * ((1 << 0) - 1) = 00000000
- * ((1 << 1) - 1) = 00000001
- * ...
- * ((1 << 6) - 1) = 00111111
- * ((1 << 7) - 1) = 01111111
+ * ((1 << 0) - 1) = 00000000 ((1 << 1) - 1) = 00000001 ... ((1 << 6) -
+ * 1) = 00111111 ((1 << 7) - 1) = 01111111
*/
map[truncByte] &= (1 << truncBit) - 1;
@ -368,8 +366,8 @@ visibilitymap_truncate(Relation rel, BlockNumber nheapblocks)
rel->rd_istemp);
/*
- * Need to invalidate the relcache entry, because rd_vm_nblocks
- * seen by other backends is no longer valid.
+ * Need to invalidate the relcache entry, because rd_vm_nblocks seen by
+ * other backends is no longer valid.
*/
if (!InRecovery)
CacheInvalidateRelcache(rel);
@ -386,7 +384,7 @@ visibilitymap_truncate(Relation rel, BlockNumber nheapblocks)
static Buffer
vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
{
- Buffer buf;
+ Buffer buf;
RelationOpenSmgr(rel);
@ -433,20 +431,20 @@ static void
vm_extend(Relation rel, BlockNumber vm_nblocks)
{
BlockNumber vm_nblocks_now;
- Page pg;
+ Page pg;
pg = (Page) palloc(BLCKSZ);
PageInit(pg, BLCKSZ, 0);
/*
- * We use the relation extension lock to lock out other backends trying
- * to extend the visibility map at the same time. It also locks out
- * extension of the main fork, unnecessarily, but extending the
- * visibility map happens seldom enough that it doesn't seem worthwhile to
- * have a separate lock tag type for it.
+ * We use the relation extension lock to lock out other backends trying to
+ * extend the visibility map at the same time. It also locks out extension
+ * of the main fork, unnecessarily, but extending the visibility map
+ * happens seldom enough that it doesn't seem worthwhile to have a
+ * separate lock tag type for it.
*
- * Note that another backend might have extended or created the
- * relation before we get the lock.
+ * Note that another backend might have extended or created the relation
+ * before we get the lock.
*/
LockRelationForExtension(rel, ExclusiveLock);