
Phase 2 of pgindent updates.

Change pg_bsd_indent to follow upstream rules for placement of comments
to the right of code, and remove pgindent hack that caused comments
following #endif to not obey the general rule.

Commit e3860ffa4d wasn't actually using
the published version of pg_bsd_indent, but a hacked-up version that
tried to minimize the amount of movement of comments to the right of
code.  The situation of interest is where such a comment has to be
moved to the right of its default placement at column 33 because there's
code there.  BSD indent has always moved right in units of tab stops
in such cases --- but in the previous incarnation, indent was working
in 8-space tab stops, while now it knows we use 4-space tabs.  So the
net result is that in about half the cases, such comments are placed
one tab stop left of before.  This is better all around: it leaves
more room on the line for comment text, and it means that in such
cases the comment uniformly starts at the next 4-space tab stop after
the code, rather than sometimes one and sometimes two tabs after.
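
To make the rule concrete, here is a minimal sketch of the placement logic
described above (a hypothetical helper for illustration, not the actual
pg_bsd_indent code; columns are 1-based and 33 is indent's default comment
column):

static int
comment_start_column(int col_past_code, int default_col, int tabstop)
{
	if (col_past_code <= default_col)
		return default_col;	/* code ends before the default column */
	/* otherwise move to the next tab stop after the end of the code */
	return ((col_past_code - 1) / tabstop) * tabstop + tabstop + 1;
}

For code ending just past column 33, this gives column 37 with 4-space tabs
but column 41 with 8-space tabs, which is the one-tab-stop shift described
above.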

Also, ensure that comments following #endif are indented the same
as comments following other preprocessor commands such as #else.
That inconsistency turns out to have been self-inflicted damage
from a poorly-thought-through post-indent "fixup" in pgindent.
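
For illustration only, a small made-up function (hypothetical name
mix_first_byte, loosely modeled on the hashfunc.c fragment further down)
showing that a comment trailing #endif now indents the same way as one
trailing #else or any other preprocessor line:

#include <stdint.h>

static uint32_t
mix_first_byte(const unsigned char *k, uint32_t c)
{
#ifdef WORDS_BIGENDIAN
	c += ((uint32_t) k[0] << 24);	/* comment after code: general rule applies */
#else							/* comments after #else were already indented normally */
	c += k[0];
#endif							/* WORDS_BIGENDIAN, now indented the same way */
	return c;
}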

This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.

Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
Author: Tom Lane
Date: 2017-06-21 15:18:54 -04:00
parent f669c09989
commit c7b8998ebb
1107 changed files with 3433 additions and 3514 deletions

View File

@@ -357,7 +357,7 @@ brin_doinsert(Relation idxrel, BlockNumber pagesPerRange,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("index row size %zu exceeds maximum %zu for index \"%s\"",
itemsz, BrinMaxItemSize, RelationGetRelationName(idxrel))));
-return InvalidOffsetNumber; /* keep compiler quiet */
+return InvalidOffsetNumber; /* keep compiler quiet */
}
/* Make sure the revmap is long enough to contain the entry we need */
@@ -823,7 +823,7 @@ brin_getinsertbuffer(Relation irel, Buffer oldbuf, Size itemsz,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("index row size %zu exceeds maximum %zu for index \"%s\"",
itemsz, freespace, RelationGetRelationName(irel))));
-return InvalidBuffer; /* keep compiler quiet */
+return InvalidBuffer; /* keep compiler quiet */
}
if (newblk != oldblk)

View File

@@ -48,7 +48,7 @@ struct BrinRevmap
{
Relation rm_irel;
BlockNumber rm_pagesPerRange;
-BlockNumber rm_lastRevmapPage; /* cached from the metapage */
+BlockNumber rm_lastRevmapPage; /* cached from the metapage */
Buffer rm_metaBuf;
Buffer rm_currBuf;
};

View File

@@ -356,7 +356,7 @@ nocachegetattr(HeapTuple tuple,
HeapTupleHeader tup = tuple->t_data;
Form_pg_attribute *att = tupleDesc->attrs;
char *tp; /* ptr to data part of tuple */
-bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */
+bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */
bool slow = false; /* do we have to walk attrs? */
int off; /* current offset within data */
@@ -762,7 +762,7 @@ heap_form_tuple(TupleDesc tupleDescriptor,
HeapTupleHeaderSetNatts(td, numberOfAttributes);
td->t_hoff = hoff;
-if (tupleDescriptor->tdhasoid) /* else leave infomask = 0 */
+if (tupleDescriptor->tdhasoid) /* else leave infomask = 0 */
td->t_infomask = HEAP_HASOID;
heap_fill_tuple(tupleDescriptor,
@@ -941,7 +941,7 @@ heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc,
int attnum;
char *tp; /* ptr to tuple data */
long off; /* offset in tuple data */
-bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */
+bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */
bool slow = false; /* can we use/set attcacheoff? */
natts = HeapTupleHeaderGetNatts(tup);
@@ -1043,7 +1043,7 @@ slot_deform_tuple(TupleTableSlot *slot, int natts)
int attnum;
char *tp; /* ptr to tuple data */
long off; /* offset in tuple data */
-bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */
+bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */
bool slow; /* can we use/set attcacheoff? */
/*
@@ -1151,7 +1151,7 @@ slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull)
{
if (tuple == NULL) /* internal error */
elog(ERROR, "cannot extract system attribute from virtual tuple");
-if (tuple == &(slot->tts_minhdr)) /* internal error */
+if (tuple == &(slot->tts_minhdr)) /* internal error */
elog(ERROR, "cannot extract system attribute from minimal tuple");
return heap_getsysattr(tuple, attnum, tupleDesc, isnull);
}
@@ -1337,7 +1337,7 @@ slot_attisnull(TupleTableSlot *slot, int attnum)
{
if (tuple == NULL) /* internal error */
elog(ERROR, "cannot extract system attribute from virtual tuple");
-if (tuple == &(slot->tts_minhdr)) /* internal error */
+if (tuple == &(slot->tts_minhdr)) /* internal error */
elog(ERROR, "cannot extract system attribute from minimal tuple");
return heap_attisnull(tuple, attnum);
}
@@ -1446,7 +1446,7 @@ heap_form_minimal_tuple(TupleDesc tupleDescriptor,
HeapTupleHeaderSetNatts(tuple, numberOfAttributes);
tuple->t_hoff = hoff + MINIMAL_TUPLE_OFFSET;
-if (tupleDescriptor->tdhasoid) /* else leave infomask = 0 */
+if (tupleDescriptor->tdhasoid) /* else leave infomask = 0 */
tuple->t_infomask = HEAP_HASOID;
heap_fill_tuple(tupleDescriptor,

View File

@@ -103,7 +103,7 @@ printsimple(TupleTableSlot *slot, DestReceiver *self)
case INT4OID:
{
int32 num = DatumGetInt32(value);
-char str[12]; /* sign, 10 digits and '\0' */
+char str[12]; /* sign, 10 digits and '\0' */
pg_ltoa(num, str);
pq_sendcountedtext(&buf, str, strlen(str), false);
@@ -113,7 +113,7 @@ printsimple(TupleTableSlot *slot, DestReceiver *self)
case INT8OID:
{
int64 num = DatumGetInt64(value);
-char str[23]; /* sign, 21 digits and '\0' */
+char str[23]; /* sign, 21 digits and '\0' */
pg_lltoa(num, str);
pq_sendcountedtext(&buf, str, strlen(str), false);

View File

@@ -188,7 +188,7 @@ convert_tuples_by_position(TupleDesc indesc,
n = indesc->natts + 1; /* +1 for NULL */
map->invalues = (Datum *) palloc(n * sizeof(Datum));
map->inisnull = (bool *) palloc(n * sizeof(bool));
-map->invalues[0] = (Datum) 0; /* set up the NULL entry */
+map->invalues[0] = (Datum) 0; /* set up the NULL entry */
map->inisnull[0] = true;
return map;
@@ -267,7 +267,7 @@ convert_tuples_by_name(TupleDesc indesc,
n = indesc->natts + 1; /* +1 for NULL */
map->invalues = (Datum *) palloc(n * sizeof(Datum));
map->inisnull = (bool *) palloc(n * sizeof(bool));
-map->invalues[0] = (Datum) 0; /* set up the NULL entry */
+map->invalues[0] = (Datum) 0; /* set up the NULL entry */
map->inisnull[0] = true;
return map;

View File

@@ -116,7 +116,7 @@ ginInitBA(BuildAccumulator *accum)
cmpEntryAccumulator,
ginCombineData,
ginAllocEntryAccumulator,
-NULL, /* no freefunc needed */
+NULL, /* no freefunc needed */
(void *) accum);
}

View File

@@ -913,8 +913,8 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
* Remember next page - it will become the new list head
*/
blkno = GinPageGetOpaque(page)->rightlink;
-UnlockReleaseBuffer(buffer); /* shiftList will do exclusive
- * locking */
+UnlockReleaseBuffer(buffer); /* shiftList will do exclusive
+ * locking */
/*
* remove read pages from pending list, at this point all content

View File

@@ -362,7 +362,7 @@ ginNewScanKey(IndexScanDesc scan)
{
if (nullFlags[j])
{
-nullFlags[j] = true; /* not any other nonzero value */
+nullFlags[j] = true; /* not any other nonzero value */
hasNullQuery = true;
}
}

View File

@@ -650,7 +650,7 @@ ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
vacuum_delay_point();
}
-if (blkno == InvalidBlockNumber) /* rightmost page */
+if (blkno == InvalidBlockNumber) /* rightmost page */
break;
buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,

View File

@@ -1442,7 +1442,7 @@ initGISTstate(Relation index)
giststate = (GISTSTATE *) palloc(sizeof(GISTSTATE));
giststate->scanCxt = scanCxt;
-giststate->tempCxt = scanCxt; /* caller must change this if needed */
+giststate->tempCxt = scanCxt; /* caller must change this if needed */
giststate->tupdesc = index->rd_att;
for (i = 0; i < index->rd_att->natts; i++)

View File

@@ -814,7 +814,7 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level,
downlinks, ndownlinks, downlinkoffnum,
InvalidBlockNumber, InvalidOffsetNumber);
-list_free_deep(splitinfo); /* we don't need this anymore */
+list_free_deep(splitinfo); /* we don't need this anymore */
}
else
UnlockReleaseBuffer(buffer);

View File

@@ -709,7 +709,7 @@ gistRelocateBuildBuffersOnSplit(GISTBuildBuffers *gfbb, GISTSTATE *giststate,
* page seen so far. Skip the remaining columns and move
* on to the next page, if any.
*/
-zero_penalty = false; /* so outer loop won't exit */
+zero_penalty = false; /* so outer loop won't exit */
break;
}
}

View File

@@ -147,7 +147,7 @@ gistindex_keytest(IndexScanDesc scan,
{
int i;
-if (GistPageIsLeaf(page)) /* shouldn't happen */
+if (GistPageIsLeaf(page)) /* shouldn't happen */
elog(ERROR, "invalid GiST tuple found on leaf page");
for (i = 0; i < scan->numberOfOrderBys; i++)
so->distances[i] = -get_float8_infinity();

View File

@@ -412,7 +412,7 @@ hash_any(register const unsigned char *k, register int keylen)
a += k[0];
/* case 0: nothing left to add */
}
-#endif /* WORDS_BIGENDIAN */
+#endif /* WORDS_BIGENDIAN */
}
else
{
@@ -429,7 +429,7 @@ hash_any(register const unsigned char *k, register int keylen)
a += (k[0] + ((uint32) k[1] << 8) + ((uint32) k[2] << 16) + ((uint32) k[3] << 24));
b += (k[4] + ((uint32) k[5] << 8) + ((uint32) k[6] << 16) + ((uint32) k[7] << 24));
c += (k[8] + ((uint32) k[9] << 8) + ((uint32) k[10] << 16) + ((uint32) k[11] << 24));
-#endif /* WORDS_BIGENDIAN */
+#endif /* WORDS_BIGENDIAN */
mix(a, b, c);
k += 12;
len -= 12;
@@ -492,7 +492,7 @@ hash_any(register const unsigned char *k, register int keylen)
a += k[0];
/* case 0: nothing left to add */
}
-#endif /* WORDS_BIGENDIAN */
+#endif /* WORDS_BIGENDIAN */
}
final(a, b, c);

View File

@@ -462,7 +462,7 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
}
if (so->hashso_sk_hash == _hash_get_indextuple_hashkey(itup))
-break; /* yes, so exit for-loop */
+break; /* yes, so exit for-loop */
}
/* Before leaving current page, deal with any killed items */
@@ -519,7 +519,7 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
}
if (so->hashso_sk_hash == _hash_get_indextuple_hashkey(itup))
-break; /* yes, so exit for-loop */
+break; /* yes, so exit for-loop */
}
/* Before leaving current page, deal with any killed items */

View File

@@ -207,7 +207,7 @@ _hash_get_totalbuckets(uint32 splitpoint_phase)
/* account for buckets within splitpoint_group */
phases_within_splitpoint_group =
(((splitpoint_phase - HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE) &
-HASH_SPLITPOINT_PHASE_MASK) + 1); /* from 0-based to 1-based */
+HASH_SPLITPOINT_PHASE_MASK) + 1); /* from 0-based to 1-based */
total_buckets +=
(((1 << (splitpoint_group - 1)) >> HASH_SPLITPOINT_PHASE_BITS) *
phases_within_splitpoint_group);

View File

@@ -521,15 +521,15 @@ heapgettup(HeapScanDesc scan,
}
}
else
-page = scan->rs_startblock; /* first page */
+page = scan->rs_startblock; /* first page */
heapgetpage(scan, page);
-lineoff = FirstOffsetNumber; /* first offnum */
+lineoff = FirstOffsetNumber; /* first offnum */
scan->rs_inited = true;
}
else
{
/* continue from previously returned page/tuple */
-page = scan->rs_cblock; /* current page */
+page = scan->rs_cblock; /* current page */
lineoff = /* next offnum */
OffsetNumberNext(ItemPointerGetOffsetNumber(&(tuple->t_self)));
}
@@ -577,7 +577,7 @@ heapgettup(HeapScanDesc scan,
else
{
/* continue from previously returned page/tuple */
-page = scan->rs_cblock; /* current page */
+page = scan->rs_cblock; /* current page */
}
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
@@ -823,7 +823,7 @@ heapgettup_pagemode(HeapScanDesc scan,
}
}
else
-page = scan->rs_startblock; /* first page */
+page = scan->rs_startblock; /* first page */
heapgetpage(scan, page);
lineindex = 0;
scan->rs_inited = true;
@@ -831,7 +831,7 @@ heapgettup_pagemode(HeapScanDesc scan,
else
{
/* continue from previously returned page/tuple */
-page = scan->rs_cblock; /* current page */
+page = scan->rs_cblock; /* current page */
lineindex = scan->rs_cindex + 1;
}
@@ -876,7 +876,7 @@ heapgettup_pagemode(HeapScanDesc scan,
else
{
/* continue from previously returned page/tuple */
-page = scan->rs_cblock; /* current page */
+page = scan->rs_cblock; /* current page */
}
dp = BufferGetPage(scan->rs_cbuf);
@@ -1088,7 +1088,7 @@ fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
)
);
}
-#endif /* defined(DISABLE_COMPLEX_MACRO) */
+#endif /* defined(DISABLE_COMPLEX_MACRO) */
/* ----------------------------------------------------------------
@@ -1787,7 +1787,7 @@ heap_update_snapshot(HeapScanDesc scan, Snapshot snapshot)
#define HEAPDEBUG_1
#define HEAPDEBUG_2
#define HEAPDEBUG_3
-#endif /* !defined(HEAPDEBUGALL) */
+#endif /* !defined(HEAPDEBUGALL) */
HeapTuple
@@ -2623,7 +2623,7 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
HeapTupleHeaderSetXminFrozen(tup->t_data);
HeapTupleHeaderSetCmin(tup->t_data, cid);
-HeapTupleHeaderSetXmax(tup->t_data, 0); /* for cleanliness */
+HeapTupleHeaderSetXmax(tup->t_data, 0); /* for cleanliness */
tup->t_tableOid = RelationGetRelid(relation);
/*
@@ -4214,7 +4214,7 @@ l2:
HeapTupleClearHeapOnly(newtup);
}
-RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
+RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
/* Clear obsolete visibility flags, possibly set by ourselves above... */
@@ -6361,7 +6361,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
{
Assert(!TransactionIdDidCommit(xid));
*flags |= FRM_INVALIDATE_XMAX;
-xid = InvalidTransactionId; /* not strictly necessary */
+xid = InvalidTransactionId; /* not strictly necessary */
}
else
{

View File

@@ -329,7 +329,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
if (otherBuffer != InvalidBuffer)
otherBlock = BufferGetBlockNumber(otherBuffer);
else
-otherBlock = InvalidBlockNumber; /* just to keep compiler quiet */
+otherBlock = InvalidBlockNumber; /* just to keep compiler quiet */
/*
* We first try to put the tuple on the same page we last inserted a tuple

View File

@@ -31,8 +31,7 @@
typedef struct
{
TransactionId new_prune_xid; /* new prune hint value for page */
-TransactionId latestRemovedXid; /* latest xid to be removed by this
- * prune */
+TransactionId latestRemovedXid; /* latest xid to be removed by this prune */
int nredirected; /* numbers of entries in arrays below */
int ndead;
int nunused;
@@ -149,8 +148,8 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
*/
if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
{
-TransactionId ignore = InvalidTransactionId; /* return value not
- * needed */
+TransactionId ignore = InvalidTransactionId; /* return value not
+ * needed */
/* OK to prune */
(void) heap_page_prune(relation, buffer, OldestXmin, true, &ignore);

View File

@@ -146,22 +146,22 @@ typedef struct RewriteStateData
BlockNumber rs_blockno; /* block where page will go */
bool rs_buffer_valid; /* T if any tuples in buffer */
bool rs_use_wal; /* must we WAL-log inserts? */
-bool rs_logical_rewrite; /* do we need to do logical rewriting */
-TransactionId rs_oldest_xmin; /* oldest xmin used by caller to
- * determine tuple visibility */
+bool rs_logical_rewrite; /* do we need to do logical rewriting */
+TransactionId rs_oldest_xmin; /* oldest xmin used by caller to determine
+ * tuple visibility */
TransactionId rs_freeze_xid; /* Xid that will be used as freeze cutoff
 * point */
-TransactionId rs_logical_xmin; /* Xid that will be used as cutoff
- * point for logical rewrites */
+TransactionId rs_logical_xmin; /* Xid that will be used as cutoff point
+ * for logical rewrites */
MultiXactId rs_cutoff_multi; /* MultiXactId that will be used as cutoff
* point for multixacts */
MemoryContext rs_cxt; /* for hash tables and entries and tuples in
* them */
XLogRecPtr rs_begin_lsn; /* XLogInsertLsn when starting the rewrite */
-HTAB *rs_unresolved_tups; /* unmatched A tuples */
-HTAB *rs_old_new_tid_map; /* unmatched B tuples */
+HTAB *rs_unresolved_tups; /* unmatched A tuples */
+HTAB *rs_old_new_tid_map; /* unmatched B tuples */
HTAB *rs_logical_mappings; /* logical remapping files */
-uint32 rs_num_rewrite_mappings; /* # in memory mappings */
+uint32 rs_num_rewrite_mappings; /* # in memory mappings */
} RewriteStateData;
/*
@@ -216,8 +216,8 @@ typedef struct RewriteMappingFile
*/
typedef struct RewriteMappingDataEntry
{
-LogicalRewriteMappingData map; /* map between old and new location of
- * the tuple */
+LogicalRewriteMappingData map; /* map between old and new location of the
+ * tuple */
dlist_node node;
} RewriteMappingDataEntry;
@@ -655,7 +655,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
else
heaptup = tup;
-len = MAXALIGN(heaptup->t_len); /* be conservative */
+len = MAXALIGN(heaptup->t_len); /* be conservative */
/*
* If we're gonna fail for oversize tuple, do it right away

View File

@@ -1523,7 +1523,7 @@ toast_save_datum(Relation rel, Datum value,
{
data_p = VARDATA_SHORT(dval);
data_todo = VARSIZE_SHORT(dval) - VARHDRSZ_SHORT;
-toast_pointer.va_rawsize = data_todo + VARHDRSZ; /* as if not short */
+toast_pointer.va_rawsize = data_todo + VARHDRSZ; /* as if not short */
toast_pointer.va_extsize = data_todo;
}
else if (VARATT_IS_COMPRESSED(dval))

View File

@@ -83,7 +83,7 @@ RelationGetIndexScan(Relation indexRelation, int nkeys, int norderbys)
scan->heapRelation = NULL; /* may be set later */
scan->indexRelation = indexRelation;
-scan->xs_snapshot = InvalidSnapshot; /* caller must initialize this */
+scan->xs_snapshot = InvalidSnapshot; /* caller must initialize this */
scan->numberOfKeys = nkeys;
scan->numberOfOrderBys = norderbys;

View File

@@ -326,7 +326,7 @@ index_rescan(IndexScanDesc scan,
scan->xs_continue_hot = false;
-scan->kill_prior_tuple = false; /* for safety */
+scan->kill_prior_tuple = false; /* for safety */
scan->indexRelation->rd_amroutine->amrescan(scan, keys, nkeys,
orderbys, norderbys);
@@ -401,7 +401,7 @@ index_restrpos(IndexScanDesc scan)
scan->xs_continue_hot = false;
-scan->kill_prior_tuple = false; /* for safety */
+scan->kill_prior_tuple = false; /* for safety */
scan->indexRelation->rd_amroutine->amrestrpos(scan);
}

View File

@@ -36,7 +36,7 @@ typedef struct
OffsetNumber newitemoff; /* where the new item is to be inserted */
int leftspace; /* space available for items on left page */
int rightspace; /* space available for items on right page */
-int olddataitemstotal; /* space taken by old items */
+int olddataitemstotal; /* space taken by old items */
bool have_split; /* found a valid split? */

View File

@@ -59,7 +59,7 @@ typedef struct
IndexBulkDeleteCallback callback;
void *callback_state;
BTCycleId cycleid;
-BlockNumber lastBlockVacuumed; /* highest blkno actually vacuumed */
+BlockNumber lastBlockVacuumed; /* highest blkno actually vacuumed */
BlockNumber lastBlockLocked; /* highest blkno we've cleanup-locked */
BlockNumber totFreePages; /* true total # of free pages */
MemoryContext pagedelcontext;
@@ -95,9 +95,8 @@ typedef struct BTParallelScanDescData
BTPS_State btps_pageStatus; /* indicates whether next page is
* available for scan. see above for
* possible states of parallel scan. */
-int btps_arrayKeyCount; /* count indicating number of array
- * scan keys processed by parallel
- * scan */
+int btps_arrayKeyCount; /* count indicating number of array scan
+ * keys processed by parallel scan */
slock_t btps_mutex; /* protects above variables */
ConditionVariable btps_cv; /* used to synchronize parallel scan */
} BTParallelScanDescData;
@@ -187,7 +186,7 @@ btbuild(Relation heap, Relation index, IndexInfo *indexInfo)
#ifdef BTREE_BUILD_STATS
if (log_btree_build_stats)
ResetUsage();
-#endif /* BTREE_BUILD_STATS */
+#endif /* BTREE_BUILD_STATS */
/*
* We expect to be called exactly once for any index relation. If that's
@@ -234,7 +233,7 @@ btbuild(Relation heap, Relation index, IndexInfo *indexInfo)
ShowUsage("BTREE BUILD STATS");
ResetUsage();
}
-#endif /* BTREE_BUILD_STATS */
+#endif /* BTREE_BUILD_STATS */
/*
* Return statistics

View File

@@ -466,7 +466,7 @@ _bt_compare(Relation rel,
datum = index_getattr(itup, scankey->sk_attno, itupdesc, &isNull);
/* see comments about NULLs handling in btbuild */
-if (scankey->sk_flags & SK_ISNULL) /* key is NULL */
+if (scankey->sk_flags & SK_ISNULL) /* key is NULL */
{
if (isNull)
result = 0; /* NULL "=" NULL */

View File

@@ -111,7 +111,7 @@ typedef struct BTPageState
OffsetNumber btps_lastoff; /* last item offset loaded */
uint32 btps_level; /* tree level (0 = leaf) */
Size btps_full; /* "full" if less than this much free space */
-struct BTPageState *btps_next; /* link to parent level, if any */
+struct BTPageState *btps_next; /* link to parent level, if any */
} BTPageState;
/*
@@ -122,8 +122,8 @@ typedef struct BTWriteState
Relation heap;
Relation index;
bool btws_use_wal; /* dump pages to WAL? */
-BlockNumber btws_pages_alloced; /* # pages allocated */
-BlockNumber btws_pages_written; /* # pages written out */
+BlockNumber btws_pages_alloced; /* # pages allocated */
+BlockNumber btws_pages_written; /* # pages written out */
Page btws_zeropage; /* workspace for filling zeroes */
} BTWriteState;
@@ -208,7 +208,7 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2)
ShowUsage("BTREE BUILD (Spool) STATISTICS");
ResetUsage();
}
-#endif /* BTREE_BUILD_STATS */
+#endif /* BTREE_BUILD_STATS */
tuplesort_performsort(btspool->sortstate);
if (btspool2)
@@ -566,7 +566,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
oopaque->btpo_next = nblkno;
nopaque->btpo_prev = oblkno;
-nopaque->btpo_next = P_NONE; /* redundant */
+nopaque->btpo_next = P_NONE; /* redundant */
}
/*

View File

@@ -26,7 +26,7 @@
const struct config_enum_entry wal_level_options[] = {
{"minimal", WAL_LEVEL_MINIMAL, false},
{"replica", WAL_LEVEL_REPLICA, false},
-{"archive", WAL_LEVEL_REPLICA, true}, /* deprecated */
+{"archive", WAL_LEVEL_REPLICA, true}, /* deprecated */
{"hot_standby", WAL_LEVEL_REPLICA, true}, /* deprecated */
{"logical", WAL_LEVEL_LOGICAL, false},
{NULL, 0, false}

View File

@@ -1004,7 +1004,7 @@ doPickSplit(Relation index, SpGistState *state,
insertedNew = true;
}
for (i = 0; i < nToInsert; i++)
-leafPageSelect[i] = 0; /* signifies current page */
+leafPageSelect[i] = 0; /* signifies current page */
}
else if (in.nTuples == 1 && totalLeafSizes > SPGIST_PAGE_CAPACITY)
{
@@ -1076,12 +1076,12 @@ doPickSplit(Relation index, SpGistState *state,
{
if (leafSizes[i] <= curspace)
{
-nodePageSelect[i] = 0; /* signifies current page */
+nodePageSelect[i] = 0; /* signifies current page */
curspace -= leafSizes[i];
}
else
{
-nodePageSelect[i] = 1; /* signifies new leaf page */
+nodePageSelect[i] = 1; /* signifies new leaf page */
newspace -= leafSizes[i];
}
}

View File

@@ -29,7 +29,7 @@ typedef void (*storeRes_func) (SpGistScanOpaque so, ItemPointer heapPtr,
typedef struct ScanStackEntry
{
-Datum reconstructedValue; /* value reconstructed from parent */
+Datum reconstructedValue; /* value reconstructed from parent */
void *traversalValue; /* opclass-specific traverse value */
int level; /* level of items on this page */
ItemPointerData ptr; /* block and offset to scan from */

View File

@@ -34,7 +34,7 @@ typedef struct spgVacPendingItem
{
ItemPointerData tid; /* redirection target to visit */
bool done; /* have we dealt with this? */
-struct spgVacPendingItem *next; /* list link */
+struct spgVacPendingItem *next; /* list link */
} spgVacPendingItem;
/* Local state for vacuum operations */
@@ -48,7 +48,7 @@ typedef struct spgBulkDeleteState
/* Additional working state */
SpGistState spgstate; /* for SPGiST operations that need one */
-spgVacPendingItem *pendingList; /* TIDs we need to (re)visit */
+spgVacPendingItem *pendingList; /* TIDs we need to (re)visit */
TransactionId myXmin; /* for detecting newly-added redirects */
BlockNumber lastFilledBlock; /* last non-deletable block */
} spgBulkDeleteState;

View File

@@ -149,7 +149,7 @@ void
TransactionIdSetTreeStatus(TransactionId xid, int nsubxids,
TransactionId *subxids, XidStatus status, XLogRecPtr lsn)
{
-int pageno = TransactionIdToPage(xid); /* get page of parent */
+int pageno = TransactionIdToPage(xid); /* get page of parent */
int i;
Assert(status == TRANSACTION_STATUS_COMMITTED ||

View File

@@ -76,7 +76,7 @@ typedef struct SlruFlushData
{
int num_files; /* # files actually open */
int fd[MAX_FLUSH_BUFFERS]; /* their FD's */
-int segno[MAX_FLUSH_BUFFERS]; /* their log seg#s */
+int segno[MAX_FLUSH_BUFFERS]; /* their log seg#s */
} SlruFlushData;
typedef struct SlruFlushData *SlruFlush;
@@ -150,10 +150,10 @@ SimpleLruShmemSize(int nslots, int nlsns)
sz = MAXALIGN(sizeof(SlruSharedData));
sz += MAXALIGN(nslots * sizeof(char *)); /* page_buffer[] */
sz += MAXALIGN(nslots * sizeof(SlruPageStatus)); /* page_status[] */
-sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */
-sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */
-sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */
-sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */
+sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */
+sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */
+sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */
+sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */
if (nlsns > 0)
sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); /* group_lsn[] */
@@ -972,9 +972,9 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
int bestvalidslot = 0; /* keep compiler quiet */
int best_valid_delta = -1;
int best_valid_page_number = 0; /* keep compiler quiet */
-int bestinvalidslot = 0; /* keep compiler quiet */
+int bestinvalidslot = 0; /* keep compiler quiet */
int best_invalid_delta = -1;
-int best_invalid_page_number = 0; /* keep compiler quiet */
+int best_invalid_page_number = 0; /* keep compiler quiet */
/* See if page already has a buffer assigned */
for (slotno = 0; slotno < shared->num_slots; slotno++)

View File

@@ -261,7 +261,7 @@ findNewestTimeLine(TimeLineID startTLI)
{
if (existsTimeLineHistory(probeTLI))
{
-newestTLI = probeTLI; /* probeTLI exists */
+newestTLI = probeTLI; /* probeTLI exists */
}
else
{

View File

@@ -164,7 +164,7 @@ typedef struct GlobalTransactionData
* track of the end LSN because that is the LSN we need to wait for prior
* to commit.
*/
-XLogRecPtr prepare_start_lsn; /* XLOG offset of prepare record start */
+XLogRecPtr prepare_start_lsn; /* XLOG offset of prepare record start */
XLogRecPtr prepare_end_lsn; /* XLOG offset of prepare record end */
TransactionId xid; /* The GXACT id */
@@ -898,7 +898,7 @@ TwoPhaseGetDummyProc(TransactionId xid)
/*
* Header for a 2PC state file
*/
-#define TWOPHASE_MAGIC 0x57F94533 /* format identifier */
+#define TWOPHASE_MAGIC 0x57F94533 /* format identifier */
typedef struct TwoPhaseFileHeader
{
@@ -1024,7 +1024,7 @@ StartPrepare(GlobalTransaction gxact)
hdr.nabortrels = smgrGetPendingDeletes(false, &abortrels);
hdr.ninvalmsgs = xactGetCommittedInvalidationMessages(&invalmsgs,
&hdr.initfileinval);
-hdr.gidlen = strlen(gxact->gid) + 1; /* Include '\0' */
+hdr.gidlen = strlen(gxact->gid) + 1; /* Include '\0' */
save_state_data(&hdr, sizeof(TwoPhaseFileHeader));
save_state_data(gxact->gid, hdr.gidlen);

View File

@@ -27,7 +27,7 @@ const TwoPhaseCallback twophase_recover_callbacks[TWOPHASE_RM_MAX_ID + 1] =
lock_twophase_recover, /* Lock */
NULL, /* pgstat */
multixact_twophase_recover, /* MultiXact */
-predicatelock_twophase_recover /* PredicateLock */
+predicatelock_twophase_recover /* PredicateLock */
};
const TwoPhaseCallback twophase_postcommit_callbacks[TWOPHASE_RM_MAX_ID + 1] =
@@ -35,7 +35,7 @@ const TwoPhaseCallback twophase_postcommit_callbacks[TWOPHASE_RM_MAX_ID + 1] =
NULL, /* END ID */
lock_twophase_postcommit, /* Lock */
pgstat_twophase_postcommit, /* pgstat */
-multixact_twophase_postcommit, /* MultiXact */
+multixact_twophase_postcommit, /* MultiXact */
NULL /* PredicateLock */
};
@@ -44,14 +44,14 @@ const TwoPhaseCallback twophase_postabort_callbacks[TWOPHASE_RM_MAX_ID + 1] =
NULL, /* END ID */
lock_twophase_postabort, /* Lock */
pgstat_twophase_postabort, /* pgstat */
-multixact_twophase_postabort, /* MultiXact */
+multixact_twophase_postabort, /* MultiXact */
NULL /* PredicateLock */
};
const TwoPhaseCallback twophase_standby_recover_callbacks[TWOPHASE_RM_MAX_ID + 1] =
{
NULL, /* END ID */
-lock_twophase_standby_recover, /* Lock */
+lock_twophase_standby_recover, /* Lock */
NULL, /* pgstat */
NULL, /* MultiXact */
NULL /* PredicateLock */

View File

@@ -177,18 +177,18 @@ typedef struct TransactionStateData
TBlockState blockState; /* high-level state */
int nestingLevel; /* transaction nesting depth */
int gucNestLevel; /* GUC context nesting depth */
-MemoryContext curTransactionContext; /* my xact-lifetime context */
+MemoryContext curTransactionContext; /* my xact-lifetime context */
ResourceOwner curTransactionOwner; /* my query resources */
TransactionId *childXids; /* subcommitted child XIDs, in XID order */
int nChildXids; /* # of subcommitted child XIDs */
int maxChildXids; /* allocated size of childXids[] */
Oid prevUser; /* previous CurrentUserId setting */
int prevSecContext; /* previous SecurityRestrictionContext */
-bool prevXactReadOnly; /* entry-time xact r/o state */
-bool startedInRecovery; /* did we start in recovery? */
+bool prevXactReadOnly; /* entry-time xact r/o state */
+bool startedInRecovery; /* did we start in recovery? */
bool didLogXid; /* has xid been included in WAL record? */
-int parallelModeLevel; /* Enter/ExitParallelMode counter */
-struct TransactionStateData *parent; /* back link to parent */
+int parallelModeLevel; /* Enter/ExitParallelMode counter */
+struct TransactionStateData *parent; /* back link to parent */
} TransactionStateData;
typedef TransactionStateData *TransactionState;
@@ -2641,8 +2641,7 @@ CleanupTransaction(void)
* do abort cleanup processing
*/
AtCleanup_Portals(); /* now safe to release portal memory */
-AtEOXact_Snapshot(false, true); /* and release the transaction's
- * snapshots */
+AtEOXact_Snapshot(false, true); /* and release the transaction's snapshots */
CurrentResourceOwner = NULL; /* and resource owner */
if (TopTransactionResourceOwner)
@@ -3769,7 +3768,7 @@ DefineSavepoint(char *name)
case TBLOCK_SUBINPROGRESS:
/* Normal subtransaction start */
PushTransaction();
-s = CurrentTransactionState; /* changed by push */
+s = CurrentTransactionState; /* changed by push */
/*
* Savepoint names, like the TransactionState block itself, live
@@ -4080,7 +4079,7 @@ BeginInternalSubTransaction(char *name)
case TBLOCK_SUBINPROGRESS:
/* Normal subtransaction start */
PushTransaction();
-s = CurrentTransactionState; /* changed by push */
+s = CurrentTransactionState; /* changed by push */
/*
* Savepoint names, like the TransactionState block itself, live

View File

@@ -86,8 +86,8 @@ extern uint32 bootstrap_data_checksum_version;
/* User-settable parameters */
-int max_wal_size_mb = 1024; /* 1 GB */
-int min_wal_size_mb = 80; /* 80 MB */
+int max_wal_size_mb = 1024; /* 1 GB */
+int min_wal_size_mb = 80; /* 80 MB */
int wal_keep_segments = 0;
int XLOGbuffers = -1;
int XLogArchiveTimeout = 0;
@@ -582,8 +582,7 @@ typedef struct XLogCtlData
XLogRecPtr asyncXactLSN; /* LSN of newest async commit/abort */
XLogRecPtr replicationSlotMinLSN; /* oldest LSN needed by any slot */
-XLogSegNo lastRemovedSegNo; /* latest removed/recycled XLOG
- * segment */
+XLogSegNo lastRemovedSegNo; /* latest removed/recycled XLOG segment */
/* Fake LSN counter, for unlogged relations. Protected by ulsn_lck. */
XLogRecPtr unloggedLSN;
@@ -784,7 +783,7 @@ static int readFile = -1;
static XLogSegNo readSegNo = 0;
static uint32 readOff = 0;
static uint32 readLen = 0;
-static XLogSource readSource = 0; /* XLOG_FROM_* code */
+static XLogSource readSource = 0; /* XLOG_FROM_* code */
/*
* Keeps track of which source we're currently reading from. This is
@@ -812,14 +811,14 @@ typedef struct XLogPageReadPrivate
* XLogReceiptSource tracks where we last successfully read some WAL.)
*/
static TimestampTz XLogReceiptTime = 0;
-static XLogSource XLogReceiptSource = 0; /* XLOG_FROM_* code */
+static XLogSource XLogReceiptSource = 0; /* XLOG_FROM_* code */
/* State information for XLOG reading */
static XLogRecPtr ReadRecPtr; /* start of last record read */
static XLogRecPtr EndRecPtr; /* end+1 of last record read */
-static XLogRecPtr minRecoveryPoint; /* local copy of
- * ControlFile->minRecoveryPoint */
+static XLogRecPtr minRecoveryPoint; /* local copy of
+ * ControlFile->minRecoveryPoint */
static TimeLineID minRecoveryPointTLI;
static bool updateMinRecoveryPoint = true;
@@ -2020,7 +2019,7 @@ XLogRecPtrToBytePos(XLogRecPtr ptr)
{
result = fullsegs * UsableBytesInSegment +
(XLOG_BLCKSZ - SizeOfXLogLongPHD) + /* account for first page */
-(fullpages - 1) * UsableBytesInPage; /* full pages */
+(fullpages - 1) * UsableBytesInPage; /* full pages */
if (offset > 0)
{
Assert(offset >= SizeOfXLogShortPHD);
@@ -2508,7 +2507,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
/* signal that we need to wakeup walsenders later */
WalSndWakeupRequest();
-LogwrtResult.Flush = LogwrtResult.Write; /* end of page */
+LogwrtResult.Flush = LogwrtResult.Write; /* end of page */
if (XLogArchivingActive())
XLogArchiveNotifySeg(openLogSegNo);
@@ -4377,7 +4376,7 @@ static void
WriteControlFile(void)
{
int fd;
-char buffer[PG_CONTROL_SIZE]; /* need not be aligned */
+char buffer[PG_CONTROL_SIZE]; /* need not be aligned */
/*
* Initialize version and compatibility-check fields
@@ -6531,7 +6530,7 @@ StartupXLOG(void)
ereport(LOG,
(errmsg("using previous checkpoint record at %X/%X",
(uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
-InRecovery = true; /* force recovery even if SHUTDOWNED */
+InRecovery = true; /* force recovery even if SHUTDOWNED */
}
else
ereport(PANIC,
@@ -8835,7 +8834,7 @@ CreateCheckPoint(int flags)
if (shutdown)
{
if (flags & CHECKPOINT_END_OF_RECOVERY)
-LocalXLogInsertAllowed = -1; /* return to "check" state */
+LocalXLogInsertAllowed = -1; /* return to "check" state */
else
LocalXLogInsertAllowed = 0; /* never again write WAL */
}
@@ -9965,7 +9964,7 @@ xlog_outrec(StringInfo buf, XLogReaderState *record)
appendStringInfoString(buf, " FPW");
}
}
-#endif /* WAL_DEBUG */
+#endif /* WAL_DEBUG */
/*
* Returns a string describing an XLogRecord, consisting of its identity

View File

@@ -61,9 +61,9 @@ typedef struct
} registered_buffer;
static registered_buffer *registered_buffers;
-static int max_registered_buffers; /* allocated size */
-static int max_registered_block_id = 0; /* highest block_id + 1
- * currently registered */
+static int max_registered_buffers; /* allocated size */
+static int max_registered_block_id = 0; /* highest block_id + 1 currently
+ * registered */
/*
* A chain of XLogRecDatas to hold the "main data" of a WAL record, registered
@@ -438,7 +438,7 @@ XLogInsert(RmgrId rmid, uint8 info)
if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID)
{
XLogResetInsertion();
-EndPos = SizeOfXLogLongPHD; /* start of 1st chkpt record */
+EndPos = SizeOfXLogLongPHD; /* start of 1st chkpt record */
return EndPos;
}

View File

@@ -974,7 +974,7 @@ out:
return found;
}
-#endif /* FRONTEND */
+#endif /* FRONTEND */
/* ----------------------------------------