mirror of https://github.com/postgres/postgres.git
Post-feature-freeze pgindent run.
Discussion: https://postgr.es/m/15719.1523984266@sss.pgh.pa.us
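A "pgindent run" reindents the whole C source tree with the project's indent tooling. As a rough sketch only (assuming a built pg_bsd_indent and an up-to-date typedefs list; exact options and typedefs handling vary by branch, and this is not necessarily the exact command used for this commit), it is invoked from the top of the source tree roughly like this:

    $ src/tools/pgindent/pgindent

pgindent re-wraps comments to the project's target line width and realigns declarations, which is why the diff below consists almost entirely of whitespace and comment-reflow changes.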
@@ -189,7 +189,7 @@ brininsert(Relation idxRel, Datum *values, bool *nulls,
NULL, BUFFER_LOCK_SHARE, NULL);
if (!lastPageTuple)
{
-bool recorded;
+bool recorded;

recorded = AutoVacuumRequestWork(AVW_BRINSummarizeRange,
RelationGetRelid(idxRel),

@@ -1685,8 +1685,8 @@ slot_getsomeattrs(TupleTableSlot *slot, int attnum)
attno = slot->tts_nvalid;

/*
-* If tuple doesn't have all the atts indicated by attnum, read the
-* rest as NULLs or missing values
+* If tuple doesn't have all the atts indicated by attnum, read the rest
+* as NULLs or missing values
*/
if (attno < attnum)
slot_getmissingattrs(slot, attno, attnum);

@@ -489,8 +489,8 @@ index_truncate_tuple(TupleDesc sourceDescriptor, IndexTuple source,
Assert(IndexTupleSize(truncated) <= IndexTupleSize(source));

/*
-* Cannot leak memory here, TupleDescCopy() doesn't allocate any
-* inner structure, so, plain pfree() should clean all allocated memory
+* Cannot leak memory here, TupleDescCopy() doesn't allocate any inner
+* structure, so, plain pfree() should clean all allocated memory
*/
pfree(truncdesc);

@@ -1495,9 +1495,9 @@ index_reloptions(amoptions_function amoptions, Datum reloptions, bool validate)
bytea *
index_generic_reloptions(Datum reloptions, bool validate)
{
-int numoptions;
+int numoptions;
GenericIndexOpts *idxopts;
-relopt_value *options;
+relopt_value *options;
static const relopt_parse_elt tab[] = {
{"recheck_on_update", RELOPT_TYPE_BOOL, offsetof(GenericIndexOpts, recheck_on_update)}
};
@@ -1512,12 +1512,12 @@ index_generic_reloptions(Datum reloptions, bool validate)

idxopts = allocateReloptStruct(sizeof(GenericIndexOpts), options, numoptions);

-fillRelOptions((void *)idxopts, sizeof(GenericIndexOpts), options, numoptions,
+fillRelOptions((void *) idxopts, sizeof(GenericIndexOpts), options, numoptions,
validate, tab, lengthof(tab));

pfree(options);

-return (bytea*) idxopts;
+return (bytea *) idxopts;
}

/*

@@ -521,12 +521,12 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
{

PredicateLockPageSplit(btree->index,
-BufferGetBlockNumber(stack->buffer),
-BufferGetBlockNumber(lbuffer));
+BufferGetBlockNumber(stack->buffer),
+BufferGetBlockNumber(lbuffer));

PredicateLockPageSplit(btree->index,
-BufferGetBlockNumber(stack->buffer),
-BufferGetBlockNumber(rbuffer));
+BufferGetBlockNumber(stack->buffer),
+BufferGetBlockNumber(rbuffer));
}

}
@@ -543,8 +543,8 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
{

PredicateLockPageSplit(btree->index,
-BufferGetBlockNumber(stack->buffer),
-BufferGetBlockNumber(rbuffer));
+BufferGetBlockNumber(stack->buffer),
+BufferGetBlockNumber(rbuffer));
}
}

@@ -1812,8 +1812,8 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
blkno = BufferGetBlockNumber(buffer);

/*
-* Copy a predicate lock from entry tree leaf (containing posting list)
-* to posting tree.
+* Copy a predicate lock from entry tree leaf (containing posting list) to
+* posting tree.
*/
PredicateLockPageSplit(index, BufferGetBlockNumber(entrybuffer), blkno);

@@ -42,11 +42,11 @@ static void
GinPredicateLockPage(Relation index, BlockNumber blkno, Snapshot snapshot)
{
/*
-* When fast update is on then no need in locking pages, because we
-* anyway need to lock the whole index.
+* When fast update is on then no need in locking pages, because we anyway
+* need to lock the whole index.
*/
if (!GinGetUseFastUpdate(index))
-PredicateLockPage(index, blkno, snapshot);
+PredicateLockPage(index, blkno, snapshot);
}

/*
@@ -426,8 +426,8 @@ restartScanEntry:
entry->buffer = stack->buffer;

/*
-* Predicate lock visited posting tree page, following pages
-* will be locked by moveRightIfItNeeded or entryLoadMoreItems
+* Predicate lock visited posting tree page, following pages will
+* be locked by moveRightIfItNeeded or entryLoadMoreItems
*/
GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);

@@ -1779,9 +1779,9 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
UnlockReleaseBuffer(metabuffer);

/*
-* If fast update is enabled, we acquire a predicate lock on the entire
-* relation as fast update postpones the insertion of tuples into index
-* structure due to which we can't detect rw conflicts.
+* If fast update is enabled, we acquire a predicate lock on the
+* entire relation as fast update postpones the insertion of tuples
+* into index structure due to which we can't detect rw conflicts.
*/
if (GinGetUseFastUpdate(scan->indexRelation))
PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);

@@ -519,12 +519,12 @@ gininsert(Relation index, Datum *values, bool *isnull,

/*
* With fastupdate on each scan and each insert begin with access to
-* pending list, so it effectively lock entire index. In this case
-* we aquire predicate lock and check for conflicts over index relation,
+* pending list, so it effectively lock entire index. In this case we
+* aquire predicate lock and check for conflicts over index relation,
* and hope that it will reduce locking overhead.
*
-* Do not use GinCheckForSerializableConflictIn() here, because
-* it will do nothing (it does actual work only with fastupdate off).
+* Do not use GinCheckForSerializableConflictIn() here, because it
+* will do nothing (it does actual work only with fastupdate off).
* Check for conflicts for entire index.
*/
CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
@@ -539,7 +539,7 @@ gininsert(Relation index, Datum *values, bool *isnull,
}
else
{
-GinStatsData stats;
+GinStatsData stats;

/*
* Fastupdate is off but if pending list isn't empty then we need to

@@ -341,8 +341,8 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate,
ptr->page = BufferGetPage(ptr->buffer);
ptr->block.blkno = BufferGetBlockNumber(ptr->buffer);
PredicateLockPageSplit(rel,
-BufferGetBlockNumber(buffer),
-BufferGetBlockNumber(ptr->buffer));
+BufferGetBlockNumber(buffer),
+BufferGetBlockNumber(ptr->buffer));
}

/*
@@ -1220,8 +1220,8 @@ gistinserttuples(GISTInsertState *state, GISTInsertStack *stack,
bool is_split;

/*
-* Check for any rw conflicts (in serialisation isolation level)
-* just before we intend to modify the page
+* Check for any rw conflicts (in serialisation isolation level) just
+* before we intend to modify the page
*/
CheckForSerializableConflictIn(state->r, NULL, stack->buffer);

@@ -3460,7 +3460,7 @@ simple_heap_delete(Relation relation, ItemPointer tid)
result = heap_delete(relation, tid,
GetCurrentCommandId(true), InvalidSnapshot,
true /* wait for commit */ ,
-&hufd, false /* changingPart */);
+&hufd, false /* changingPart */ );
switch (result)
{
case HeapTupleSelfUpdated:
@@ -4483,29 +4483,31 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
* functional index. Compare the new and old values of the indexed
* expression to see if we are able to use a HOT update or not.
*/
-static bool ProjIndexIsUnchanged(Relation relation, HeapTuple oldtup, HeapTuple newtup)
+static bool
+ProjIndexIsUnchanged(Relation relation, HeapTuple oldtup, HeapTuple newtup)
{
-ListCell *l;
-List *indexoidlist = RelationGetIndexList(relation);
-EState *estate = CreateExecutorState();
-ExprContext *econtext = GetPerTupleExprContext(estate);
+ListCell *l;
+List *indexoidlist = RelationGetIndexList(relation);
+EState *estate = CreateExecutorState();
+ExprContext *econtext = GetPerTupleExprContext(estate);
TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(relation));
-bool equals = true;
-Datum old_values[INDEX_MAX_KEYS];
-bool old_isnull[INDEX_MAX_KEYS];
-Datum new_values[INDEX_MAX_KEYS];
-bool new_isnull[INDEX_MAX_KEYS];
-int indexno = 0;
+bool equals = true;
+Datum old_values[INDEX_MAX_KEYS];
+bool old_isnull[INDEX_MAX_KEYS];
+Datum new_values[INDEX_MAX_KEYS];
+bool new_isnull[INDEX_MAX_KEYS];
+int indexno = 0;
+
econtext->ecxt_scantuple = slot;

foreach(l, indexoidlist)
{
if (bms_is_member(indexno, relation->rd_projidx))
{
-Oid indexOid = lfirst_oid(l);
-Relation indexDesc = index_open(indexOid, AccessShareLock);
+Oid indexOid = lfirst_oid(l);
+Relation indexDesc = index_open(indexOid, AccessShareLock);
IndexInfo *indexInfo = BuildIndexInfo(indexDesc);
-int i;
+int i;

ResetExprContext(econtext);
ExecStoreTuple(oldtup, slot, InvalidBuffer, false);
@@ -4532,6 +4534,7 @@ static bool ProjIndexIsUnchanged(Relation relation, HeapTuple oldtup, HeapTuple
else if (!old_isnull[i])
{
Form_pg_attribute att = TupleDescAttr(RelationGetDescr(indexDesc), i);
+
if (!datumIsEqual(old_values[i], new_values[i], att->attbyval, att->attlen))
{
equals = false;
@@ -6533,8 +6536,8 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
/*
* This old multi cannot possibly have members still running, but
* verify just in case. If it was a locker only, it can be removed
-* without any further consideration; but if it contained an update, we
-* might need to preserve it.
+* without any further consideration; but if it contained an update,
+* we might need to preserve it.
*/
if (MultiXactIdIsRunning(multi,
HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
@@ -6681,8 +6684,8 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
else
{
/*
-* Not in progress, not committed -- must be aborted or crashed;
-* we can ignore it.
+* Not in progress, not committed -- must be aborted or
+* crashed; we can ignore it.
*/
}

@@ -9275,6 +9278,7 @@ heap_redo(XLogReaderState *record)
heap_xlog_update(record, false);
break;
case XLOG_HEAP_TRUNCATE:
+
/*
* TRUNCATE is a no-op because the actions are already logged as
* SMGR WAL records. TRUNCATE WAL record only exists for logical

@@ -132,31 +132,31 @@ _bt_doinsert(Relation rel, IndexTuple itup,
* rightmost leaf, has enough free space to accommodate a new entry and
* the insertion key is strictly greater than the first key in this page,
* then we can safely conclude that the new key will be inserted in the
-* cached block. So we simply search within the cached block and insert the
-* key at the appropriate location. We call it a fastpath.
+* cached block. So we simply search within the cached block and insert
+* the key at the appropriate location. We call it a fastpath.
*
* Testing has revealed, though, that the fastpath can result in increased
* contention on the exclusive-lock on the rightmost leaf page. So we
-* conditionally check if the lock is available. If it's not available then
-* we simply abandon the fastpath and take the regular path. This makes
-* sense because unavailability of the lock also signals that some other
-* backend might be concurrently inserting into the page, thus reducing our
-* chances to finding an insertion place in this page.
+* conditionally check if the lock is available. If it's not available
+* then we simply abandon the fastpath and take the regular path. This
+* makes sense because unavailability of the lock also signals that some
+* other backend might be concurrently inserting into the page, thus
+* reducing our chances to finding an insertion place in this page.
*/
top:
fastpath = false;
offset = InvalidOffsetNumber;
if (RelationGetTargetBlock(rel) != InvalidBlockNumber)
{
-Size itemsz;
-Page page;
-BTPageOpaque lpageop;
+Size itemsz;
+Page page;
+BTPageOpaque lpageop;

/*
* Conditionally acquire exclusive lock on the buffer before doing any
* checks. If we don't get the lock, we simply follow slowpath. If we
-* do get the lock, this ensures that the index state cannot change, as
-* far as the rightmost part of the index is concerned.
+* do get the lock, this ensures that the index state cannot change,
+* as far as the rightmost part of the index is concerned.
*/
buf = ReadBuffer(rel, RelationGetTargetBlock(rel));

@@ -173,8 +173,8 @@ top:

/*
* Check if the page is still the rightmost leaf page, has enough
-* free space to accommodate the new tuple, and the insertion
-* scan key is strictly greater than the first key on the page.
+* free space to accommodate the new tuple, and the insertion scan
+* key is strictly greater than the first key on the page.
*/
if (P_ISLEAF(lpageop) && P_RIGHTMOST(lpageop) &&
!P_IGNORE(lpageop) &&
@@ -207,8 +207,8 @@ top:
ReleaseBuffer(buf);

/*
-* If someone's holding a lock, it's likely to change anyway,
-* so don't try again until we get an updated rightmost leaf.
+* If someone's holding a lock, it's likely to change anyway, so
+* don't try again until we get an updated rightmost leaf.
*/
RelationSetTargetBlock(rel, InvalidBlockNumber);
}
@@ -882,22 +882,22 @@ _bt_insertonpg(Relation rel,
Buffer rbuf;

/*
-* If we're here then a pagesplit is needed. We should never reach here
-* if we're using the fastpath since we should have checked for all the
-* required conditions, including the fact that this page has enough
-* freespace. Note that this routine can in theory deal with the
-* situation where a NULL stack pointer is passed (that's what would
-* happen if the fastpath is taken), like it does during crash
+* If we're here then a pagesplit is needed. We should never reach
+* here if we're using the fastpath since we should have checked for
+* all the required conditions, including the fact that this page has
+* enough freespace. Note that this routine can in theory deal with
+* the situation where a NULL stack pointer is passed (that's what
+* would happen if the fastpath is taken), like it does during crash
* recovery. But that path is much slower, defeating the very purpose
-* of the optimization. The following assertion should protect us from
-* any future code changes that invalidate those assumptions.
+* of the optimization. The following assertion should protect us
+* from any future code changes that invalidate those assumptions.
*
* Note that whenever we fail to take the fastpath, we clear the
* cached block. Checking for a valid cached block at this point is
* enough to decide whether we're in a fastpath or not.
*/
Assert(!(P_ISLEAF(lpageop) &&
-BlockNumberIsValid(RelationGetTargetBlock(rel))));
+BlockNumberIsValid(RelationGetTargetBlock(rel))));

/* Choose the split point */
firstright = _bt_findsplitloc(rel, page,
@@ -936,7 +936,7 @@ _bt_insertonpg(Relation rel,
BTMetaPageData *metad = NULL;
OffsetNumber itup_off;
BlockNumber itup_blkno;
-BlockNumber cachedBlock = InvalidBlockNumber;
+BlockNumber cachedBlock = InvalidBlockNumber;

itup_off = newitemoff;
itup_blkno = BufferGetBlockNumber(buf);
@@ -1093,7 +1093,8 @@ _bt_insertonpg(Relation rel,
* We do this after dropping locks on all buffers. So the information
* about whether the insertion block is still the rightmost block or
* not may have changed in between. But we will deal with that during
-* next insert operation. No special care is required while setting it.
+* next insert operation. No special care is required while setting
+* it.
*/
if (BlockNumberIsValid(cachedBlock) &&
_bt_getrootheight(rel) >= BTREE_FASTPATH_MIN_LEVEL)

@@ -155,11 +155,11 @@ void
_bt_update_meta_cleanup_info(Relation rel, TransactionId oldestBtpoXact,
float8 numHeapTuples)
{
-Buffer metabuf;
-Page metapg;
+Buffer metabuf;
+Page metapg;
BTMetaPageData *metad;
-bool needsRewrite = false;
-XLogRecPtr recptr;
+bool needsRewrite = false;
+XLogRecPtr recptr;

/* read the metapage and check if it needs rewrite */
metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);

@@ -785,10 +785,10 @@ _bt_parallel_advance_array_keys(IndexScanDesc scan)
static bool
_bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
{
-Buffer metabuf;
-Page metapg;
+Buffer metabuf;
+Page metapg;
BTMetaPageData *metad;
-bool result = false;
+bool result = false;

metabuf = _bt_getbuf(info->index, BTREE_METAPAGE, BT_READ);
metapg = BufferGetPage(metabuf);
@@ -814,8 +814,8 @@ _bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
}
else
{
-StdRdOptions *relopts;
-float8 cleanup_scale_factor;
+StdRdOptions *relopts;
+float8 cleanup_scale_factor;

/*
* If table receives large enough amount of insertions and no cleanup
@@ -825,14 +825,14 @@ _bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
*/
relopts = (StdRdOptions *) info->index->rd_options;
cleanup_scale_factor = (relopts &&
-relopts->vacuum_cleanup_index_scale_factor >= 0)
-? relopts->vacuum_cleanup_index_scale_factor
-: vacuum_cleanup_index_scale_factor;
+relopts->vacuum_cleanup_index_scale_factor >= 0)
+? relopts->vacuum_cleanup_index_scale_factor
+: vacuum_cleanup_index_scale_factor;

if (cleanup_scale_factor < 0 ||
metad->btm_last_cleanup_num_heap_tuples < 0 ||
info->num_heap_tuples > (1.0 + cleanup_scale_factor) *
-metad->btm_last_cleanup_num_heap_tuples)
+metad->btm_last_cleanup_num_heap_tuples)
result = true;
}

@@ -862,7 +862,7 @@ btbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
/* The ENSURE stuff ensures we clean up shared memory on failure */
PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel));
{
-TransactionId oldestBtpoXact;
+TransactionId oldestBtpoXact;

cycleid = _bt_start_vacuum(rel);

@@ -907,7 +907,7 @@ btvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
*/
if (stats == NULL)
{
-TransactionId oldestBtpoXact;
+TransactionId oldestBtpoXact;

/* Check if we need a cleanup */
if (!_bt_vacuum_needs_cleanup(info))

@@ -897,10 +897,10 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
/*
* Truncate any non-key attributes from high key on leaf level
* (i.e. truncate on leaf level if we're building an INCLUDE
-* index). This is only done at the leaf level because
-* downlinks in internal pages are either negative infinity
-* items, or get their contents from copying from one level
-* down. See also: _bt_split().
+* index). This is only done at the leaf level because downlinks
+* in internal pages are either negative infinity items, or get
+* their contents from copying from one level down. See also:
+* _bt_split().
*
* Since the truncated tuple is probably smaller than the
* original, it cannot just be copied in place (besides, we want
@@ -908,11 +908,11 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
* original high key, and add our own truncated high key at the
* same offset.
*
-* Note that the page layout won't be changed very much. oitup
-* is already located at the physical beginning of tuple space,
-* so we only shift the line pointer array back and forth, and
-* overwrite the latter portion of the space occupied by the
-* original tuple. This is fairly cheap.
+* Note that the page layout won't be changed very much. oitup is
+* already located at the physical beginning of tuple space, so we
+* only shift the line pointer array back and forth, and overwrite
+* the latter portion of the space occupied by the original tuple.
+* This is fairly cheap.
*/
truncated = _bt_nonkey_truncate(wstate->index, oitup);
truncsz = IndexTupleSize(truncated);
@@ -978,7 +978,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
*/
if (last_off == P_HIKEY)
{
-BTPageOpaque npageop;
+BTPageOpaque npageop;

Assert(state->btps_minkey == NULL);

@@ -2101,12 +2101,12 @@ btproperty(Oid index_oid, int attno,
IndexTuple
_bt_nonkey_truncate(Relation rel, IndexTuple itup)
{
-int nkeyattrs = IndexRelationGetNumberOfKeyAttributes(rel);
-IndexTuple truncated;
+int nkeyattrs = IndexRelationGetNumberOfKeyAttributes(rel);
+IndexTuple truncated;

/*
-* We should only ever truncate leaf index tuples, which must have both key
-* and non-key attributes. It's never okay to truncate a second time.
+* We should only ever truncate leaf index tuples, which must have both
+* key and non-key attributes. It's never okay to truncate a second time.
*/
Assert(BTreeTupleGetNAtts(itup, rel) ==
IndexRelationGetNumberOfAttributes(rel));
@@ -2133,10 +2133,10 @@ _bt_nonkey_truncate(Relation rel, IndexTuple itup)
bool
_bt_check_natts(Relation rel, Page page, OffsetNumber offnum)
{
-int16 natts = IndexRelationGetNumberOfAttributes(rel);
-int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
-BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
-IndexTuple itup;
+int16 natts = IndexRelationGetNumberOfAttributes(rel);
+int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
+BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+IndexTuple itup;

/*
* We cannot reliably test a deleted or half-deleted page, since they have
@@ -2147,6 +2147,7 @@ _bt_check_natts(Relation rel, Page page, OffsetNumber offnum)
Assert(offnum >= FirstOffsetNumber &&
offnum <= PageGetMaxOffsetNumber(page));
+
/*
* Mask allocated for number of keys in index tuple must be able to fit
* maximum possible number of index attributes

@@ -2178,29 +2179,29 @@ _bt_check_natts(Relation rel, Page page, OffsetNumber offnum)
return BTreeTupleGetNAtts(itup, rel) == nkeyatts;
}
}
-else /* !P_ISLEAF(opaque) */
+else /* !P_ISLEAF(opaque) */
{
if (offnum == P_FIRSTDATAKEY(opaque))
{
/*
* The first tuple on any internal page (possibly the first after
-* its high key) is its negative infinity tuple. Negative infinity
-* tuples are always truncated to zero attributes. They are a
-* particular kind of pivot tuple.
+* its high key) is its negative infinity tuple. Negative
+* infinity tuples are always truncated to zero attributes. They
+* are a particular kind of pivot tuple.
*
* The number of attributes won't be explicitly represented if the
* negative infinity tuple was generated during a page split that
-* occurred with a version of Postgres before v11. There must be a
-* problem when there is an explicit representation that is
+* occurred with a version of Postgres before v11. There must be
+* a problem when there is an explicit representation that is
* non-zero, or when there is no explicit representation and the
* tuple is evidently not a pre-pg_upgrade tuple.
*
-* Prior to v11, downlinks always had P_HIKEY as their offset. Use
-* that to decide if the tuple is a pre-v11 tuple.
+* Prior to v11, downlinks always had P_HIKEY as their offset.
+* Use that to decide if the tuple is a pre-v11 tuple.
*/
return BTreeTupleGetNAtts(itup, rel) == 0 ||
-((itup->t_info & INDEX_ALT_TID_MASK) == 0 &&
-ItemPointerGetOffsetNumber(&(itup->t_tid)) == P_HIKEY);
+((itup->t_info & INDEX_ALT_TID_MASK) == 0 &&
+ItemPointerGetOffsetNumber(&(itup->t_tid)) == P_HIKEY);
}
else
{

@@ -1908,11 +1908,12 @@ spgdoinsert(Relation index, SpGistState *state,
/*
* Prepare the leaf datum to insert.
*
-* If an optional "compress" method is provided, then call it to form
-* the leaf datum from the input datum. Otherwise store the input datum as
-* is. Since we don't use index_form_tuple in this AM, we have to make sure
-* value to be inserted is not toasted; FormIndexDatum doesn't guarantee
-* that. But we assume the "compress" method to return an untoasted value.
+* If an optional "compress" method is provided, then call it to form the
+* leaf datum from the input datum. Otherwise store the input datum as
+* is. Since we don't use index_form_tuple in this AM, we have to make
+* sure value to be inserted is not toasted; FormIndexDatum doesn't
+* guarantee that. But we assume the "compress" method to return an
+* untoasted value.
*/
if (!isnull)
{

@@ -53,7 +53,7 @@ spgvalidate(Oid opclassoid)
OpFamilyOpFuncGroup *opclassgroup;
int i;
ListCell *lc;
-spgConfigIn configIn;
+spgConfigIn configIn;
spgConfigOut configOut;
Oid configOutLefttype = InvalidOid;
Oid configOutRighttype = InvalidOid;
@@ -119,9 +119,9 @@ spgvalidate(Oid opclassoid)
configOutRighttype = procform->amprocrighttype;

/*
-* When leaf and attribute types are the same, compress function
-* is not required and we set corresponding bit in functionset
-* for later group consistency check.
+* When leaf and attribute types are the same, compress
+* function is not required and we set corresponding bit in
+* functionset for later group consistency check.
*/
if (!OidIsValid(configOut.leafType) ||
configOut.leafType == configIn.attType)

@@ -913,7 +913,7 @@ typedef struct TwoPhaseFileHeader
bool initfileinval; /* does relcache init file need invalidation? */
uint16 gidlen; /* length of the GID - GID follows the header */
XLogRecPtr origin_lsn; /* lsn of this record at origin node */
-TimestampTz origin_timestamp; /* time of prepare at origin node */
+TimestampTz origin_timestamp; /* time of prepare at origin node */
} TwoPhaseFileHeader;

/*
@@ -1065,7 +1065,7 @@ EndPrepare(GlobalTransaction gxact)
{
TwoPhaseFileHeader *hdr;
StateFileChunk *record;
-bool replorigin;
+bool replorigin;

/* Add the end sentinel to the list of 2PC records */
RegisterTwoPhaseRecord(TWOPHASE_RM_END_ID, 0,
@@ -1317,7 +1317,7 @@ void
ParsePrepareRecord(uint8 info, char *xlrec, xl_xact_parsed_prepare *parsed)
{
TwoPhaseFileHeader *hdr;
-char *bufptr;
+char *bufptr;

hdr = (TwoPhaseFileHeader *) xlrec;
bufptr = xlrec + MAXALIGN(sizeof(TwoPhaseFileHeader));

@@ -3267,8 +3267,8 @@ bool
IsInTransactionBlock(bool isTopLevel)
{
/*
-* Return true on same conditions that would make PreventInTransactionBlock
-* error out
+* Return true on same conditions that would make
+* PreventInTransactionBlock error out
*/
if (IsTransactionBlock())
return true;
@@ -5448,9 +5448,9 @@ XactLogAbortRecord(TimestampTz abort_time,
}

/* dump transaction origin information only for abort prepared */
-if ( (replorigin_session_origin != InvalidRepOriginId) &&
-TransactionIdIsValid(twophase_xid) &&
-XLogLogicalInfoActive())
+if ((replorigin_session_origin != InvalidRepOriginId) &&
+TransactionIdIsValid(twophase_xid) &&
+XLogLogicalInfoActive())
{
xl_xinfo.xinfo |= XACT_XINFO_HAS_ORIGIN;

@@ -10656,10 +10656,9 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
* Mark that start phase has correctly finished for an exclusive backup.
* Session-level locks are updated as well to reflect that state.
*
-* Note that CHECK_FOR_INTERRUPTS() must not occur while updating
-* backup counters and session-level lock. Otherwise they can be
-* updated inconsistently, and which might cause do_pg_abort_backup()
-* to fail.
+* Note that CHECK_FOR_INTERRUPTS() must not occur while updating backup
+* counters and session-level lock. Otherwise they can be updated
+* inconsistently, and which might cause do_pg_abort_backup() to fail.
*/
if (exclusive)
{
@@ -10904,11 +10903,11 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
/*
* Clean up session-level lock.
*
-* You might think that WALInsertLockRelease() can be called
-* before cleaning up session-level lock because session-level
-* lock doesn't need to be protected with WAL insertion lock.
-* But since CHECK_FOR_INTERRUPTS() can occur in it,
-* session-level lock must be cleaned up before it.
+* You might think that WALInsertLockRelease() can be called before
+* cleaning up session-level lock because session-level lock doesn't need
+* to be protected with WAL insertion lock. But since
+* CHECK_FOR_INTERRUPTS() can occur in it, session-level lock must be
+* cleaned up before it.
*/
sessionBackupState = SESSION_BACKUP_NONE;

@@ -11042,6 +11041,7 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
(uint32) (startpoint >> 32), (uint32) startpoint, startxlogfilename);
fprintf(fp, "STOP WAL LOCATION: %X/%X (file %s)\n",
(uint32) (stoppoint >> 32), (uint32) stoppoint, stopxlogfilename);
+
/*
* Transfer remaining lines including label and start timeline to
* history file.
@@ -11259,7 +11259,8 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
bool *backupFromStandby)
{
char startxlogfilename[MAXFNAMELEN];
-TimeLineID tli_from_walseg, tli_from_file;
+TimeLineID tli_from_walseg,
+tli_from_file;
FILE *lfp;
char ch;
char backuptype[20];
@@ -11322,13 +11323,13 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
}

/*
-* Parse START TIME and LABEL. Those are not mandatory fields for
-* recovery but checking for their presence is useful for debugging
-* and the next sanity checks. Cope also with the fact that the
-* result buffers have a pre-allocated size, hence if the backup_label
-* file has been generated with strings longer than the maximum assumed
-* here an incorrect parsing happens. That's fine as only minor
-* consistency checks are done afterwards.
+* Parse START TIME and LABEL. Those are not mandatory fields for recovery
+* but checking for their presence is useful for debugging and the next
+* sanity checks. Cope also with the fact that the result buffers have a
+* pre-allocated size, hence if the backup_label file has been generated
+* with strings longer than the maximum assumed here an incorrect parsing
+* happens. That's fine as only minor consistency checks are done
+* afterwards.
*/
if (fscanf(lfp, "START TIME: %127[^\n]\n", backuptime) == 1)
ereport(DEBUG1,
@@ -11341,8 +11342,8 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
backuplabel, BACKUP_LABEL_FILE)));

/*
-* START TIMELINE is new as of 11. Its parsing is not mandatory, still
-* use it as a sanity check if present.
+* START TIMELINE is new as of 11. Its parsing is not mandatory, still use
+* it as a sanity check if present.
*/
if (fscanf(lfp, "START TIMELINE: %u\n", &tli_from_file) == 1)
{