mirror of https://github.com/postgres/postgres.git synced 2025-06-07 11:02:12 +03:00

Initial pgindent run for v12.

This is still using the 2.0 version of pg_bsd_indent.
I thought it would be good to commit this separately,
so as to document the differences between 2.0 and 2.1 behavior.

Discussion: https://postgr.es/m/16296.1558103386@sss.pgh.pa.us
Tom Lane 2019-05-22 12:55:34 -04:00
parent 66a4bad83a
commit be76af171c
221 changed files with 1433 additions and 1302 deletions
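For readers who have not seen a pgindent pass before, the short C fragment below illustrates the recurring changes visible in the hunks that follow: pointer declarators are attached to the variable name, multi-variable declarations are split onto one line per variable, and block comments are re-wrapped to a fixed width. It is an illustrative example written for this page, not code taken from the commit.

    #include <assert.h>

    /*
     * Illustrative only: before the run, declarations such as
     * "IndexTuple itup, norm;" and prototypes such as
     * "mp_result mp_int_to_int(mp_int z, mp_small * out);" appear in the
     * old code; pg_bsd_indent rewrites them into the style shown here.
     */
    static int
    example(int *out)               /* "int * out" becomes "int *out" */
    {
        int         itup,           /* one declarator per line, */
                    norm;           /* continuation aligned under the name */

        /*
         * Block comments are re-flowed so that no line runs past the target
         * width; continuation lines keep the leading " * ".
         */
        itup = 1;
        norm = 2;
        assert(out != NULL);
        *out = itup + norm;
        return 0;
    }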


@@ -1851,7 +1851,8 @@ bt_tuple_present_callback(Relation index, HeapTuple htup, Datum *values,
                          bool *isnull, bool tupleIsAlive, void *checkstate)
 {
     BtreeCheckState *state = (BtreeCheckState *) checkstate;
-    IndexTuple  itup, norm;
+    IndexTuple  itup,
+                norm;
 
     Assert(state->heapallindexed);


@@ -922,8 +922,11 @@ check_selective_binary_conversion(RelOptInfo *baserel,
         /* Skip dropped attributes (probably shouldn't see any here). */
         if (attr->attisdropped)
             continue;
-        /* Skip generated columns (COPY won't accept them in the column
-         * list) */
+
+        /*
+         * Skip generated columns (COPY won't accept them in the column
+         * list)
+         */
         if (attr->attgenerated)
             continue;
         *columns = lappend(*columns, makeString(pstrdup(attname)));


@@ -214,39 +214,42 @@ g_int_compress(PG_FUNCTION_ARGS)
         {
             int         r_end = dr[i];
             int         r_start = r_end;
 
-            while (i > 0 && lenr > 0 && dr[i-1] == r_start - 1)
+            while (i > 0 && lenr > 0 && dr[i - 1] == r_start - 1)
                 --r_start, --i, --lenr;
-            dr[2*j] = r_start;
-            dr[2*j+1] = r_end;
+            dr[2 * j] = r_start;
+            dr[2 * j + 1] = r_end;
         }
         /* just copy the rest, if any, as trivial ranges */
         for (; i >= 0; i--, j--)
-            dr[2*j] = dr[2*j + 1] = dr[i];
+            dr[2 * j] = dr[2 * j + 1] = dr[i];
 
         if (++j)
         {
             /*
              * shunt everything down to start at the right place
              */
-            memmove((void *) &dr[0], (void *) &dr[2*j], 2*(len - j) * sizeof(int32));
+            memmove((void *) &dr[0], (void *) &dr[2 * j], 2 * (len - j) * sizeof(int32));
         }
 
         /*
         * make "len" be number of array elements, not ranges
         */
-        len = 2*(len - j);
+        len = 2 * (len - j);
         cand = 1;
         while (len > MAXNUMRANGE * 2)
         {
             min = PG_INT64_MAX;
             for (i = 2; i < len; i += 2)
-                if (min > ((int64)dr[i] - (int64)dr[i - 1]))
+                if (min > ((int64) dr[i] - (int64) dr[i - 1]))
                 {
-                    min = ((int64)dr[i] - (int64)dr[i - 1]);
+                    min = ((int64) dr[i] - (int64) dr[i - 1]);
                     cand = i;
                 }
             memmove((void *) &dr[cand - 1], (void *) &dr[cand + 1], (len - cand - 1) * sizeof(int32));
             len -= 2;
         }
 
         /*
         * check sparseness of result
         */


@@ -298,10 +298,10 @@ internal_size(int *a, int len)
     for (i = 0; i < len; i += 2)
     {
         if (!i || a[i] != a[i - 1])     /* do not count repeated range */
-            size += (int64)(a[i + 1]) - (int64)(a[i]) + 1;
+            size += (int64) (a[i + 1]) - (int64) (a[i]) + 1;
     }
 
-    if (size > (int64)INT_MAX || size < (int64)INT_MIN)
+    if (size > (int64) INT_MAX || size < (int64) INT_MIN)
         return -1;              /* overflow */
     return (int) size;
 }


@@ -1153,8 +1153,9 @@ pgss_store(const char *query, uint64 queryId,
         queryId = pgss_hash_string(query, query_len);
 
         /*
-         * If we are unlucky enough to get a hash of zero(invalid), use queryID
-         * as 2 instead, queryID 1 is already in use for normal statements.
+         * If we are unlucky enough to get a hash of zero(invalid), use
+         * queryID as 2 instead, queryID 1 is already in use for normal
+         * statements.
          */
         if (queryId == UINT64CONST(0))
             queryId = UINT64CONST(2);


@@ -1138,7 +1138,7 @@ mp_int_mod(mp_int a, mp_int m, mp_int c)
 }
 
 mp_result
-mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small * r)
+mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small *r)
 {
     mpz_t       vtmp;
     mp_digit    vbuf[MP_VALUE_DIGITS(value)];
@@ -1819,7 +1819,7 @@ mp_int_root(mp_int a, mp_small b, mp_int c)
 }
 
 mp_result
-mp_int_to_int(mp_int z, mp_small * out)
+mp_int_to_int(mp_int z, mp_small *out)
 {
     assert(z != NULL);
@@ -1850,7 +1850,7 @@ mp_int_to_int(mp_int z, mp_small * out)
 }
 
 mp_result
-mp_int_to_uint(mp_int z, mp_usmall * out)
+mp_int_to_uint(mp_int z, mp_usmall *out)
 {
     assert(z != NULL);


@@ -218,7 +218,7 @@ mp_result mp_int_div(mp_int a, mp_int b, mp_int q, mp_int r);
 /** Sets `q` and `*r` to the quotent and remainder of `a / value`. Division by
     powers of 2 is detected and handled efficiently. The remainder is pinned to
     `0 <= *r < b`. Either of `q` or `r` may be NULL. */
-mp_result mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small * r);
+mp_result mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small *r);
 
 /** Sets `q` and `r` to the quotient and remainder of `a / 2^p2`. This is a
     special case for division by powers of two that is more efficient than
@@ -246,7 +246,7 @@ mp_result mp_int_expt_full(mp_int a, mp_int b, mp_int c);
     The remainder is pinned to `0 <= r < value`. */
 static inline
 mp_result
-mp_int_mod_value(mp_int a, mp_small value, mp_small * r)
+mp_int_mod_value(mp_int a, mp_small value, mp_small *r)
 {
     return mp_int_div_value(a, value, 0, r);
 }
@@ -339,11 +339,11 @@ mp_int_sqrt(mp_int a, mp_int c)
 /** Returns `MP_OK` if `z` is representable as `mp_small`, else `MP_RANGE`.
     If `out` is not NULL, `*out` is set to the value of `z` when `MP_OK`. */
-mp_result mp_int_to_int(mp_int z, mp_small * out);
+mp_result mp_int_to_int(mp_int z, mp_small *out);
 
 /** Returns `MP_OK` if `z` is representable as `mp_usmall`, or `MP_RANGE`.
     If `out` is not NULL, `*out` is set to the value of `z` when `MP_OK`. */
-mp_result mp_int_to_uint(mp_int z, mp_usmall * out);
+mp_result mp_int_to_uint(mp_int z, mp_usmall *out);
 
 /** Converts `z` to a zero-terminated string of characters in the specified
     `radix`, writing at most `limit` characters to `str` including the


@@ -1934,11 +1934,11 @@ postgresBeginForeignInsert(ModifyTableState *mtstate,
     bool        doNothing = false;
 
     /*
-     * If the foreign table we are about to insert routed rows into is also
-     * an UPDATE subplan result rel that will be updated later, proceeding
-     * with the INSERT will result in the later UPDATE incorrectly modifying
-     * those routed rows, so prevent the INSERT --- it would be nice if we
-     * could handle this case; but for now, throw an error for safety.
+     * If the foreign table we are about to insert routed rows into is also an
+     * UPDATE subplan result rel that will be updated later, proceeding with
+     * the INSERT will result in the later UPDATE incorrectly modifying those
+     * routed rows, so prevent the INSERT --- it would be nice if we could
+     * handle this case; but for now, throw an error for safety.
      */
     if (plan && plan->operation == CMD_UPDATE &&
         (resultRelInfo->ri_usesFdwDirectModify ||
@@ -3773,6 +3773,7 @@ store_returning_result(PgFdwModifyState *fmstate,
                             fmstate->retrieved_attrs,
                             NULL,
                             fmstate->temp_cxt);
+
         /*
          * The returning slot will not necessarily be suitable to store
          * heaptuples directly, so allow for conversion.
@@ -6059,8 +6060,8 @@ add_foreign_final_paths(PlannerInfo *root, RelOptInfo *input_rel,
     /*
      * Grouping and aggregation are not supported with FOR UPDATE/SHARE,
      * so the input_rel should be a base, join, or ordered relation; and
-     * if it's an ordered relation, its input relation should be a base
-     * or join relation.
+     * if it's an ordered relation, its input relation should be a base or
+     * join relation.
      */
     Assert(input_rel->reloptkind == RELOPT_BASEREL ||
            input_rel->reloptkind == RELOPT_JOINREL ||


@@ -787,8 +787,8 @@ expand_tuple(HeapTuple *targetHeapTuple,
         }
 
         /*
-         * Now walk the missing attributes. If there is a missing value
-         * make space for it. Otherwise, it's going to be NULL.
+         * Now walk the missing attributes. If there is a missing value make
+         * space for it. Otherwise, it's going to be NULL.
          */
         for (attnum = firstmissingnum;
              attnum < natts;


@@ -403,8 +403,8 @@ ginVacuumPostingTree(GinVacuumState *gvs, BlockNumber rootBlkno)
                                 RBM_NORMAL, gvs->strategy);
 
     /*
-     * Lock posting tree root for cleanup to ensure there are no concurrent
-     * inserts.
+     * Lock posting tree root for cleanup to ensure there are no
+     * concurrent inserts.
      */
     LockBufferForCleanup(buffer);


@@ -205,8 +205,8 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data)
         while (segno < a_segno)
         {
             /*
-             * Once modification is started and page tail is copied, we've
-             * to copy unmodified segments.
+             * Once modification is started and page tail is copied, we've to
+             * copy unmodified segments.
              */
             segsize = SizeOfGinPostingList(oldseg);
             if (tailCopy)
@@ -257,8 +257,8 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data)
         }
 
         /*
-         * We're about to start modification of the page. So, copy tail of the
-         * page if it's not done already.
+         * We're about to start modification of the page. So, copy tail of
+         * the page if it's not done already.
         */
         if (!tailCopy && segptr != segmentend)
         {


@@ -839,16 +839,16 @@ gistNewBuffer(Relation r)
             gistcheckpage(r, buffer);
 
             /*
-             * Otherwise, recycle it if deleted, and too old to have any processes
-             * interested in it.
+             * Otherwise, recycle it if deleted, and too old to have any
+             * processes interested in it.
              */
             if (gistPageRecyclable(page))
             {
                 /*
-                 * If we are generating WAL for Hot Standby then create a
-                 * WAL record that will allow us to conflict with queries
-                 * running on standby, in case they have snapshots older
-                 * than the page's deleteXid.
+                 * If we are generating WAL for Hot Standby then create a WAL
+                 * record that will allow us to conflict with queries running
+                 * on standby, in case they have snapshots older than the
+                 * page's deleteXid.
                  */
                 if (XLogStandbyInfoActive() && RelationNeedsWAL(r))
                     gistXLogPageReuse(r, blkno, GistPageGetDeleteXid(page));


@@ -474,6 +474,7 @@ tuple_lock_retry:
                         HeapTupleHeaderGetCmin(tuple->t_data) >= cid)
                     {
                         tmfd->xmax = priorXmax;
+
                         /*
                          * Cmin is the problematic value, so store that. See
                          * above.


@@ -1067,9 +1067,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
                      * cheaper to get rid of it in the next pruning pass than
                      * to treat it like an indexed tuple. Finally, if index
                      * cleanup is disabled, the second heap pass will not
-                     * execute, and the tuple will not get removed, so we
-                     * must treat it like any other dead tuple that we choose
-                     * to keep.
+                     * execute, and the tuple will not get removed, so we must
+                     * treat it like any other dead tuple that we choose to
+                     * keep.
                      *
                      * If this were to happen for a tuple that actually needed
                      * to be deleted, we'd be in trouble, because it'd
@@ -1087,6 +1087,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
                     all_visible = false;
                     break;
                 case HEAPTUPLE_LIVE:
+
                     /*
                      * Count it as live. Not only is this natural, but it's
                      * also what acquire_sample_rows() does.
@@ -1251,13 +1252,14 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
         else
         {
             /*
-             * Here, we have indexes but index cleanup is disabled. Instead of
-             * vacuuming the dead tuples on the heap, we just forget them.
+             * Here, we have indexes but index cleanup is disabled.
+             * Instead of vacuuming the dead tuples on the heap, we just
+             * forget them.
              *
              * Note that vacrelstats->dead_tuples could have tuples which
              * became dead after HOT-pruning but are not marked dead yet.
-             * We do not process them because it's a very rare condition, and
-             * the next vacuum will process them anyway.
+             * We do not process them because it's a very rare condition,
+             * and the next vacuum will process them anyway.
              */
             Assert(params->index_cleanup == VACOPT_TERNARY_DISABLED);
         }


@@ -1811,11 +1811,11 @@ _bt_insert_parent(Relation rel,
         /*
          * Re-find and write lock the parent of buf.
          *
-         * It's possible that the location of buf's downlink has changed
-         * since our initial _bt_search() descent. _bt_getstackbuf() will
-         * detect and recover from this, updating the stack, which ensures
-         * that the new downlink will be inserted at the correct offset.
-         * Even buf's parent may have changed.
+         * It's possible that the location of buf's downlink has changed since
+         * our initial _bt_search() descent. _bt_getstackbuf() will detect
+         * and recover from this, updating the stack, which ensures that the
+         * new downlink will be inserted at the correct offset. Even buf's
+         * parent may have changed.
          */
         stack->bts_btentry = bknum;
         pbuf = _bt_getstackbuf(rel, stack);


@@ -166,8 +166,8 @@ _bt_search(Relation rel, BTScanInsert key, Buffer *bufP, int access,
         new_stack->bts_parent = stack_in;
 
         /*
-         * Page level 1 is lowest non-leaf page level prior to leaves. So,
-         * if we're on the level 1 and asked to lock leaf page in write mode,
+         * Page level 1 is lowest non-leaf page level prior to leaves. So, if
+         * we're on the level 1 and asked to lock leaf page in write mode,
          * then lock next page in write mode, because it must be a leaf.
          */
         if (opaque->btpo.level == 1 && access == BT_WRITE)
@@ -1235,7 +1235,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
     /* Initialize remaining insertion scan key fields */
     inskey.heapkeyspace = _bt_heapkeyspace(rel);
-    inskey.anynullkeys = false; /* unusued */
+    inskey.anynullkeys = false; /* unused */
     inskey.nextkey = nextkey;
     inskey.pivotsearch = false;
     inskey.scantid = NULL;


@@ -962,10 +962,10 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
              * much smaller.
              *
              * Since the truncated tuple is often smaller than the original
-             * tuple, it cannot just be copied in place (besides, we want
-             * to actually save space on the leaf page). We delete the
-             * original high key, and add our own truncated high key at the
-             * same offset.
+             * tuple, it cannot just be copied in place (besides, we want to
+             * actually save space on the leaf page). We delete the original
+             * high key, and add our own truncated high key at the same
+             * offset.
              *
              * Note that the page layout won't be changed very much. oitup is
              * already located at the physical beginning of tuple space, so we


@@ -79,7 +79,7 @@ pairingheap_SpGistSearchItem_cmp(const pairingheap_node *a,
 }
 
 static void
-spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem * item)
+spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem *item)
 {
     if (!so->state.attLeafType.attbyval &&
         DatumGetPointer(item->value) != NULL)
@@ -97,7 +97,7 @@ spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem * item)
  * Called in queue context
  */
 static void
-spgAddSearchItemToQueue(SpGistScanOpaque so, SpGistSearchItem * item)
+spgAddSearchItemToQueue(SpGistScanOpaque so, SpGistSearchItem *item)
 {
     pairingheap_add(so->scanQueue, &item->phNode);
 }
@@ -439,7 +439,7 @@ spgNewHeapItem(SpGistScanOpaque so, int level, ItemPointer heapPtr,
  * the scan is not ordered AND the item satisfies the scankeys
 */
 static bool
-spgLeafTest(SpGistScanOpaque so, SpGistSearchItem * item,
+spgLeafTest(SpGistScanOpaque so, SpGistSearchItem *item,
             SpGistLeafTuple leafTuple, bool isnull,
             bool *reportedSome, storeRes_func storeRes)
 {
@@ -530,7 +530,7 @@ spgLeafTest(SpGistScanOpaque so, SpGistSearchItem * item,
 static void
 spgInitInnerConsistentIn(spgInnerConsistentIn *in,
                          SpGistScanOpaque so,
-                         SpGistSearchItem * item,
+                         SpGistSearchItem *item,
                          SpGistInnerTuple innerTuple)
 {
     in->scankeys = so->keyData;
@@ -551,7 +551,7 @@ spgInitInnerConsistentIn(spgInnerConsistentIn *in,
 static SpGistSearchItem *
 spgMakeInnerItem(SpGistScanOpaque so,
-                 SpGistSearchItem * parentItem,
+                 SpGistSearchItem *parentItem,
                  SpGistNodeTuple tuple,
                  spgInnerConsistentOut *out, int i, bool isnull,
                  double *distances)
@@ -585,7 +585,7 @@ spgMakeInnerItem(SpGistScanOpaque so,
 }
 
 static void
-spgInnerTest(SpGistScanOpaque so, SpGistSearchItem * item,
+spgInnerTest(SpGistScanOpaque so, SpGistSearchItem *item,
              SpGistInnerTuple innerTuple, bool isnull)
 {
     MemoryContext oldCxt = MemoryContextSwitchTo(so->tempCxt);
@@ -683,7 +683,7 @@ enum SpGistSpecialOffsetNumbers
 static OffsetNumber
 spgTestLeafTuple(SpGistScanOpaque so,
-                 SpGistSearchItem * item,
+                 SpGistSearchItem *item,
                  Page page, OffsetNumber offset,
                  bool isnull, bool isroot,
                  bool *reportedSome,


@@ -192,9 +192,9 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
              * happened since VACUUM started.
              *
              * Note: we could make a tighter test by seeing if the xid is
-             * "running" according to the active snapshot; but snapmgr.c doesn't
-             * currently export a suitable API, and it's not entirely clear
-             * that a tighter test is worth the cycles anyway.
+             * "running" according to the active snapshot; but snapmgr.c
+             * doesn't currently export a suitable API, and it's not entirely
+             * clear that a tighter test is worth the cycles anyway.
              */
             if (TransactionIdFollowsOrEquals(dt->xid, bds->myXmin))
                 spgAddPendingTID(bds, &dt->pointer);


@@ -570,9 +570,9 @@ AssignTransactionId(TransactionState s)
     /*
      * Ensure parent(s) have XIDs, so that a child always has an XID later
-     * than its parent. Mustn't recurse here, or we might get a stack overflow
-     * if we're at the bottom of a huge stack of subtransactions none of which
-     * have XIDs yet.
+     * than its parent. Mustn't recurse here, or we might get a stack
+     * overflow if we're at the bottom of a huge stack of subtransactions none
+     * of which have XIDs yet.
      */
     if (isSubXact && !FullTransactionIdIsValid(s->parent->fullTransactionId))
     {


@@ -2550,8 +2550,8 @@ AddRelationNewConstraints(Relation rel,
         /*
          * If the expression is just a NULL constant, we do not bother to make
          * an explicit pg_attrdef entry, since the default behavior is
-         * equivalent. This applies to column defaults, but not for generation
-         * expressions.
+         * equivalent. This applies to column defaults, but not for
+         * generation expressions.
          *
          * Note a nonobvious property of this test: if the column is of a
         * domain type, what we'll get is not a bare null Const but a


@@ -1236,8 +1236,7 @@ index_concurrently_create_copy(Relation heapRelation, Oid oldIndexId, const char
                                 Anum_pg_class_reloptions, &isnull);
 
     /*
-     * Extract the list of column names to be used for the index
-     * creation.
+     * Extract the list of column names to be used for the index creation.
     */
     for (int i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
     {
@@ -1583,7 +1582,11 @@ index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char *oldName)
             newClassRel->pgstat_info->t_counts.t_tuples_fetched = tabentry->tuples_fetched;
             newClassRel->pgstat_info->t_counts.t_blocks_fetched = tabentry->blocks_fetched;
             newClassRel->pgstat_info->t_counts.t_blocks_hit = tabentry->blocks_hit;
-            /* The data will be sent by the next pgstat_report_stat() call. */
+
+            /*
+             * The data will be sent by the next pgstat_report_stat()
+             * call.
+             */
         }
     }
 }
@@ -1614,27 +1617,26 @@ index_concurrently_set_dead(Oid heapId, Oid indexId)
     Relation    userIndexRelation;
 
     /*
-     * No more predicate locks will be acquired on this index, and we're
-     * about to stop doing inserts into the index which could show
-     * conflicts with existing predicate locks, so now is the time to move
-     * them to the heap relation.
+     * No more predicate locks will be acquired on this index, and we're about
+     * to stop doing inserts into the index which could show conflicts with
+     * existing predicate locks, so now is the time to move them to the heap
+     * relation.
     */
     userHeapRelation = table_open(heapId, ShareUpdateExclusiveLock);
     userIndexRelation = index_open(indexId, ShareUpdateExclusiveLock);
     TransferPredicateLocksToHeapRelation(userIndexRelation);
 
     /*
-     * Now we are sure that nobody uses the index for queries; they just
-     * might have it open for updating it. So now we can unset indisready
-     * and indislive, then wait till nobody could be using it at all
-     * anymore.
+     * Now we are sure that nobody uses the index for queries; they just might
+     * have it open for updating it. So now we can unset indisready and
+     * indislive, then wait till nobody could be using it at all anymore.
     */
     index_set_state_flags(indexId, INDEX_DROP_SET_DEAD);
 
     /*
-     * Invalidate the relcache for the table, so that after this commit
-     * all sessions will refresh the table's index list. Forgetting just
-     * the index's relcache entry is not enough.
+     * Invalidate the relcache for the table, so that after this commit all
+     * sessions will refresh the table's index list. Forgetting just the
+     * index's relcache entry is not enough.
     */
     CacheInvalidateRelcache(userHeapRelation);
@@ -3018,6 +3020,7 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
             PROGRESS_CREATEIDX_PHASE_VALIDATE_IDXSCAN,
             0, 0, 0, 0
         };
+
         pgstat_progress_update_multi_param(5, index, val);
     }


@@ -693,10 +693,9 @@ AggregateCreate(const char *aggName,
         /*
          * If we're replacing an existing entry, we need to validate that
-         * we're not changing anything that would break callers.
-         * Specifically we must not change aggkind or aggnumdirectargs,
-         * which affect how an aggregate call is treated in parse
-         * analysis.
+         * we're not changing anything that would break callers. Specifically
+         * we must not change aggkind or aggnumdirectargs, which affect how an
+         * aggregate call is treated in parse analysis.
         */
         if (aggKind != oldagg->aggkind)
             ereport(ERROR,


@@ -423,7 +423,11 @@ ProcedureCreate(const char *procedureName,
                      prokind == PROKIND_PROCEDURE
                      ? errmsg("cannot change whether a procedure has output parameters")
                      : errmsg("cannot change return type of existing function"),
-            /* translator: first %s is DROP FUNCTION, DROP PROCEDURE or DROP AGGREGATE */
+
+            /*
+             * translator: first %s is DROP FUNCTION, DROP PROCEDURE or DROP
+             * AGGREGATE
+             */
                      errhint("Use %s %s first.",
                              dropcmd,
                              format_procedure(oldproc->oid))));


@@ -2033,7 +2033,7 @@ get_database_oid(const char *dbname, bool missing_ok)
     /* We assume that there can be at most one matching tuple */
     if (HeapTupleIsValid(dbtuple))
-        oid = ((Form_pg_database)GETSTRUCT(dbtuple))->oid;
+        oid = ((Form_pg_database) GETSTRUCT(dbtuple))->oid;
     else
         oid = InvalidOid;


@@ -705,8 +705,8 @@ ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc)
     ExplainNode(ps, NIL, NULL, NULL, es);
 
     /*
-     * If requested, include information about GUC parameters with values
-     * that don't match the built-in defaults.
+     * If requested, include information about GUC parameters with values that
+     * don't match the built-in defaults.
     */
     ExplainPrintSettings(es);
 }


@@ -3112,8 +3112,8 @@ ReindexRelationConcurrently(Oid relationOid, int options)
     /*
      * The index is now valid in the sense that it contains all currently
-     * interesting tuples. But since it might not contain tuples deleted just
-     * before the reference snap was taken, we have to wait out any
+     * interesting tuples. But since it might not contain tuples deleted
+     * just before the reference snap was taken, we have to wait out any
      * transactions that might have older snapshots.
     */
     pgstat_progress_update_param(PROGRESS_CREATEIDX_PHASE,


@@ -465,9 +465,9 @@ UpdateStatisticsForTypeChange(Oid statsOid, Oid relationOid, int attnum,
         elog(ERROR, "cache lookup failed for statistics object %u", statsOid);
 
     /*
-     * When none of the defined statistics types contain datum values
-     * from the table's columns then there's no need to reset the stats.
-     * Functional dependencies and ndistinct stats should still hold true.
+     * When none of the defined statistics types contain datum values from the
+     * table's columns then there's no need to reset the stats. Functional
+     * dependencies and ndistinct stats should still hold true.
     */
     if (!statext_is_kind_built(oldtup, STATS_EXT_MCV))
     {


@@ -1099,9 +1099,9 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
     }
 
     /*
-     * Now add any newly specified CHECK constraints to the new relation.
-     * Same as for defaults above, but these need to come after partitioning
-     * is set up.
+     * Now add any newly specified CHECK constraints to the new relation. Same
+     * as for defaults above, but these need to come after partitioning is set
+     * up.
     */
     if (stmt->constraints)
         AddRelationNewConstraints(rel, NIL, stmt->constraints,
@@ -1786,6 +1786,7 @@ ExecuteTruncateGuts(List *explicit_rels, List *relids, List *relids_logged,
         {
             Relation    toastrel = relation_open(toast_relid,
                                                  AccessExclusiveLock);
+
             RelationSetNewRelfilenode(toastrel,
                                       toastrel->rd_rel->relpersistence);
             table_close(toastrel, NoLock);
@@ -4336,6 +4337,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
             /* nothing to do here, oid columns don't exist anymore */
             break;
         case AT_SetTableSpace:  /* SET TABLESPACE */
+
             /*
              * Only do this for partitioned tables and indexes, for which this
              * is just a catalog change. Other relation types which have
@@ -4626,8 +4628,8 @@ ATRewriteTables(AlterTableStmt *parsetree, List **wqueue, LOCKMODE lockmode)
         {
             /*
              * If required, test the current data within the table against new
-             * constraints generated by ALTER TABLE commands, but don't rebuild
-             * data.
+             * constraints generated by ALTER TABLE commands, but don't
+             * rebuild data.
             */
             if (tab->constraints != NIL || tab->verify_new_notnull ||
                 tab->partition_constraint != NULL)
@@ -4798,8 +4800,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
     {
         /*
         * If we are rebuilding the tuples OR if we added any new but not
-        * verified NOT NULL constraints, check all not-null constraints.
-        * This is a bit of overkill but it minimizes risk of bugs, and
+        * verified NOT NULL constraints, check all not-null constraints. This
+        * is a bit of overkill but it minimizes risk of bugs, and
         * heap_attisnull is a pretty cheap test anyway.
        */
         for (i = 0; i < newTupDesc->natts; i++)
@@ -4941,8 +4943,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
             {
                 /*
                  * If there's no rewrite, old and new table are guaranteed to
-                 * have the same AM, so we can just use the old slot to
-                 * verify new constraints etc.
+                 * have the same AM, so we can just use the old slot to verify
+                 * new constraints etc.
                 */
                 insertslot = oldslot;
             }
@@ -6209,9 +6211,8 @@ ATExecSetNotNull(AlteredTableInfo *tab, Relation rel,
         /*
         * Ordinarily phase 3 must ensure that no NULLs exist in columns that
        * are set NOT NULL; however, if we can find a constraint which proves
-        * this then we can skip that. We needn't bother looking if
-        * we've already found that we must verify some other NOT NULL
-        * constraint.
+        * this then we can skip that. We needn't bother looking if we've
+        * already found that we must verify some other NOT NULL constraint.
        */
         if (!tab->verify_new_notnull &&
             !NotNullImpliedByRelConstraints(rel, (Form_pg_attribute) GETSTRUCT(tuple)))
@@ -10657,8 +10658,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
                 {
                     /*
                      * Changing the type of a column that is used by a
-                     * generated column is not allowed by SQL standard.
-                     * It might be doable with some thinking and effort.
+                     * generated column is not allowed by SQL standard. It
+                     * might be doable with some thinking and effort.
                     */
                     ereport(ERROR,
                             (errcode(ERRCODE_SYNTAX_ERROR),
@@ -10862,8 +10863,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
     /*
     * Here we go --- change the recorded column type and collation. (Note
-    * heapTup is a copy of the syscache entry, so okay to scribble on.)
-    * First fix up the missing value if any.
+    * heapTup is a copy of the syscache entry, so okay to scribble on.) First
+    * fix up the missing value if any.
    */
     if (attTup->atthasmissing)
     {
@@ -10881,7 +10882,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
         /* if it's a null array there is nothing to do */
-        if (! missingNull)
+        if (!missingNull)
         {
             /*
              * Get the datum out of the array and repack it in a new array
@@ -12311,8 +12312,8 @@ ATExecSetTableSpaceNoStorage(Relation rel, Oid newTableSpace)
     Oid         reloid = RelationGetRelid(rel);
 
     /*
-     * Shouldn't be called on relations having storage; these are processed
-     * in phase 3.
+     * Shouldn't be called on relations having storage; these are processed in
+     * phase 3.
    */
     Assert(!RELKIND_HAS_STORAGE(rel->rd_rel->relkind));
@@ -15240,8 +15241,8 @@ ConstraintImpliedByRelConstraint(Relation scanrel, List *testConstraint, List *p
     * not-false and try to prove the same for testConstraint.
     *
     * Note that predicate_implied_by assumes its first argument is known
-    * immutable. That should always be true for both NOT NULL and
-    * partition constraints, so we don't test it here.
+    * immutable. That should always be true for both NOT NULL and partition
+    * constraints, so we don't test it here.
    */
     return predicate_implied_by(testConstraint, existConstraint, true);
 }


@@ -1143,9 +1143,9 @@ GetDefaultTablespace(char relpersistence, bool partitioned)
     /*
      * Allow explicit specification of database's default tablespace in
-     * default_tablespace without triggering permissions checks. Don't
-     * allow specifying that when creating a partitioned table, however,
-     * since the result is confusing.
+     * default_tablespace without triggering permissions checks. Don't allow
+     * specifying that when creating a partitioned table, however, since the
+     * result is confusing.
    */
     if (result == MyDatabaseTableSpace)
     {


@@ -4245,9 +4245,9 @@ AfterTriggerExecute(EState *estate,
         case AFTER_TRIGGER_FDW_REUSE:
 
             /*
-             * Store tuple in the slot so that tg_trigtuple does not
-             * reference tuplestore memory. (It is formally possible for the
-             * trigger function to queue trigger events that add to the same
+             * Store tuple in the slot so that tg_trigtuple does not reference
+             * tuplestore memory. (It is formally possible for the trigger
+             * function to queue trigger events that add to the same
              * tuplestore, which can push other tuples out of memory.) The
              * distinction is academic, because we start with a minimal tuple
             * that is stored as a heap tuple, constructed in different memory


@@ -593,8 +593,9 @@ vacuum_open_relation(Oid relid, RangeVar *relation, int options,
     /*
     * Determine the log level.
     *
-    * For manual VACUUM or ANALYZE, we emit a WARNING to match the log statements
-    * in the permission checks; otherwise, only log if the caller so requested.
+    * For manual VACUUM or ANALYZE, we emit a WARNING to match the log
+    * statements in the permission checks; otherwise, only log if the caller
+    * so requested.
    */
     if (!IsAutoVacuumWorkerProcess())
         elevel = WARNING;
@@ -1328,9 +1329,9 @@ vac_update_datfrozenxid(void)
         }
 
         /*
-         * Some table AMs might not need per-relation xid / multixid
-         * horizons. It therefore seems reasonable to allow relfrozenxid and
-         * relminmxid to not be set (i.e. set to their respective Invalid*Id)
+         * Some table AMs might not need per-relation xid / multixid horizons.
+         * It therefore seems reasonable to allow relfrozenxid and relminmxid
+         * to not be set (i.e. set to their respective Invalid*Id)
          * independently. Thus validate and compute horizon for each only if
         * set.
         *


@@ -3313,7 +3313,7 @@ ExecBuildAggTransCall(ExprState *state, AggState *aggstate,
 */
 ExprState *
 ExecBuildGroupingEqual(TupleDesc ldesc, TupleDesc rdesc,
-                       const TupleTableSlotOps * lops, const TupleTableSlotOps * rops,
+                       const TupleTableSlotOps *lops, const TupleTableSlotOps *rops,
                        int numCols,
                        const AttrNumber *keyColIdx,
                        const Oid *eqfunctions,


@@ -762,9 +762,9 @@ ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate,
             * It's safe to reuse these from the partition root, as we
             * only process one tuple at a time (therefore we won't
             * overwrite needed data in slots), and the results of
-            * projections are independent of the underlying
-            * storage. Projections and where clauses themselves don't
-            * store state / are independent of the underlying storage.
+            * projections are independent of the underlying storage.
+            * Projections and where clauses themselves don't store state
+            * / are independent of the underlying storage.
            */
             leaf_part_rri->ri_onConflict->oc_ProjSlot =
                 rootResultRelInfo->ri_onConflict->oc_ProjSlot;
@@ -1676,8 +1676,8 @@ ExecCreatePartitionPruneState(PlanState *planstate,
                 * those are reflected in our PartitionDesc but were not
                 * present in the one used to construct subplan_map and
                 * subpart_map. So we must construct new and longer arrays
-                * where the partitions that were originally present map to the
-                * same place, and any added indexes map to -1, as if the
+                * where the partitions that were originally present map to
+                * the same place, and any added indexes map to -1, as if the
                 * new partitions had been pruned.
                */
                 pprune->subpart_map = palloc(sizeof(int) * partdesc->nparts);


@@ -490,7 +490,7 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
         if (resultRelInfo->ri_PartitionCheck)
             ExecPartitionCheck(resultRelInfo, slot, estate, true);
 
-        simple_table_update(rel, tid, slot,estate->es_snapshot,
+        simple_table_update(rel, tid, slot, estate->es_snapshot,
                             &update_indexes);
 
         if (resultRelInfo->ri_NumIndices > 0 && update_indexes)
@@ -591,8 +591,8 @@ CheckSubscriptionRelkind(char relkind, const char *nspname,
                          const char *relname)
 {
     /*
-     * We currently only support writing to regular tables. However, give
-     * a more specific error for partitioned and foreign tables.
+     * We currently only support writing to regular tables. However, give a
+     * more specific error for partitioned and foreign tables.
    */
     if (relkind == RELKIND_PARTITIONED_TABLE)
         ereport(ERROR,


@@ -81,7 +81,8 @@ ExecScanFetch(ScanState *node,
             /* Check if it meets the access-method conditions */
             if (!(*recheckMtd) (node, slot))
-                return ExecClearTuple(slot);    /* would not be returned by scan */
+                return ExecClearTuple(slot);    /* would not be returned by
+                                                 * scan */
 
             return slot;
         }


@@ -71,8 +71,7 @@
 static TupleDesc ExecTypeFromTLInternal(List *targetList,
                                         bool skipjunk);
-static pg_attribute_always_inline void
-slot_deform_heap_tuple(TupleTableSlot *slot, HeapTuple tuple, uint32 *offp,
+static pg_attribute_always_inline void slot_deform_heap_tuple(TupleTableSlot *slot, HeapTuple tuple, uint32 *offp,
                        int natts);
 static inline void tts_buffer_heap_store_tuple(TupleTableSlot *slot,
                                                HeapTuple tuple,
@@ -1077,8 +1076,10 @@ TupleTableSlot *
 MakeTupleTableSlot(TupleDesc tupleDesc,
                    const TupleTableSlotOps *tts_ops)
 {
-    Size        basesz, allocsz;
+    Size        basesz,
+                allocsz;
     TupleTableSlot *slot;
+
     basesz = tts_ops->base_slot_size;
 
     /*
@@ -1876,8 +1877,8 @@ slot_getsomeattrs_int(TupleTableSlot *slot, int attnum)
     slot->tts_ops->getsomeattrs(slot, attnum);
 
     /*
-     * If the underlying tuple doesn't have enough attributes, tuple descriptor
-     * must have the missing attributes.
+     * If the underlying tuple doesn't have enough attributes, tuple
+     * descriptor must have the missing attributes.
    */
     if (unlikely(slot->tts_nvalid < attnum))
     {


@@ -192,9 +192,9 @@ IndexOnlyNext(IndexOnlyScanState *node)
         /*
         * Fill the scan tuple slot with data from the index. This might be
-        * provided in either HeapTuple or IndexTuple format. Conceivably
-        * an index AM might fill both fields, in which case we prefer the
-        * heap format, since it's probably a bit cheaper to fill a slot from.
+        * provided in either HeapTuple or IndexTuple format. Conceivably an
+        * index AM might fill both fields, in which case we prefer the heap
+        * format, since it's probably a bit cheaper to fill a slot from.
        */
         if (scandesc->xs_hitup)
         {


@@ -865,6 +865,7 @@ ldelete:;
                     goto ldelete;
 
                 case TM_SelfModified:
+
                     /*
                      * This can be reached when following an update
                      * chain from a tuple updated by another session,
@@ -1401,6 +1402,7 @@ lreplace:;
                     return NULL;
 
                 case TM_SelfModified:
+
                     /*
                      * This can be reached when following an update
                      * chain from a tuple updated by another session,


@@ -131,6 +131,7 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate, int eflags)
     ExecInitScanTupleSlot(estate, &subquerystate->ss,
                           ExecGetResultType(subquerystate->subplan),
                           ExecGetResultSlotOps(subquerystate->subplan, NULL));
+
     /*
     * The slot used as the scantuple isn't the slot above (outside of EPQ),
     * but the one from the node below.


@@ -195,6 +195,7 @@ be_tls_init(bool isServerStart)
         int         ssl_ver = ssl_protocol_version_to_openssl(ssl_min_protocol_version,
                                                               "ssl_min_protocol_version",
                                                               isServerStart ? FATAL : LOG);
+
         if (ssl_ver == -1)
             goto error;
         SSL_CTX_set_min_proto_version(context, ssl_ver);
@@ -205,6 +206,7 @@ be_tls_init(bool isServerStart)
         int         ssl_ver = ssl_protocol_version_to_openssl(ssl_max_protocol_version,
                                                               "ssl_max_protocol_version",
                                                               isServerStart ? FATAL : LOG);
+
         if (ssl_ver == -1)
             goto error;
         SSL_CTX_set_max_proto_version(context, ssl_ver);
@@ -1150,6 +1152,7 @@ be_tls_get_peer_serial(Port *port, char *ptr, size_t len)
     serial = X509_get_serialNumber(port->peer);
     b = ASN1_INTEGER_to_BN(serial, NULL);
     decimal = BN_bn2dec(b);
+
     BN_free(b);
     strlcpy(ptr, decimal, len);
     OPENSSL_free(decimal);


@@ -476,10 +476,10 @@ bms_member_index(Bitmapset *a, int x)
     }
 
     /*
-     * Now add bits of the last word, but only those before the item.
-     * We can do that by applying a mask and then using popcount again.
-     * To get 0-based index, we want to count only preceding bits, not
-     * the item itself, so we subtract 1.
+     * Now add bits of the last word, but only those before the item. We can
+     * do that by applying a mask and then using popcount again. To get
+     * 0-based index, we want to count only preceding bits, not the item
+     * itself, so we subtract 1.
    */
     mask = ((bitmapword) 1 << bitnum) - 1;
     result += bmw_popcount(a->words[wordnum] & mask);


@@ -161,9 +161,9 @@ clauselist_selectivity_simple(PlannerInfo *root,
     int         listidx;
 
     /*
-     * If there's exactly one clause (and it was not estimated yet), just
-     * go directly to clause_selectivity(). None of what we might do below
-     * is relevant.
+     * If there's exactly one clause (and it was not estimated yet), just go
+     * directly to clause_selectivity(). None of what we might do below is
+     * relevant.
    */
     if ((list_length(clauses) == 1) &&
         bms_num_members(estimatedclauses) == 0)


@@ -311,6 +311,7 @@ expand_partitioned_rtentry(PlannerInfo *root, RelOptInfo *relinfo,
     if (!root->partColsUpdated)
         root->partColsUpdated =
             has_partition_attrs(parentrel, parentrte->updatedCols, NULL);
+
     /*
     * There shouldn't be any generated columns in the partition key.
    */


@@ -1053,8 +1053,8 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
                                                InvalidOid, &found_whole_row);
 
             /*
-             * Prevent this for the same reason as for constraints below.
-             * Note that defaults cannot contain any vars, so it's OK that the
+             * Prevent this for the same reason as for constraints below. Note
+             * that defaults cannot contain any vars, so it's OK that the
              * error message refers to generated columns.
            */
             if (found_whole_row)
@@ -3880,8 +3880,8 @@ transformPartitionRangeBounds(ParseState *pstate, List *blist,
         PartitionRangeDatum *prd = NULL;
 
         /*
-         * Infinite range bounds -- "minvalue" and "maxvalue" -- get passed
-         * in as ColumnRefs.
+         * Infinite range bounds -- "minvalue" and "maxvalue" -- get passed in
+         * as ColumnRefs.
        */
         if (IsA(expr, ColumnRef))
         {
@@ -3899,8 +3899,8 @@ transformPartitionRangeBounds(ParseState *pstate, List *blist,
             if (cname == NULL)
             {
                 /*
-                 * ColumnRef is not in the desired single-field-name form.
-                 * For consistency between all partition strategies, let the
+                 * ColumnRef is not in the desired single-field-name form. For
+                 * consistency between all partition strategies, let the
                  * expression transformation report any errors rather than
                  * doing it ourselves.
                */
@@ -3965,8 +3965,8 @@ transformPartitionRangeBounds(ParseState *pstate, List *blist,
     }
 
     /*
-     * Once we see MINVALUE or MAXVALUE for one column, the remaining
-     * columns must be the same.
+     * Once we see MINVALUE or MAXVALUE for one column, the remaining columns
+     * must be the same.
    */
     validateInfiniteBounds(pstate, result);
@@ -4030,9 +4030,9 @@ transformPartitionBoundValue(ParseState *pstate, Node *val,
     /*
     * Check that the input expression's collation is compatible with one
-    * specified for the parent's partition key (partcollation). Don't
-    * throw an error if it's the default collation which we'll replace with
-    * the parent's collation anyway.
+    * specified for the parent's partition key (partcollation). Don't throw
+    * an error if it's the default collation which we'll replace with the
+    * parent's collation anyway.
    */
     if (IsA(value, CollateExpr))
     {


@@ -74,9 +74,9 @@ RelationBuildPartitionDesc(Relation rel)
     /*
     * Get partition oids from pg_inherits. This uses a single snapshot to
-    * fetch the list of children, so while more children may be getting
-    * added concurrently, whatever this function returns will be accurate
-    * as of some well-defined point in time.
+    * fetch the list of children, so while more children may be getting added
+    * concurrently, whatever this function returns will be accurate as of
+    * some well-defined point in time.
    */
     inhoids = find_inheritance_children(RelationGetRelid(rel), NoLock);
     nparts = list_length(inhoids);
@@ -122,8 +122,8 @@ RelationBuildPartitionDesc(Relation rel)
         *
         * Note that this algorithm assumes that PartitionBoundSpec we manage
         * to fetch is the right one -- so this is only good enough for
-        * concurrent ATTACH PARTITION, not concurrent DETACH PARTITION
-        * or some hypothetical operation that changes the partition bounds.
+        * concurrent ATTACH PARTITION, not concurrent DETACH PARTITION or
+        * some hypothetical operation that changes the partition bounds.
        */
         if (boundspec == NULL)
         {


@@ -1327,8 +1327,8 @@ ReorderBufferBuildTupleCidHash(ReorderBuffer *rb, ReorderBufferTXN *txn)
         else
         {
             /*
-             * Maybe we already saw this tuple before in this transaction,
-             * but if so it must have the same cmin.
+             * Maybe we already saw this tuple before in this transaction, but
+             * if so it must have the same cmin.
            */
             Assert(ent->cmin == change->data.tuplecid.cmin);


@@ -730,11 +730,11 @@ copy_replication_slot(FunctionCallInfo fcinfo, bool logical_slot)
         SpinLockRelease(&src->mutex);
 
         /*
-         * Check if the source slot still exists and is valid. We regard it
-         * as invalid if the type of replication slot or name has been
-         * changed, or the restart_lsn either is invalid or has gone backward.
-         * (The restart_lsn could go backwards if the source slot is dropped
-         * and copied from an older slot during installation.)
+         * Check if the source slot still exists and is valid. We regard it as
+         * invalid if the type of replication slot or name has been changed,
+         * or the restart_lsn either is invalid or has gone backward. (The
+         * restart_lsn could go backwards if the source slot is dropped and
+         * copied from an older slot during installation.)
          *
         * Since erroring out will release and drop the destination slot we
        * don't need to release it here.


@@ -276,9 +276,8 @@ SyncRepWaitForLSN(XLogRecPtr lsn, bool commit)
                        WAIT_EVENT_SYNC_REP);
 
         /*
-         * If the postmaster dies, we'll probably never get an
-         * acknowledgment, because all the wal sender processes will exit. So
-         * just bail out.
+         * If the postmaster dies, we'll probably never get an acknowledgment,
+         * because all the wal sender processes will exit. So just bail out.
        */
         if (rc & WL_POSTMASTER_DEATH)
         {


@@ -808,11 +808,11 @@ WalRcvQuickDieHandler(SIGNAL_ARGS)
     * anyway.
     *
     * Note we use _exit(2) not _exit(0). This is to force the postmaster
-    * into a system reset cycle if someone sends a manual SIGQUIT to a
-    * random backend. This is necessary precisely because we don't clean up
-    * our shared memory state. (The "dead man switch" mechanism in
-    * pmsignal.c should ensure the postmaster sees this as a crash, too, but
-    * no harm in being doubly sure.)
+    * into a system reset cycle if someone sends a manual SIGQUIT to a random
+    * backend. This is necessary precisely because we don't clean up our
+    * shared memory state. (The "dead man switch" mechanism in pmsignal.c
+    * should ensure the postmaster sees this as a crash, too, but no harm in
+    * being doubly sure.)
    */
     _exit(2);
 }


@ -279,8 +279,8 @@ dependency_degree(int numrows, HeapTuple *rows, int k, AttrNumber *dependency,
* build an array of SortItem(s) sorted using the multi-sort support * build an array of SortItem(s) sorted using the multi-sort support
* *
* XXX This relies on all stats entries pointing to the same tuple * XXX This relies on all stats entries pointing to the same tuple
* descriptor. For now that assumption holds, but it might change in * descriptor. For now that assumption holds, but it might change in the
* the future for example if we support statistics on multiple tables. * future for example if we support statistics on multiple tables.
*/ */
items = build_sorted_items(numrows, &nitems, rows, stats[0]->tupDesc, items = build_sorted_items(numrows, &nitems, rows, stats[0]->tupDesc,
mss, k, attnums_dep); mss, k, attnums_dep);
@ -300,8 +300,8 @@ dependency_degree(int numrows, HeapTuple *rows, int k, AttrNumber *dependency,
{ {
/* /*
* Check if the group ended, which may be either because we processed * Check if the group ended, which may be either because we processed
* all the items (i==nitems), or because the i-th item is not equal * all the items (i==nitems), or because the i-th item is not equal to
* to the preceding one. * the preceding one.
*/ */
if (i == nitems || if (i == nitems ||
multi_sort_compare_dims(0, k - 2, &items[i - 1], &items[i], mss) != 0) multi_sort_compare_dims(0, k - 2, &items[i - 1], &items[i], mss) != 0)
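
As a side note on the group-boundary test in the hunk above: a minimal standalone sketch (plain C with made-up data, not the dependencies.c code) of counting runs of equal values in a sorted array using the same "end of input, or current item differs from the previous one" check.

#include <stdio.h>

int
main(void)
{
    int     items[] = {1, 1, 2, 2, 2, 5};   /* sorted input */
    int     nitems = 6;
    int     ngroups = 0;

    for (int i = 1; i <= nitems; i++)
    {
        /* a group ends at end-of-input or when the value changes */
        if (i == nitems || items[i] != items[i - 1])
            ngroups++;
    }
    printf("%d groups\n", ngroups);     /* prints: 3 groups */
    return 0;
}
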


@ -67,7 +67,7 @@ static VacAttrStats **lookup_var_attr_stats(Relation rel, Bitmapset *attrs,
int nvacatts, VacAttrStats **vacatts); int nvacatts, VacAttrStats **vacatts);
static void statext_store(Relation pg_stext, Oid relid, static void statext_store(Relation pg_stext, Oid relid,
MVNDistinct *ndistinct, MVDependencies *dependencies, MVNDistinct *ndistinct, MVDependencies *dependencies,
MCVList * mcvlist, VacAttrStats **stats); MCVList *mcvlist, VacAttrStats **stats);
/* /*
@ -317,7 +317,7 @@ lookup_var_attr_stats(Relation rel, Bitmapset *attrs,
static void static void
statext_store(Relation pg_stext, Oid statOid, statext_store(Relation pg_stext, Oid statOid,
MVNDistinct *ndistinct, MVDependencies *dependencies, MVNDistinct *ndistinct, MVDependencies *dependencies,
MCVList * mcv, VacAttrStats **stats) MCVList *mcv, VacAttrStats **stats)
{ {
HeapTuple stup, HeapTuple stup,
oldtup; oldtup;
@ -538,9 +538,9 @@ build_attnums_array(Bitmapset *attrs, int *numattrs)
{ {
/* /*
* Make sure the bitmap contains only user-defined attributes. As * Make sure the bitmap contains only user-defined attributes. As
* bitmaps can't contain negative values, this can be violated in * bitmaps can't contain negative values, this can be violated in two
* two ways. Firstly, the bitmap might contain 0 as a member, and * ways. Firstly, the bitmap might contain 0 as a member, and secondly
* secondly the integer value might be larger than MaxAttrNumber. * the integer value might be larger than MaxAttrNumber.
*/ */
Assert(AttrNumberIsForUserDefinedAttr(j)); Assert(AttrNumberIsForUserDefinedAttr(j));
Assert(j <= MaxAttrNumber); Assert(j <= MaxAttrNumber);
@ -1006,9 +1006,9 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
* Pre-process the clauses list to extract the attnums seen in each item. * Pre-process the clauses list to extract the attnums seen in each item.
* We need to determine if there's any clauses which will be useful for * We need to determine if there's any clauses which will be useful for
* selectivity estimations with extended stats. Along the way we'll record * selectivity estimations with extended stats. Along the way we'll record
* all of the attnums for each clause in a list which we'll reference later * all of the attnums for each clause in a list which we'll reference
* so we don't need to repeat the same work again. We'll also keep track of * later so we don't need to repeat the same work again. We'll also keep
* all attnums seen. * track of all attnums seen.
* *
* We also skip clauses that we already estimated using different types of * We also skip clauses that we already estimated using different types of
* statistics (we treat them as incompatible). * statistics (we treat them as incompatible).
@ -1066,9 +1066,10 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
} }
/* /*
* First compute "simple" selectivity, i.e. without the extended statistics, * First compute "simple" selectivity, i.e. without the extended
* and essentially assuming independence of the columns/clauses. We'll then * statistics, and essentially assuming independence of the
* use the various selectivities computed from MCV list to improve it. * columns/clauses. We'll then use the various selectivities computed from
* MCV list to improve it.
*/ */
simple_sel = clauselist_selectivity_simple(root, stat_clauses, varRelid, simple_sel = clauselist_selectivity_simple(root, stat_clauses, varRelid,
jointype, sjinfo, NULL); jointype, sjinfo, NULL);
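
For context on the "simple" selectivity mentioned in the hunk above, here is a minimal sketch (illustrative values only, not PostgreSQL code): under the independence assumption the per-clause selectivities are simply multiplied, which is exactly the estimate the MCV statistics are later used to improve.

#include <stdio.h>

int
main(void)
{
    double  clause_sel[] = {0.1, 0.2, 0.5}; /* per-clause selectivities */
    double  simple_sel = 1.0;

    for (int i = 0; i < 3; i++)
        simple_sel *= clause_sel[i];        /* independence assumption */

    printf("simple selectivity = %g\n", simple_sel);    /* 0.01 */
    return 0;
}
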
@ -1112,9 +1113,9 @@ statext_clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid,
sjinfo, rel, estimatedclauses); sjinfo, rel, estimatedclauses);
/* /*
* Then, apply functional dependencies on the remaining clauses by * Then, apply functional dependencies on the remaining clauses by calling
* calling dependencies_clauselist_selectivity. Pass 'estimatedclauses' * dependencies_clauselist_selectivity. Pass 'estimatedclauses' so the
* so the function can properly skip clauses already estimated above. * function can properly skip clauses already estimated above.
* *
* The reasoning for applying dependencies last is that the more complex * The reasoning for applying dependencies last is that the more complex
* stats can track more complex correlations between the attributes, and * stats can track more complex correlations between the attributes, and


@ -209,20 +209,20 @@ statext_mcv_build(int numrows, HeapTuple *rows, Bitmapset *attrs,
* *
* Using the same algorithm might exclude items that are close to the * Using the same algorithm might exclude items that are close to the
* "average" frequency of the sample. But that does not say whether the * "average" frequency of the sample. But that does not say whether the
* observed frequency is close to the base frequency or not. We also * observed frequency is close to the base frequency or not. We also need
* need to consider unexpectedly uncommon items (again, compared to the * to consider unexpectedly uncommon items (again, compared to the base
* base frequency), and the single-column algorithm does not have to. * frequency), and the single-column algorithm does not have to.
* *
* We simply decide how many items to keep by computing minimum count * We simply decide how many items to keep by computing minimum count
* using get_mincount_for_mcv_list() and then keep all items that seem * using get_mincount_for_mcv_list() and then keep all items that seem to
* to be more common than that. * be more common than that.
*/ */
mincount = get_mincount_for_mcv_list(numrows, totalrows); mincount = get_mincount_for_mcv_list(numrows, totalrows);
/* /*
* Walk the groups until we find the first group with a count below * Walk the groups until we find the first group with a count below the
* the mincount threshold (the index of that group is the number of * mincount threshold (the index of that group is the number of groups we
* groups we want to keep). * want to keep).
*/ */
for (i = 0; i < nitems; i++) for (i = 0; i < nitems; i++)
{ {
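
The walk described in the hunk above amounts to finding a cutoff position in a list sorted by descending count; a small standalone sketch with made-up counts and threshold (not the mcv.c code):

#include <stdio.h>

int
main(void)
{
    int     counts[] = {120, 80, 55, 9, 7, 3};  /* sorted by descending count */
    int     nitems = 6;
    int     mincount = 10;                      /* stand-in threshold */
    int     i;

    for (i = 0; i < nitems; i++)
        if (counts[i] < mincount)
            break;                  /* i is now the number of groups to keep */

    printf("keeping %d of %d groups\n", i, nitems); /* keeping 3 of 6 groups */
    return 0;
}
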
@ -485,7 +485,7 @@ statext_mcv_load(Oid mvoid)
* (or a longer type) instead of using an array of bool items. * (or a longer type) instead of using an array of bool items.
*/ */
bytea * bytea *
statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats) statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats)
{ {
int i; int i;
int dim; int dim;
@ -636,8 +636,8 @@ statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats)
* for each attribute, deduplicated values and items). * for each attribute, deduplicated values and items).
* *
* The header fields are copied one by one, so that we don't need any * The header fields are copied one by one, so that we don't need any
* explicit alignment (we copy them while deserializing). All fields * explicit alignment (we copy them while deserializing). All fields after
* after this need to be properly aligned, for direct access. * this need to be properly aligned, for direct access.
*/ */
total_length = MAXALIGN(VARHDRSZ + (3 * sizeof(uint32)) total_length = MAXALIGN(VARHDRSZ + (3 * sizeof(uint32))
+ sizeof(AttrNumber) + (ndims * sizeof(Oid))); + sizeof(AttrNumber) + (ndims * sizeof(Oid)));
@ -650,14 +650,14 @@ statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats)
total_length += MAXALIGN(info[i].nbytes); total_length += MAXALIGN(info[i].nbytes);
/* /*
* And finally the items (no additional alignment needed, we start * And finally the items (no additional alignment needed, we start at
* at proper alignment and the itemsize formula uses MAXALIGN) * proper alignment and the itemsize formula uses MAXALIGN)
*/ */
total_length += mcvlist->nitems * itemsize; total_length += mcvlist->nitems * itemsize;
/* /*
* Allocate space for the whole serialized MCV list (we'll skip bytes, * Allocate space for the whole serialized MCV list (we'll skip bytes, so
* so we set them to zero to make the result more compressible). * we set them to zero to make the result more compressible).
*/ */
raw = palloc0(total_length); raw = palloc0(total_length);
SET_VARSIZE(raw, total_length); SET_VARSIZE(raw, total_length);
@ -1376,7 +1376,7 @@ pg_mcv_list_send(PG_FUNCTION_ARGS)
*/ */
static bool * static bool *
mcv_get_match_bitmap(PlannerInfo *root, List *clauses, mcv_get_match_bitmap(PlannerInfo *root, List *clauses,
Bitmapset *keys, MCVList * mcvlist, bool is_or) Bitmapset *keys, MCVList *mcvlist, bool is_or)
{ {
int i; int i;
ListCell *l; ListCell *l;


@ -2813,10 +2813,10 @@ RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
case RELKIND_MATVIEW: case RELKIND_MATVIEW:
{ {
/* /*
* Not every table AM uses BLCKSZ wide fixed size * Not every table AM uses BLCKSZ wide fixed size blocks.
* blocks. Therefore tableam returns the size in bytes - but * Therefore tableam returns the size in bytes - but for the
* for the purpose of this routine, we want the number of * purpose of this routine, we want the number of blocks.
* blocks. Therefore divide, rounding up. * Therefore divide, rounding up.
*/ */
uint64 szbytes; uint64 szbytes;
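
A minimal standalone sketch of the divide-rounding-up step the comment above describes; MY_BLCKSZ and bytes_to_blocks are illustrative stand-ins, not the bufmgr.c code.

#include <stdint.h>
#include <stdio.h>

#define MY_BLCKSZ 8192              /* stand-in for a fixed block size */

static uint64_t
bytes_to_blocks(uint64_t szbytes)
{
    /* divide, rounding up, so a partial block still counts as one block */
    return (szbytes + (MY_BLCKSZ - 1)) / MY_BLCKSZ;
}

int
main(void)
{
    printf("%llu\n", (unsigned long long) bytes_to_blocks(8193));   /* 2 */
    return 0;
}
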


@ -928,6 +928,7 @@ WaitForLockersMultiple(List *locktags, LOCKMODE lockmode, bool progress)
const int64 values[] = { const int64 values[] = {
0, 0, 0 0, 0, 0
}; };
pgstat_progress_update_multi_param(3, index, values); pgstat_progress_update_multi_param(3, index, values);
} }


@ -548,8 +548,8 @@ RegisterSyncRequest(const FileTag *ftag, SyncRequestType type,
for (;;) for (;;)
{ {
/* /*
* Notify the checkpointer about it. If we fail to queue a message * Notify the checkpointer about it. If we fail to queue a message in
* in retryOnError mode, we have to sleep and try again ... ugly, but * retryOnError mode, we have to sleep and try again ... ugly, but
* hopefully won't happen often. * hopefully won't happen often.
* *
* XXX should we CHECK_FOR_INTERRUPTS in this loop? Escaping with an * XXX should we CHECK_FOR_INTERRUPTS in this loop? Escaping with an


@ -113,8 +113,8 @@ DestReceiver *
CreateDestReceiver(CommandDest dest) CreateDestReceiver(CommandDest dest)
{ {
/* /*
* It's ok to cast the constness away as any modification of the none receiver * It's ok to cast the constness away as any modification of the none
* would be a bug (which gets easier to catch this way). * receiver would be a bug (which gets easier to catch this way).
*/ */
switch (dest) switch (dest)


@ -3023,6 +3023,7 @@ DCH_from_char(FormatNode *node, char *in, TmFromChar *out)
int len, int len,
value; value;
bool fx_mode = false; bool fx_mode = false;
/* number of extra skipped characters (more than given in format string) */ /* number of extra skipped characters (more than given in format string) */
int extra_skip = 0; int extra_skip = 0;
@ -3049,8 +3050,8 @@ DCH_from_char(FormatNode *node, char *in, TmFromChar *out)
/* /*
* In non FX (fixed format) mode one format string space or * In non FX (fixed format) mode one format string space or
* separator match to one space or separator in input string. * separator match to one space or separator in input string.
* Or match nothing if there is no space or separator in * Or match nothing if there is no space or separator in the
* the current position of input string. * current position of input string.
*/ */
extra_skip--; extra_skip--;
if (isspace((unsigned char) *s) || is_separator_char(s)) if (isspace((unsigned char) *s) || is_separator_char(s))
@ -3176,11 +3177,13 @@ DCH_from_char(FormatNode *node, char *in, TmFromChar *out)
n->key->name))); n->key->name)));
break; break;
case DCH_TZH: case DCH_TZH:
/* /*
* Value of TZH might be negative. And the issue is that we * Value of TZH might be negative. And the issue is that we
* might swallow minus sign as the separator. So, if we have * might swallow minus sign as the separator. So, if we have
* skipped more characters than specified in the format string, * skipped more characters than specified in the format
* then we consider prepending last skipped minus to TZH. * string, then we consider prepending last skipped minus to
* TZH.
*/ */
if (*s == '+' || *s == '-' || *s == ' ') if (*s == '+' || *s == '-' || *s == ' ')
{ {
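
To illustrate the problem the comment above describes, here is a tiny standalone parser sketch (not the formatting.c code; the input string and variable names are invented): greedy separator skipping can swallow a leading minus sign, so the last skipped character is remembered and re-applied as the sign of the hour field.

#include <ctype.h>
#include <stdio.h>

int
main(void)
{
    const char *s = "2019-05-22 -05";   /* trailing field: signed tz hours */
    const char *p = s + 10;             /* just past the date part */
    char        last_skipped = '\0';
    int         tzh = 0;

    /* skip spaces and separators, remembering the last one consumed */
    while (*p == ' ' || *p == '-' || *p == ':')
        last_skipped = *p++;

    while (isdigit((unsigned char) *p))
        tzh = tzh * 10 + (*p++ - '0');

    if (last_skipped == '-')            /* give the swallowed sign back */
        tzh = -tzh;

    printf("TZH = %d\n", tzh);          /* prints: TZH = -5 */
    return 0;
}
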


@ -2378,8 +2378,8 @@ dist_ppath(PG_FUNCTION_ARGS)
Assert(path->npts > 0); Assert(path->npts > 0);
/* /*
* The distance from a point to a path is the smallest distance * The distance from a point to a path is the smallest distance from the
* from the point to any of its constituent segments. * point to any of its constituent segments.
*/ */
for (i = 0; i < path->npts; i++) for (i = 0; i < path->npts; i++)
{ {
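
The dist_ppath comment above describes a minimum over per-segment distances; a compact standalone sketch of that idea, using its own types and the classic project-and-clamp segment distance rather than the geo_ops.c implementation:

#include <math.h>
#include <stdio.h>

typedef struct { double x, y; } Pt;

static double
dist_point_seg(Pt p, Pt a, Pt b)
{
    double  dx = b.x - a.x,
            dy = b.y - a.y;
    double  len2 = dx * dx + dy * dy;
    double  t = 0.0;

    if (len2 > 0.0)
    {
        /* projection of p onto the line through a and b, clamped to [0,1] */
        t = ((p.x - a.x) * dx + (p.y - a.y) * dy) / len2;
        if (t < 0.0) t = 0.0;
        if (t > 1.0) t = 1.0;
    }
    return hypot(p.x - (a.x + t * dx), p.y - (a.y + t * dy));
}

int
main(void)
{
    Pt      path[] = {{0, 0}, {10, 0}, {10, 10}};   /* two segments */
    Pt      p = {5, 3};
    double  best = INFINITY;

    for (int i = 0; i + 1 < 3; i++)
    {
        double  d = dist_point_seg(p, path[i], path[i + 1]);

        if (d < best)
            best = d;               /* keep the smallest segment distance */
    }
    printf("distance = %g\n", best);    /* prints: distance = 3 */
    return 0;
}

(Link with -lm for hypot.)
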
@ -2553,9 +2553,9 @@ lseg_interpt_line(Point *result, LSEG *lseg, LINE *line)
LINE tmp; LINE tmp;
/* /*
* First, we promote the line segment to a line, because we know how * First, we promote the line segment to a line, because we know how to
* to find the intersection point of two lines. If they don't have * find the intersection point of two lines. If they don't have an
* an intersection point, we are done. * intersection point, we are done.
*/ */
line_construct(&tmp, &lseg->p[0], lseg_sl(lseg)); line_construct(&tmp, &lseg->p[0], lseg_sl(lseg));
if (!line_interpt_line(&interpt, &tmp, line)) if (!line_interpt_line(&interpt, &tmp, line))
@ -2602,8 +2602,8 @@ line_closept_point(Point *result, LINE *line, Point *point)
LINE tmp; LINE tmp;
/* /*
* We drop a perpendicular to find the intersection point. Ordinarily * We drop a perpendicular to find the intersection point. Ordinarily we
* we should always find it, but that can fail in the presence of NaN * should always find it, but that can fail in the presence of NaN
* coordinates, and perhaps even from simple roundoff issues. * coordinates, and perhaps even from simple roundoff issues.
*/ */
line_construct(&tmp, point, line_invsl(line)); line_construct(&tmp, point, line_invsl(line));
@ -2693,8 +2693,8 @@ lseg_closept_lseg(Point *result, LSEG *on_lseg, LSEG *to_lseg)
return 0.0; return 0.0;
/* /*
* Then, we find the closest points from the endpoints of the second * Then, we find the closest points from the endpoints of the second line
* line segment, and keep the closest one. * segment, and keep the closest one.
*/ */
dist = lseg_closept_point(result, on_lseg, &to_lseg->p[0]); dist = lseg_closept_point(result, on_lseg, &to_lseg->p[0]);
d = lseg_closept_point(&point, on_lseg, &to_lseg->p[1]); d = lseg_closept_point(&point, on_lseg, &to_lseg->p[1]);
@ -3063,7 +3063,7 @@ static bool
box_contain_point(BOX *box, Point *point) box_contain_point(BOX *box, Point *point)
{ {
return box->high.x >= point->x && box->low.x <= point->x && return box->high.x >= point->x && box->low.x <= point->x &&
box->high.y >= point->y && box->low.y <= point-> y; box->high.y >= point->y && box->low.y <= point->y;
} }
Datum Datum


@ -207,7 +207,7 @@ IsValidJsonNumber(const char *str, int len)
*/ */
if (*str == '-') if (*str == '-')
{ {
dummy_lex.input = unconstify(char *, str) + 1; dummy_lex.input = unconstify(char *, str) +1;
dummy_lex.input_length = len - 1; dummy_lex.input_length = len - 1;
} }
else else


@ -262,9 +262,9 @@ match_pattern_prefix(Node *leftop,
* optimized equality or prefix tests use bytewise comparisons, which is * optimized equality or prefix tests use bytewise comparisons, which is
* not consistent with nondeterministic collations. The actual * not consistent with nondeterministic collations. The actual
* pattern-matching implementation functions will later error out that * pattern-matching implementation functions will later error out that
* pattern-matching is not supported with nondeterministic collations. * pattern-matching is not supported with nondeterministic collations. (We
* (We could also error out here, but by doing it later we get more * could also error out here, but by doing it later we get more precise
* precise error messages.) (It should be possible to support at least * error messages.) (It should be possible to support at least
* Pattern_Prefix_Exact, but no point as along as the actual * Pattern_Prefix_Exact, but no point as along as the actual
* pattern-matching implementations don't support it.) * pattern-matching implementations don't support it.)
* *


@ -1102,7 +1102,7 @@ setup_regexp_matches(text *orig_str, text *pattern, pg_re_flags *re_flags,
while (array_idx + matchctx->npatterns * 2 + 1 > array_len) while (array_idx + matchctx->npatterns * 2 + 1 > array_len)
{ {
array_len += array_len + 1; /* 2^n-1 => 2^(n+1)-1 */ array_len += array_len + 1; /* 2^n-1 => 2^(n+1)-1 */
if (array_len > MaxAllocSize/sizeof(int)) if (array_len > MaxAllocSize / sizeof(int))
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("too many regular expression matches"))); errmsg("too many regular expression matches")));
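
The growth rule in the hunk above (array_len += array_len + 1, i.e. 3, 7, 15, ...) combined with an allocation-size guard looks roughly like this in isolation; MY_MAX_ALLOC is a stand-in constant, not PostgreSQL's MaxAllocSize.

#include <stdio.h>
#include <stdlib.h>

#define MY_MAX_ALLOC 0x3fffffff     /* stand-in allocation limit, in bytes */

int
main(void)
{
    size_t  array_len = 3;          /* 2^2 - 1 */
    size_t  needed = 1000;

    while (needed > array_len)
    {
        array_len += array_len + 1; /* 3, 7, 15, 31, ... (2^n - 1) */
        if (array_len > MY_MAX_ALLOC / sizeof(int))
        {
            fprintf(stderr, "too many matches\n");
            return EXIT_FAILURE;
        }
    }
    printf("grew to %zu slots\n", array_len);   /* grew to 1023 slots */
    return 0;
}
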
@ -1119,6 +1119,7 @@ setup_regexp_matches(text *orig_str, text *pattern, pg_re_flags *re_flags,
{ {
int so = pmatch[i].rm_so; int so = pmatch[i].rm_so;
int eo = pmatch[i].rm_eo; int eo = pmatch[i].rm_eo;
matchctx->match_locs[array_idx++] = so; matchctx->match_locs[array_idx++] = so;
matchctx->match_locs[array_idx++] = eo; matchctx->match_locs[array_idx++] = eo;
if (so >= 0 && eo >= 0 && (eo - so) > maxlen) if (so >= 0 && eo >= 0 && (eo - so) > maxlen)
@ -1129,6 +1130,7 @@ setup_regexp_matches(text *orig_str, text *pattern, pg_re_flags *re_flags,
{ {
int so = pmatch[0].rm_so; int so = pmatch[0].rm_so;
int eo = pmatch[0].rm_eo; int eo = pmatch[0].rm_eo;
matchctx->match_locs[array_idx++] = so; matchctx->match_locs[array_idx++] = so;
matchctx->match_locs[array_idx++] = eo; matchctx->match_locs[array_idx++] = eo;
if (so >= 0 && eo >= 0 && (eo - so) > maxlen) if (so >= 0 && eo >= 0 && (eo - so) > maxlen)
@ -1190,10 +1192,10 @@ setup_regexp_matches(text *orig_str, text *pattern, pg_re_flags *re_flags,
* interest. * interest.
* *
* Worst case: assume we need the maximum size (maxlen*eml), but take * Worst case: assume we need the maximum size (maxlen*eml), but take
* advantage of the fact that the original string length in bytes is an * advantage of the fact that the original string length in bytes is
* upper bound on the byte length of any fetched substring (and we know * an upper bound on the byte length of any fetched substring (and we
* that len+1 is safe to allocate because the varlena header is longer * know that len+1 is safe to allocate because the varlena header is
* than 1 byte). * longer than 1 byte).
*/ */
if (maxsiz > orig_len) if (maxsiz > orig_len)
conv_bufsiz = orig_len + 1; conv_bufsiz = orig_len + 1;
@ -1251,6 +1253,7 @@ build_regexp_match_result(regexp_matches_ctx *matchctx)
int len = pg_wchar2mb_with_len(matchctx->wide_str + so, int len = pg_wchar2mb_with_len(matchctx->wide_str + so,
buf, buf,
eo - so); eo - so);
Assert(len < bufsiz); Assert(len < bufsiz);
elems[i] = PointerGetDatum(cstring_to_text_with_len(buf, len)); elems[i] = PointerGetDatum(cstring_to_text_with_len(buf, len));
nulls[i] = false; nulls[i] = false;
@ -1417,7 +1420,7 @@ build_regexp_split_result(regexp_matches_ctx *splitctx)
elog(ERROR, "invalid match starting position"); elog(ERROR, "invalid match starting position");
len = pg_wchar2mb_with_len(splitctx->wide_str + startpos, len = pg_wchar2mb_with_len(splitctx->wide_str + startpos,
buf, buf,
endpos-startpos); endpos - startpos);
Assert(len < bufsiz); Assert(len < bufsiz);
return PointerGetDatum(cstring_to_text_with_len(buf, len)); return PointerGetDatum(cstring_to_text_with_len(buf, len));
} }


@ -635,10 +635,10 @@ ri_restrict(TriggerData *trigdata, bool is_no_action)
oldslot = trigdata->tg_trigslot; oldslot = trigdata->tg_trigslot;
/* /*
* If another PK row now exists providing the old key values, we * If another PK row now exists providing the old key values, we should
* should not do anything. However, this check should only be * not do anything. However, this check should only be made in the NO
* made in the NO ACTION case; in RESTRICT cases we don't wish to * ACTION case; in RESTRICT cases we don't wish to allow another row to be
* allow another row to be substituted. * substituted.
*/ */
if (is_no_action && if (is_no_action &&
ri_Check_Pk_Match(pk_rel, fk_rel, oldslot, riinfo)) ri_Check_Pk_Match(pk_rel, fk_rel, oldslot, riinfo))
@ -651,8 +651,8 @@ ri_restrict(TriggerData *trigdata, bool is_no_action)
elog(ERROR, "SPI_connect failed"); elog(ERROR, "SPI_connect failed");
/* /*
* Fetch or prepare a saved plan for the restrict lookup (it's the * Fetch or prepare a saved plan for the restrict lookup (it's the same
* same query for delete and update cases) * query for delete and update cases)
*/ */
ri_BuildQueryKey(&qkey, riinfo, RI_PLAN_RESTRICT_CHECKREF); ri_BuildQueryKey(&qkey, riinfo, RI_PLAN_RESTRICT_CHECKREF);
@ -813,8 +813,8 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
} }
/* /*
* We have a plan now. Build up the arguments from the key values * We have a plan now. Build up the arguments from the key values in the
* in the deleted PK tuple and delete the referencing rows * deleted PK tuple and delete the referencing rows
*/ */
ri_PerformCheck(riinfo, &qkey, qplan, ri_PerformCheck(riinfo, &qkey, qplan,
fk_rel, pk_rel, fk_rel, pk_rel,
@ -1132,18 +1132,17 @@ ri_set(TriggerData *trigdata, bool is_set_null)
else else
{ {
/* /*
* If we just deleted or updated the PK row whose key was equal to * If we just deleted or updated the PK row whose key was equal to the
* the FK columns' default values, and a referencing row exists in * FK columns' default values, and a referencing row exists in the FK
* the FK table, we would have updated that row to the same values * table, we would have updated that row to the same values it already
* it already had --- and RI_FKey_fk_upd_check_required would * had --- and RI_FKey_fk_upd_check_required would hence believe no
* hence believe no check is necessary. So we need to do another * check is necessary. So we need to do another lookup now and in
* lookup now and in case a reference still exists, abort the * case a reference still exists, abort the operation. That is
* operation. That is already implemented in the NO ACTION * already implemented in the NO ACTION trigger, so just run it. (This
* trigger, so just run it. (This recheck is only needed in the * recheck is only needed in the SET DEFAULT case, since CASCADE would
* SET DEFAULT case, since CASCADE would remove such rows in case * remove such rows in case of a DELETE operation or would change the
* of a DELETE operation or would change the FK key values in case * FK key values in case of an UPDATE, while SET NULL is certain to
* of an UPDATE, while SET NULL is certain to result in rows that * result in rows that satisfy the FK constraint.)
* satisfy the FK constraint.)
*/ */
return ri_restrict(trigdata, true); return ri_restrict(trigdata, true);
} }
@ -1170,8 +1169,8 @@ RI_FKey_pk_upd_check_required(Trigger *trigger, Relation pk_rel,
riinfo = ri_FetchConstraintInfo(trigger, pk_rel, true); riinfo = ri_FetchConstraintInfo(trigger, pk_rel, true);
/* /*
* If any old key value is NULL, the row could not have been * If any old key value is NULL, the row could not have been referenced by
* referenced by an FK row, so no check is needed. * an FK row, so no check is needed.
*/ */
if (ri_NullCheck(RelationGetDescr(pk_rel), oldslot, riinfo, true) != RI_KEYS_NONE_NULL) if (ri_NullCheck(RelationGetDescr(pk_rel), oldslot, riinfo, true) != RI_KEYS_NONE_NULL)
return false; return false;
@ -1213,14 +1212,17 @@ RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel,
*/ */
if (ri_nullcheck == RI_KEYS_ALL_NULL) if (ri_nullcheck == RI_KEYS_ALL_NULL)
return false; return false;
/* /*
* If some new key values are NULL, the behavior depends on the match type. * If some new key values are NULL, the behavior depends on the match
* type.
*/ */
else if (ri_nullcheck == RI_KEYS_SOME_NULL) else if (ri_nullcheck == RI_KEYS_SOME_NULL)
{ {
switch (riinfo->confmatchtype) switch (riinfo->confmatchtype)
{ {
case FKCONSTR_MATCH_SIMPLE: case FKCONSTR_MATCH_SIMPLE:
/* /*
* If any new key value is NULL, the row must satisfy the * If any new key value is NULL, the row must satisfy the
* constraint, so no check is needed. * constraint, so no check is needed.
@ -1228,12 +1230,14 @@ RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel,
return false; return false;
case FKCONSTR_MATCH_PARTIAL: case FKCONSTR_MATCH_PARTIAL:
/* /*
* Don't know, must run full check. * Don't know, must run full check.
*/ */
break; break;
case FKCONSTR_MATCH_FULL: case FKCONSTR_MATCH_FULL:
/* /*
* If some new key values are NULL, the row fails the * If some new key values are NULL, the row fails the
* constraint. We must not throw error here, because the row * constraint. We must not throw error here, because the row
@ -1251,12 +1255,12 @@ RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel,
*/ */
/* /*
* If the original row was inserted by our own transaction, we * If the original row was inserted by our own transaction, we must fire
* must fire the trigger whether or not the keys are equal. This * the trigger whether or not the keys are equal. This is because our
* is because our UPDATE will invalidate the INSERT so that the * UPDATE will invalidate the INSERT so that the INSERT RI trigger will
* INSERT RI trigger will not do anything; so we had better do the * not do anything; so we had better do the UPDATE check. (We could skip
* UPDATE check. (We could skip this if we knew the INSERT * this if we knew the INSERT trigger already fired, but there is no easy
* trigger already fired, but there is no easy way to know that.) * way to know that.)
*/ */
xminDatum = slot_getsysattr(oldslot, MinTransactionIdAttributeNumber, &isnull); xminDatum = slot_getsysattr(oldslot, MinTransactionIdAttributeNumber, &isnull);
Assert(!isnull); Assert(!isnull);


@ -755,8 +755,8 @@ bpchareq(PG_FUNCTION_ARGS)
pg_newlocale_from_collation(collid)->deterministic) pg_newlocale_from_collation(collid)->deterministic)
{ {
/* /*
* Since we only care about equality or not-equality, we can avoid all the * Since we only care about equality or not-equality, we can avoid all
* expense of strcoll() here, and just do bitwise comparison. * the expense of strcoll() here, and just do bitwise comparison.
*/ */
if (len1 != len2) if (len1 != len2)
result = false; result = false;
@ -793,8 +793,8 @@ bpcharne(PG_FUNCTION_ARGS)
pg_newlocale_from_collation(collid)->deterministic) pg_newlocale_from_collation(collid)->deterministic)
{ {
/* /*
* Since we only care about equality or not-equality, we can avoid all the * Since we only care about equality or not-equality, we can avoid all
* expense of strcoll() here, and just do bitwise comparison. * the expense of strcoll() here, and just do bitwise comparison.
*/ */
if (len1 != len2) if (len1 != len2)
result = true; result = true;


@ -1723,11 +1723,11 @@ texteq(PG_FUNCTION_ARGS)
len2; len2;
/* /*
* Since we only care about equality or not-equality, we can avoid all the * Since we only care about equality or not-equality, we can avoid all
* expense of strcoll() here, and just do bitwise comparison. In fact, we * the expense of strcoll() here, and just do bitwise comparison. In
* don't even have to do a bitwise comparison if we can show the lengths * fact, we don't even have to do a bitwise comparison if we can show
* of the strings are unequal; which might save us from having to detoast * the lengths of the strings are unequal; which might save us from
* one or both values. * having to detoast one or both values.
*/ */
len1 = toast_raw_datum_size(arg1); len1 = toast_raw_datum_size(arg1);
len2 = toast_raw_datum_size(arg2); len2 = toast_raw_datum_size(arg2);
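
The shortcut described above, reduced to a standalone sketch (plain C strings instead of text datums, and a deterministic comparison is assumed): unequal lengths decide inequality before any byte comparison is needed.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool
fast_equal(const char *a, const char *b)
{
    size_t  len1 = strlen(a);
    size_t  len2 = strlen(b);

    if (len1 != len2)
        return false;               /* unequal lengths can never be equal */
    return memcmp(a, b, len1) == 0; /* plain bytewise comparison */
}

int
main(void)
{
    printf("%d %d\n", fast_equal("abc", "abc"), fast_equal("abc", "abcd"));
    return 0;                       /* prints: 1 0 */
}
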


@ -2635,9 +2635,9 @@ RelationClearRelation(Relation relation, bool rebuild)
* there should be no PartitionDirectory with a pointer to the old * there should be no PartitionDirectory with a pointer to the old
* entry. * entry.
* *
* Note that newrel and relation have already been swapped, so * Note that newrel and relation have already been swapped, so the
* the "old" partition descriptor is actually the one hanging off * "old" partition descriptor is actually the one hanging off of
* of newrel. * newrel.
*/ */
MemoryContextSetParent(newrel->rd_pdcxt, relation->rd_pdcxt); MemoryContextSetParent(newrel->rd_pdcxt, relation->rd_pdcxt);
newrel->rd_partdesc = NULL; newrel->rd_partdesc = NULL;


@ -653,6 +653,7 @@ hash_uint32_extended(uint32 k, uint64 seed)
/* report the result */ /* report the result */
PG_RETURN_UINT64(((uint64) b << 32) | c); PG_RETURN_UINT64(((uint64) b << 32) | c);
} }
/* /*
* string_hash: hash function for keys that are NUL-terminated strings. * string_hash: hash function for keys that are NUL-terminated strings.
* *


@ -592,8 +592,8 @@ InitializeSessionUserId(const char *rolename, Oid roleid)
AssertState(!OidIsValid(AuthenticatedUserId)); AssertState(!OidIsValid(AuthenticatedUserId));
/* /*
* Make sure syscache entries are flushed for recent catalog changes. * Make sure syscache entries are flushed for recent catalog changes. This
* This allows us to find roles that were created on-the-fly during * allows us to find roles that were created on-the-fly during
* authentication. * authentication.
*/ */
AcceptInvalidationMessages(); AcceptInvalidationMessages();


@ -459,13 +459,13 @@ const struct config_enum_entry ssl_protocol_versions_info[] = {
static struct config_enum_entry shared_memory_options[] = { static struct config_enum_entry shared_memory_options[] = {
#ifndef WIN32 #ifndef WIN32
{ "sysv", SHMEM_TYPE_SYSV, false}, {"sysv", SHMEM_TYPE_SYSV, false},
#endif #endif
#ifndef EXEC_BACKEND #ifndef EXEC_BACKEND
{ "mmap", SHMEM_TYPE_MMAP, false}, {"mmap", SHMEM_TYPE_MMAP, false},
#endif #endif
#ifdef WIN32 #ifdef WIN32
{ "windows", SHMEM_TYPE_WINDOWS, false}, {"windows", SHMEM_TYPE_WINDOWS, false},
#endif #endif
{NULL, 0, false} {NULL, 0, false}
}; };
@ -1599,6 +1599,7 @@ static struct config_bool ConfigureNamesBool[] =
true, true,
NULL, NULL, NULL NULL, NULL, NULL
}, },
/* /*
* WITH OIDS support, and consequently default_with_oids, was removed in * WITH OIDS support, and consequently default_with_oids, was removed in
* PostgreSQL 12, but we tolerate the parameter being set to false to * PostgreSQL 12, but we tolerate the parameter being set to false to
@ -8900,9 +8901,9 @@ get_explain_guc_options(int *num)
*num = 0; *num = 0;
/* /*
* Allocate enough space to fit all GUC_EXPLAIN options. We may not * Allocate enough space to fit all GUC_EXPLAIN options. We may not need
* need all the space, but there are fairly few such options so we * all the space, but there are fairly few such options so we don't waste
* don't waste a lot of memory. * a lot of memory.
*/ */
result = palloc(sizeof(struct config_generic *) * num_guc_explain_variables); result = palloc(sizeof(struct config_generic *) * num_guc_explain_variables);
@ -8929,6 +8930,7 @@ get_explain_guc_options(int *num)
case PGC_BOOL: case PGC_BOOL:
{ {
struct config_bool *lconf = (struct config_bool *) conf; struct config_bool *lconf = (struct config_bool *) conf;
modified = (lconf->boot_val != *(lconf->variable)); modified = (lconf->boot_val != *(lconf->variable));
} }
break; break;
@ -8936,6 +8938,7 @@ get_explain_guc_options(int *num)
case PGC_INT: case PGC_INT:
{ {
struct config_int *lconf = (struct config_int *) conf; struct config_int *lconf = (struct config_int *) conf;
modified = (lconf->boot_val != *(lconf->variable)); modified = (lconf->boot_val != *(lconf->variable));
} }
break; break;
@ -8943,6 +8946,7 @@ get_explain_guc_options(int *num)
case PGC_REAL: case PGC_REAL:
{ {
struct config_real *lconf = (struct config_real *) conf; struct config_real *lconf = (struct config_real *) conf;
modified = (lconf->boot_val != *(lconf->variable)); modified = (lconf->boot_val != *(lconf->variable));
} }
break; break;
@ -8950,6 +8954,7 @@ get_explain_guc_options(int *num)
case PGC_STRING: case PGC_STRING:
{ {
struct config_string *lconf = (struct config_string *) conf; struct config_string *lconf = (struct config_string *) conf;
modified = (strcmp(lconf->boot_val, *(lconf->variable)) != 0); modified = (strcmp(lconf->boot_val, *(lconf->variable)) != 0);
} }
break; break;
@ -8957,6 +8962,7 @@ get_explain_guc_options(int *num)
case PGC_ENUM: case PGC_ENUM:
{ {
struct config_enum *lconf = (struct config_enum *) conf; struct config_enum *lconf = (struct config_enum *) conf;
modified = (lconf->boot_val != *(lconf->variable)); modified = (lconf->boot_val != *(lconf->variable));
} }
break; break;


@ -1676,6 +1676,7 @@ ensure_active_superblock(dsa_area *area, dsa_area_pool *pool,
return false; return false;
} }
} }
/* /*
* This shouldn't happen: get_best_segment() or make_new_segment() * This shouldn't happen: get_best_segment() or make_new_segment()
* promised that we can successfully allocate npages. * promised that we can successfully allocate npages.


@ -3067,8 +3067,8 @@ main(int argc, char *argv[])
char pg_ctl_path[MAXPGPATH]; char pg_ctl_path[MAXPGPATH];
/* /*
* Ensure that buffering behavior of stdout matches what it is * Ensure that buffering behavior of stdout matches what it is in
* in interactive usage (at least on most platforms). This prevents * interactive usage (at least on most platforms). This prevents
* unexpected output ordering when, eg, output is redirected to a file. * unexpected output ordering when, eg, output is redirected to a file.
* POSIX says we must do this before any other usage of these files. * POSIX says we must do this before any other usage of these files.
*/ */


@ -123,7 +123,8 @@ CleanupPriorWALFiles(void)
if ((IsXLogFileName(walfile) || IsPartialXLogFileName(walfile)) && if ((IsXLogFileName(walfile) || IsPartialXLogFileName(walfile)) &&
strcmp(walfile + 8, exclusiveCleanupFileName + 8) < 0) strcmp(walfile + 8, exclusiveCleanupFileName + 8) < 0)
{ {
char WALFilePath[MAXPGPATH * 2]; /* the file path including archive */ char WALFilePath[MAXPGPATH * 2]; /* the file path
* including archive */
/* /*
* Use the original file name again now, including any * Use the original file name again now, including any


@ -3415,7 +3415,8 @@ static void
_selectTableAccessMethod(ArchiveHandle *AH, const char *tableam) _selectTableAccessMethod(ArchiveHandle *AH, const char *tableam)
{ {
PQExpBuffer cmd; PQExpBuffer cmd;
const char *want, *have; const char *want,
*have;
have = AH->currTableAm; have = AH->currTableAm;
want = tableam; want = tableam;


@ -910,11 +910,11 @@ _readBlockHeader(ArchiveHandle *AH, int *type, int *id)
int byt; int byt;
/* /*
* Note: if we are at EOF with a pre-1.3 input file, we'll fatal() * Note: if we are at EOF with a pre-1.3 input file, we'll fatal() inside
* inside ReadInt rather than returning EOF. It doesn't seem worth * ReadInt rather than returning EOF. It doesn't seem worth jumping
* jumping through hoops to deal with that case better, because no such * through hoops to deal with that case better, because no such files are
* files are likely to exist in the wild: only some 7.1 development * likely to exist in the wild: only some 7.1 development versions of
* versions of pg_dump ever generated such files. * pg_dump ever generated such files.
*/ */
if (AH->version < K_VERS_1_3) if (AH->version < K_VERS_1_3)
*type = BLK_DATA; *type = BLK_DATA;


@ -1112,13 +1112,14 @@ setup_connection(Archive *AH, const char *dumpencoding,
ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES"); ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
/* /*
* Use an explicitly specified extra_float_digits if it has been * Use an explicitly specified extra_float_digits if it has been provided.
* provided. Otherwise, set extra_float_digits so that we can dump float * Otherwise, set extra_float_digits so that we can dump float data
* data exactly (given correctly implemented float I/O code, anyway). * exactly (given correctly implemented float I/O code, anyway).
*/ */
if (have_extra_float_digits) if (have_extra_float_digits)
{ {
PQExpBuffer q = createPQExpBuffer(); PQExpBuffer q = createPQExpBuffer();
appendPQExpBuffer(q, "SET extra_float_digits TO %d", appendPQExpBuffer(q, "SET extra_float_digits TO %d",
extra_float_digits); extra_float_digits);
ExecuteSqlStatement(AH, q->data); ExecuteSqlStatement(AH, q->data);


@ -1406,8 +1406,8 @@ expand_dbname_patterns(PGconn *conn,
/* /*
* The loop below runs multiple SELECTs, which might sometimes result in * The loop below runs multiple SELECTs, which might sometimes result in
* duplicate entries in the name list, but we don't care, since all * duplicate entries in the name list, but we don't care, since all we're
* we're going to do is test membership of the list. * going to do is test membership of the list.
*/ */
for (SimpleStringListCell *cell = patterns->head; cell; cell = cell->next) for (SimpleStringListCell *cell = patterns->head; cell; cell = cell->next)


@ -140,11 +140,12 @@ get_control_data(ClusterInfo *cluster, bool live_check)
p++; /* remove ':' char */ p++; /* remove ':' char */
/* /*
* We checked earlier for a postmaster lock file, and if we found * We checked earlier for a postmaster lock file, and if we
* one, we tried to start/stop the server to replay the WAL. However, * found one, we tried to start/stop the server to replay the
* pg_ctl -m immediate doesn't leave a lock file, but does require * WAL. However, pg_ctl -m immediate doesn't leave a lock
* WAL replay, so we check here that the server was shut down cleanly, * file, but does require WAL replay, so we check here that
* from the controldata perspective. * the server was shut down cleanly, from the controldata
* perspective.
*/ */
/* remove leading spaces */ /* remove leading spaces */
while (*p == ' ') while (*p == ' ')


@ -213,16 +213,16 @@ check_loadable_libraries(void)
{ {
/* /*
* In Postgres 9.0, Python 3 support was added, and to do that, a * In Postgres 9.0, Python 3 support was added, and to do that, a
* plpython2u language was created with library name plpython2.so as a * plpython2u language was created with library name plpython2.so
* symbolic link to plpython.so. In Postgres 9.1, only the * as a symbolic link to plpython.so. In Postgres 9.1, only the
* plpython2.so library was created, and both plpythonu and plpython2u * plpython2.so library was created, and both plpythonu and
* pointing to it. For this reason, any reference to library name * plpython2u pointing to it. For this reason, any reference to
* "plpython" in an old PG <= 9.1 cluster must look for "plpython2" in * library name "plpython" in an old PG <= 9.1 cluster must look
* the new cluster. * for "plpython2" in the new cluster.
* *
* For this case, we could check pg_pltemplate, but that only works * For this case, we could check pg_pltemplate, but that only
* for languages, and does not help with function shared objects, so * works for languages, and does not help with function shared
* we just do a general fix. * objects, so we just do a general fix.
*/ */
if (GET_MAJOR_VERSION(old_cluster.major_version) < 901 && if (GET_MAJOR_VERSION(old_cluster.major_version) < 901 &&
strcmp(lib, "$libdir/plpython") == 0) strcmp(lib, "$libdir/plpython") == 0)


@ -422,8 +422,8 @@ typedef struct
/* /*
* Separate randomness for each thread. Each thread option uses its own * Separate randomness for each thread. Each thread option uses its own
* random state to make all of them independent of each other and therefore * random state to make all of them independent of each other and
* deterministic at the thread level. * therefore deterministic at the thread level.
*/ */
RandomState ts_choose_rs; /* random state for selecting a script */ RandomState ts_choose_rs; /* random state for selecting a script */
RandomState ts_throttle_rs; /* random state for transaction throttling */ RandomState ts_throttle_rs; /* random state for transaction throttling */


@ -671,9 +671,9 @@ to_chars_df(const floating_decimal_64 v, const uint32 olength, char *const resul
else else
{ {
/* /*
* We can save some code later by pre-filling with zeros. We know * We can save some code later by pre-filling with zeros. We know that
* that there can be no more than 16 output digits in this form, * there can be no more than 16 output digits in this form, otherwise
* otherwise we would not choose fixed-point output. * we would not choose fixed-point output.
*/ */
Assert(exp < 16 && exp + olength <= 16); Assert(exp < 16 && exp + olength <= 16);
memset(result, '0', 16); memset(result, '0', 16);
@ -800,8 +800,8 @@ to_chars(floating_decimal_64 v, const bool sign, char *const result)
/* /*
* The thresholds for fixed-point output are chosen to match printf * The thresholds for fixed-point output are chosen to match printf
* defaults. Beware that both the code of to_chars_df and the value * defaults. Beware that both the code of to_chars_df and the value of
* of DOUBLE_SHORTEST_DECIMAL_LEN are sensitive to these thresholds. * DOUBLE_SHORTEST_DECIMAL_LEN are sensitive to these thresholds.
*/ */
if (exp >= -4 && exp < 15) if (exp >= -4 && exp < 15)
return to_chars_df(v, olength, result + index) + sign; return to_chars_df(v, olength, result + index) + sign;
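
For reference on the "printf defaults" the comment above appeals to: C's %g conversion switches to scientific notation when the decimal exponent drops below -4 or reaches the precision (6 by default). A quick standalone demonstration, unrelated to the Ryu code itself:

#include <stdio.h>

int
main(void)
{
    printf("%g\n", 0.0001);     /* 0.0001       (exponent -4: still fixed) */
    printf("%g\n", 0.00001);    /* 1e-05        (exponent -5: scientific)  */
    printf("%g\n", 123456.0);   /* 123456       (exponent 5: fixed)        */
    printf("%g\n", 1234567.0);  /* 1.23457e+06  (exponent 6: scientific)   */
    return 0;
}
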


@ -481,10 +481,10 @@ to_chars_f(const floating_decimal_32 v, const uint32 olength, char *const result
else else
{ {
/* /*
* We can save some code later by pre-filling with zeros. We know * We can save some code later by pre-filling with zeros. We know that
* that there can be no more than 6 output digits in this form, * there can be no more than 6 output digits in this form, otherwise
* otherwise we would not choose fixed-point output. memset 8 * we would not choose fixed-point output. memset 8 rather than 6
* rather than 6 bytes to let the compiler optimize it. * bytes to let the compiler optimize it.
*/ */
Assert(exp < 6 && exp + olength <= 6); Assert(exp < 6 && exp + olength <= 6);
memset(result, '0', 8); memset(result, '0', 8);
@ -575,8 +575,8 @@ to_chars(const floating_decimal_32 v, const bool sign, char *const result)
/* /*
* The thresholds for fixed-point output are chosen to match printf * The thresholds for fixed-point output are chosen to match printf
* defaults. Beware that both the code of to_chars_f and the value * defaults. Beware that both the code of to_chars_f and the value of
* of FLOAT_SHORTEST_DECIMAL_LEN are sensitive to these thresholds. * FLOAT_SHORTEST_DECIMAL_LEN are sensitive to these thresholds.
*/ */
if (exp >= -4 && exp < 6) if (exp >= -4 && exp < 6)
return to_chars_f(v, olength, result + index) + sign; return to_chars_f(v, olength, result + index) + sign;


@ -18,8 +18,8 @@ enum pg_log_level __pg_log_level;
static const char *progname; static const char *progname;
static int log_flags; static int log_flags;
static void (*log_pre_callback)(void); static void (*log_pre_callback) (void);
static void (*log_locus_callback)(const char **, uint64 *); static void (*log_locus_callback) (const char **, uint64 *);
static const char *sgr_error = NULL; static const char *sgr_error = NULL;
static const char *sgr_warning = NULL; static const char *sgr_warning = NULL;
@ -111,19 +111,19 @@ pg_logging_set_level(enum pg_log_level new_level)
} }
void void
pg_logging_set_pre_callback(void (*cb)(void)) pg_logging_set_pre_callback(void (*cb) (void))
{ {
log_pre_callback = cb; log_pre_callback = cb;
} }
void void
pg_logging_set_locus_callback(void (*cb)(const char **filename, uint64 *lineno)) pg_logging_set_locus_callback(void (*cb) (const char **filename, uint64 *lineno))
{ {
log_locus_callback = cb; log_locus_callback = cb;
} }
void void
pg_log_generic(enum pg_log_level level, const char * pg_restrict fmt, ...) pg_log_generic(enum pg_log_level level, const char *pg_restrict fmt,...)
{ {
va_list ap; va_list ap;
@ -133,7 +133,7 @@ pg_log_generic(enum pg_log_level level, const char * pg_restrict fmt, ...)
} }
void void
pg_log_generic_v(enum pg_log_level level, const char * pg_restrict fmt, va_list ap) pg_log_generic_v(enum pg_log_level level, const char *pg_restrict fmt, va_list ap)
{ {
int save_errno = errno; int save_errno = errno;
const char *filename = NULL; const char *filename = NULL;


@ -759,10 +759,9 @@ pglz_decompress(const char *source, int32 slen, char *dest,
} }
/* /*
* Check we decompressed the right amount. * Check we decompressed the right amount. If we are slicing, then we
* If we are slicing, then we won't necessarily * won't necessarily be at the end of the source or dest buffers when we
* be at the end of the source or dest buffers * hit a stop, so we don't test them.
* when we hit a stop, so we don't test them.
*/ */
if (check_complete && (dp != destend || sp != srcend)) if (check_complete && (dp != destend || sp != srcend))
return -1; return -1;
@ -770,5 +769,5 @@ pglz_decompress(const char *source, int32 slen, char *dest,
/* /*
* That's it. * That's it.
*/ */
return (char*)dp - dest; return (char *) dp - dest;
} }


@ -18,9 +18,10 @@
#include "lib/stringinfo.h" #include "lib/stringinfo.h"
#define XLOG_GIST_PAGE_UPDATE 0x00 #define XLOG_GIST_PAGE_UPDATE 0x00
#define XLOG_GIST_DELETE 0x10 /* delete leaf index tuples for a page */ #define XLOG_GIST_DELETE 0x10 /* delete leaf index tuples for a
#define XLOG_GIST_PAGE_REUSE 0x20 /* old page is about to be reused from * page */
* FSM */ #define XLOG_GIST_PAGE_REUSE 0x20 /* old page is about to be reused
* from FSM */
#define XLOG_GIST_PAGE_SPLIT 0x30 #define XLOG_GIST_PAGE_SPLIT 0x30
/* #define XLOG_GIST_INSERT_COMPLETE 0x40 */ /* not used anymore */ /* #define XLOG_GIST_INSERT_COMPLETE 0x40 */ /* not used anymore */
/* #define XLOG_GIST_CREATE_INDEX 0x50 */ /* not used anymore */ /* #define XLOG_GIST_CREATE_INDEX 0x50 */ /* not used anymore */
@ -83,7 +84,8 @@ typedef struct gistxlogPageSplit
typedef struct gistxlogPageDelete typedef struct gistxlogPageDelete
{ {
TransactionId deleteXid; /* last Xid which could see page in scan */ TransactionId deleteXid; /* last Xid which could see page in scan */
OffsetNumber downlinkOffset; /* Offset of downlink referencing this page */ OffsetNumber downlinkOffset; /* Offset of downlink referencing this
* page */
} gistxlogPageDelete; } gistxlogPageDelete;
#define SizeOfGistxlogPageDelete (offsetof(gistxlogPageDelete, downlinkOffset) + sizeof(OffsetNumber)) #define SizeOfGistxlogPageDelete (offsetof(gistxlogPageDelete, downlinkOffset) + sizeof(OffsetNumber))


@ -18,7 +18,7 @@
#include "storage/off.h" #include "storage/off.h"
/* XLOG record types for SPGiST */ /* XLOG record types for SPGiST */
/* #define XLOG_SPGIST_CREATE_INDEX 0x00 */ /* not used anymore */ /* #define XLOG_SPGIST_CREATE_INDEX 0x00 */ /* not used anymore */
#define XLOG_SPGIST_ADD_LEAF 0x10 #define XLOG_SPGIST_ADD_LEAF 0x10
#define XLOG_SPGIST_MOVE_LEAFS 0x20 #define XLOG_SPGIST_MOVE_LEAFS 0x20
#define XLOG_SPGIST_ADD_NODE 0x30 #define XLOG_SPGIST_ADD_NODE 0x30


@ -575,10 +575,10 @@ typedef struct TableAmRoutine
/* /*
* This callback should return true if the relation requires a TOAST table * This callback should return true if the relation requires a TOAST table
* and false if it does not. It may wish to examine the relation's * and false if it does not. It may wish to examine the relation's tuple
* tuple descriptor before making a decision, but if it uses some other * descriptor before making a decision, but if it uses some other method
* method of storing large values (or if it does not support them) it can * of storing large values (or if it does not support them) it can simply
* simply return false. * return false.
*/ */
bool (*relation_needs_toast_table) (Relation rel); bool (*relation_needs_toast_table) (Relation rel);


@ -136,7 +136,8 @@ typedef enum ObjectClass
#define PERFORM_DELETION_QUIETLY 0x0004 /* suppress notices */ #define PERFORM_DELETION_QUIETLY 0x0004 /* suppress notices */
#define PERFORM_DELETION_SKIP_ORIGINAL 0x0008 /* keep original obj */ #define PERFORM_DELETION_SKIP_ORIGINAL 0x0008 /* keep original obj */
#define PERFORM_DELETION_SKIP_EXTENSIONS 0x0010 /* keep extensions */ #define PERFORM_DELETION_SKIP_EXTENSIONS 0x0010 /* keep extensions */
#define PERFORM_DELETION_CONCURRENT_LOCK 0x0020 /* normal drop with concurrent lock mode */ #define PERFORM_DELETION_CONCURRENT_LOCK 0x0020 /* normal drop with
* concurrent lock mode */
/* in dependency.c */ /* in dependency.c */


@ -34,7 +34,8 @@ CATALOG(pg_attrdef,2604,AttrDefaultRelationId)
int16 adnum; /* attnum of attribute */ int16 adnum; /* attnum of attribute */
#ifdef CATALOG_VARLEN /* variable-length fields start here */ #ifdef CATALOG_VARLEN /* variable-length fields start here */
pg_node_tree adbin BKI_FORCE_NOT_NULL; /* nodeToString representation of default */ pg_node_tree adbin BKI_FORCE_NOT_NULL; /* nodeToString representation of
* default */
#endif #endif
} FormData_pg_attrdef; } FormData_pg_attrdef;


@ -35,7 +35,8 @@ CATALOG(pg_default_acl,826,DefaultAclRelationId)
char defaclobjtype; /* see DEFACLOBJ_xxx constants below */ char defaclobjtype; /* see DEFACLOBJ_xxx constants below */
#ifdef CATALOG_VARLEN /* variable-length fields start here */ #ifdef CATALOG_VARLEN /* variable-length fields start here */
aclitem defaclacl[1] BKI_FORCE_NOT_NULL; /* permissions to add at CREATE time */ aclitem defaclacl[1] BKI_FORCE_NOT_NULL; /* permissions to add at
* CREATE time */
#endif #endif
} FormData_pg_default_acl; } FormData_pg_default_acl;


@ -35,7 +35,8 @@ CATALOG(pg_policy,3256,PolicyRelationId)
bool polpermissive; /* restrictive or permissive policy */ bool polpermissive; /* restrictive or permissive policy */
#ifdef CATALOG_VARLEN #ifdef CATALOG_VARLEN
Oid polroles[1] BKI_FORCE_NOT_NULL; /* Roles associated with policy */ Oid polroles[1] BKI_FORCE_NOT_NULL; /* Roles associated with
* policy */
pg_node_tree polqual; /* Policy quals. */ pg_node_tree polqual; /* Policy quals. */
pg_node_tree polwithcheck; /* WITH CHECK quals. */ pg_node_tree polwithcheck; /* WITH CHECK quals. */
#endif #endif


@ -66,11 +66,11 @@ extern enum pg_log_level __pg_log_level;
void pg_logging_init(const char *argv0); void pg_logging_init(const char *argv0);
void pg_logging_config(int new_flags); void pg_logging_config(int new_flags);
void pg_logging_set_level(enum pg_log_level new_level); void pg_logging_set_level(enum pg_log_level new_level);
void pg_logging_set_pre_callback(void (*cb)(void)); void pg_logging_set_pre_callback(void (*cb) (void));
void pg_logging_set_locus_callback(void (*cb)(const char **filename, uint64 *lineno)); void pg_logging_set_locus_callback(void (*cb) (const char **filename, uint64 *lineno));
void pg_log_generic(enum pg_log_level level, const char * pg_restrict fmt, ...) pg_attribute_printf(2, 3); void pg_log_generic(enum pg_log_level level, const char *pg_restrict fmt,...) pg_attribute_printf(2, 3);
void pg_log_generic_v(enum pg_log_level level, const char * pg_restrict fmt, va_list ap) pg_attribute_printf(2, 0); void pg_log_generic_v(enum pg_log_level level, const char *pg_restrict fmt, va_list ap) pg_attribute_printf(2, 0);
#define pg_log_fatal(...) do { \ #define pg_log_fatal(...) do { \
if (likely(__pg_log_level <= PG_LOG_FATAL)) pg_log_generic(PG_LOG_FATAL, __VA_ARGS__); \ if (likely(__pg_log_level <= PG_LOG_FATAL)) pg_log_generic(PG_LOG_FATAL, __VA_ARGS__); \


@ -138,10 +138,10 @@ struct TupleTableSlotOps
size_t base_slot_size; size_t base_slot_size;
/* Initialization. */ /* Initialization. */
void (*init)(TupleTableSlot *slot); void (*init) (TupleTableSlot *slot);
/* Destruction. */ /* Destruction. */
void (*release)(TupleTableSlot *slot); void (*release) (TupleTableSlot *slot);
/* /*
* Clear the contents of the slot. Only the contents are expected to be * Clear the contents of the slot. Only the contents are expected to be
@ -149,7 +149,7 @@ struct TupleTableSlotOps
* this callback should free the memory allocated for the tuple contained * this callback should free the memory allocated for the tuple contained
* in the slot. * in the slot.
*/ */
void (*clear)(TupleTableSlot *slot); void (*clear) (TupleTableSlot *slot);
/* /*
* Fill up first natts entries of tts_values and tts_isnull arrays with * Fill up first natts entries of tts_values and tts_isnull arrays with
@ -158,20 +158,20 @@ struct TupleTableSlotOps
* in which case it should set tts_nvalid to the number of returned * in which case it should set tts_nvalid to the number of returned
* columns. * columns.
*/ */
void (*getsomeattrs)(TupleTableSlot *slot, int natts); void (*getsomeattrs) (TupleTableSlot *slot, int natts);
/* /*
* Returns value of the given system attribute as a datum and sets isnull * Returns value of the given system attribute as a datum and sets isnull
* to false, if it's not NULL. Throws an error if the slot type does not * to false, if it's not NULL. Throws an error if the slot type does not
* support system attributes. * support system attributes.
*/ */
Datum (*getsysattr)(TupleTableSlot *slot, int attnum, bool *isnull); Datum (*getsysattr) (TupleTableSlot *slot, int attnum, bool *isnull);
/* /*
* Make the contents of the slot solely depend on the slot, and not on * Make the contents of the slot solely depend on the slot, and not on
* underlying resources (like another memory context, buffers, etc). * underlying resources (like another memory context, buffers, etc).
*/ */
void (*materialize)(TupleTableSlot *slot); void (*materialize) (TupleTableSlot *slot);
/* /*
* Copy the contents of the source slot into the destination slot's own * Copy the contents of the source slot into the destination slot's own
@ -185,7 +185,7 @@ struct TupleTableSlotOps
* heap tuple, it should not implement this callback and should set it as * heap tuple, it should not implement this callback and should set it as
* NULL. * NULL.
*/ */
HeapTuple (*get_heap_tuple)(TupleTableSlot *slot); HeapTuple (*get_heap_tuple) (TupleTableSlot *slot);
/* /*
* Return a minimal tuple "owned" by the slot. It is slot's responsibility * Return a minimal tuple "owned" by the slot. It is slot's responsibility
@ -193,7 +193,7 @@ struct TupleTableSlotOps
* "own" a minimal tuple, it should not implement this callback and should * "own" a minimal tuple, it should not implement this callback and should
* set it as NULL. * set it as NULL.
*/ */
MinimalTuple (*get_minimal_tuple)(TupleTableSlot *slot); MinimalTuple (*get_minimal_tuple) (TupleTableSlot *slot);
/* /*
* Return a copy of heap tuple representing the contents of the slot. The * Return a copy of heap tuple representing the contents of the slot. The
@ -203,17 +203,17 @@ struct TupleTableSlotOps
* the slot i.e. the caller has to take responsibilty to free memory * the slot i.e. the caller has to take responsibilty to free memory
* consumed by the slot. * consumed by the slot.
*/ */
HeapTuple (*copy_heap_tuple)(TupleTableSlot *slot); HeapTuple (*copy_heap_tuple) (TupleTableSlot *slot);
/* /*
* Return a copy of minimal tuple representing the contents of the slot. The * Return a copy of minimal tuple representing the contents of the slot.
* copy needs to be palloc'd in the current memory context. The slot * The copy needs to be palloc'd in the current memory context. The slot
* itself is expected to remain unaffected. It is *not* expected to have * itself is expected to remain unaffected. It is *not* expected to have
* meaningful "system columns" in the copy. The copy is not "owned" by * meaningful "system columns" in the copy. The copy is not "owned" by
* the slot i.e. the caller has to take responsibility to free memory * the slot i.e. the caller has to take responsibility to free memory
* consumed by the slot. * consumed by the slot.
*/ */
MinimalTuple (*copy_minimal_tuple)(TupleTableSlot *slot); MinimalTuple (*copy_minimal_tuple) (TupleTableSlot *slot);
}; };
/* /*
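The comments above spell out the contract of each TupleTableSlotOps callback. As a rough, hypothetical sketch (not part of this commit, with invented helper names, and omitting the struct's other members such as init, release, copyslot and base_slot_size), a custom slot type could wire a few of these callbacks up like this:

#include "postgres.h"
#include "executor/tuptable.h"

/* hypothetical clear callback: drop the owned tuple and mark the slot empty */
static void
myslot_clear(TupleTableSlot *slot)
{
	slot->tts_nvalid = 0;
	slot->tts_flags |= TTS_FLAG_EMPTY;
}

/* hypothetical getsomeattrs callback: deform into tts_values / tts_isnull */
static void
myslot_getsomeattrs(TupleTableSlot *slot, int natts)
{
	/* fill the first natts entries here and set tts_nvalid accordingly */
}

static const TupleTableSlotOps TTSOpsMySlot = {
	.clear = myslot_clear,
	.getsomeattrs = myslot_getsomeattrs,
	/* .getsysattr, .materialize, .copy_heap_tuple, ... would follow */
};

In practice every member left zero-initialized above would still need a real implementation (or an explicit NULL where the comments say the callback may be omitted).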

View File

@ -1113,8 +1113,8 @@ typedef struct ModifyTableState
PlanState **mt_plans; /* subplans (one per target rel) */ PlanState **mt_plans; /* subplans (one per target rel) */
int mt_nplans; /* number of plans in the array */ int mt_nplans; /* number of plans in the array */
int mt_whichplan; /* which one is being executed (0..n-1) */ int mt_whichplan; /* which one is being executed (0..n-1) */
TupleTableSlot** mt_scans; /* input tuple corresponding to underlying TupleTableSlot **mt_scans; /* input tuple corresponding to underlying
plans */ * plans */
ResultRelInfo *resultRelInfo; /* per-subplan target relations */ ResultRelInfo *resultRelInfo; /* per-subplan target relations */
ResultRelInfo *rootResultRelInfo; /* root target relation (partitioned ResultRelInfo *rootResultRelInfo; /* root target relation (partitioned
* table root) */ * table root) */

View File

@ -508,6 +508,7 @@ extern char *inet_net_ntop(int af, const void *src, int bits,
/* port/pg_strong_random.c */ /* port/pg_strong_random.c */
extern bool pg_strong_random(void *buf, size_t len); extern bool pg_strong_random(void *buf, size_t len);
/* /*
* pg_backend_random used to be a wrapper for pg_strong_random before * pg_backend_random used to be a wrapper for pg_strong_random before
* Postgres 12 for the backend code. * Postgres 12 for the backend code.
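For context, pg_strong_random() simply fills a caller-supplied buffer with cryptographically strong bytes and reports failure. A minimal caller-side sketch (assumed code, with an invented helper name, not part of this commit):

#include "postgres.h"		/* via c.h this pulls in port.h, which declares pg_strong_random */

/* hypothetical helper: fill buf with len strong random bytes */
static bool
make_random_token(uint8 *buf, size_t len)
{
	/* pg_strong_random() returns false when no strong randomness source is available */
	return pg_strong_random(buf, len);
}

A caller would typically treat a false return as a hard error rather than falling back to a weaker source.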

View File

@ -47,8 +47,8 @@ typedef struct LogicalDecodingContext
/* /*
* Marks the logical decoding context as fast forward decoding one. Such a * Marks the logical decoding context as fast forward decoding one. Such a
* context does not have plugin loaded so most of the following * context does not have plugin loaded so most of the following properties
* properties are unused. * are unused.
*/ */
bool fast_forward; bool fast_forward;

View File

@ -402,7 +402,7 @@ void ReorderBufferReturnTupleBuf(ReorderBuffer *, ReorderBufferTupleBuf *tuple)
ReorderBufferChange *ReorderBufferGetChange(ReorderBuffer *); ReorderBufferChange *ReorderBufferGetChange(ReorderBuffer *);
void ReorderBufferReturnChange(ReorderBuffer *, ReorderBufferChange *); void ReorderBufferReturnChange(ReorderBuffer *, ReorderBufferChange *);
Oid * ReorderBufferGetRelids(ReorderBuffer *, int nrelids); Oid *ReorderBufferGetRelids(ReorderBuffer *, int nrelids);
void ReorderBufferReturnRelids(ReorderBuffer *, Oid *relids); void ReorderBufferReturnRelids(ReorderBuffer *, Oid *relids);
void ReorderBufferQueueChange(ReorderBuffer *, TransactionId, XLogRecPtr lsn, ReorderBufferChange *); void ReorderBufferQueueChange(ReorderBuffer *, TransactionId, XLogRecPtr lsn, ReorderBufferChange *);
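These declarations follow a paired allocate/queue-or-return discipline. A hedged sketch of how a decoder-side caller might use them (the helper name and surrounding context are assumptions, not code from this commit):

#include "postgres.h"
#include "access/xlogdefs.h"
#include "replication/reorderbuffer.h"

/* hypothetical helper: allocate a change, fill it in, and queue it */
static void
queue_insert_change(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn)
{
	ReorderBufferChange *change = ReorderBufferGetChange(rb);

	change->action = REORDER_BUFFER_CHANGE_INSERT;
	/* ... fill in change->data.tp here ... */

	/* queueing hands ownership of the change over to the reorder buffer */
	ReorderBufferQueueChange(rb, xid, lsn, change);

	/*
	 * A change that ends up not being queued would instead be released with
	 * ReorderBufferReturnChange(rb, change).
	 */
}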

View File

@ -71,8 +71,8 @@ extern MVDependencies *statext_dependencies_deserialize(bytea *data);
extern MCVList *statext_mcv_build(int numrows, HeapTuple *rows, extern MCVList *statext_mcv_build(int numrows, HeapTuple *rows,
Bitmapset *attrs, VacAttrStats **stats, Bitmapset *attrs, VacAttrStats **stats,
double totalrows); double totalrows);
extern bytea *statext_mcv_serialize(MCVList * mcv, VacAttrStats **stats); extern bytea *statext_mcv_serialize(MCVList *mcv, VacAttrStats **stats);
extern MCVList * statext_mcv_deserialize(bytea *data); extern MCVList *statext_mcv_deserialize(bytea *data);
extern MultiSortSupport multi_sort_init(int ndims); extern MultiSortSupport multi_sort_init(int ndims);
extern void multi_sort_add_dimension(MultiSortSupport mss, int sortdim, extern void multi_sort_add_dimension(MultiSortSupport mss, int sortdim,

View File

@ -25,8 +25,8 @@ static bool find_cursor(const char *, const struct connection *);
* others --- keep same as the parameters in ECPGdo() function * others --- keep same as the parameters in ECPGdo() function
*/ */
bool bool
ECPGopen(const char *cursor_name,const char *prepared_name, ECPGopen(const char *cursor_name, const char *prepared_name,
const int lineno, const int compat,const int force_indicator, const int lineno, const int compat, const int force_indicator,
const char *connection_name, const bool questionmarks, const char *connection_name, const bool questionmarks,
const int st, const char *query,...) const int st, const char *query,...)
{ {
@ -53,8 +53,8 @@ ECPGopen(const char *cursor_name,const char *prepared_name,
else else
{ {
/* /*
* If can't get the connection name by declared name then using connection name * If can't get the connection name by declared name then using
* coming from the parameter connection_name * connection name coming from the parameter connection_name
*/ */
real_connection_name = connection_name; real_connection_name = connection_name;
} }
@ -81,7 +81,7 @@ ECPGopen(const char *cursor_name,const char *prepared_name,
*/ */
bool bool
ECPGfetch(const char *cursor_name, ECPGfetch(const char *cursor_name,
const int lineno, const int compat,const int force_indicator, const int lineno, const int compat, const int force_indicator,
const char *connection_name, const bool questionmarks, const char *connection_name, const bool questionmarks,
const int st, const char *query,...) const int st, const char *query,...)
{ {
@ -99,8 +99,8 @@ ECPGfetch(const char *cursor_name,
if (real_connection_name == NULL) if (real_connection_name == NULL)
{ {
/* /*
* If can't get the connection name by cursor name then using connection name * If can't get the connection name by cursor name then using
* coming from the parameter connection_name * connection name coming from the parameter connection_name
*/ */
real_connection_name = connection_name; real_connection_name = connection_name;
} }
@ -123,7 +123,7 @@ ECPGfetch(const char *cursor_name,
*/ */
bool bool
ECPGclose(const char *cursor_name, ECPGclose(const char *cursor_name,
const int lineno, const int compat,const int force_indicator, const int lineno, const int compat, const int force_indicator,
const char *connection_name, const bool questionmarks, const char *connection_name, const bool questionmarks,
const int st, const char *query,...) const int st, const char *query,...)
{ {
@ -142,8 +142,8 @@ ECPGclose(const char *cursor_name,
if (real_connection_name == NULL) if (real_connection_name == NULL)
{ {
/* /*
* If can't get the connection name by cursor name then using connection name * If can't get the connection name by cursor name then using
* coming from the parameter connection_name * connection name coming from the parameter connection_name
*/ */
real_connection_name = connection_name; real_connection_name = connection_name;
} }
@ -197,7 +197,7 @@ add_cursor(const int lineno, const char *cursor_name, const char *connection_nam
} }
/* allocate a node to store the new cursor */ /* allocate a node to store the new cursor */
new = (struct cursor_statement *)ecpg_alloc(sizeof(struct cursor_statement), lineno); new = (struct cursor_statement *) ecpg_alloc(sizeof(struct cursor_statement), lineno);
if (new) if (new)
{ {
new->name = ecpg_strdup(cursor_name, lineno); new->name = ecpg_strdup(cursor_name, lineno);
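The ECPGopen()/ECPGfetch()/ECPGclose() entry points above are what the ecpg preprocessor emits for cursor statements. A small, hypothetical embedded-SQL program that would end up calling them (database and identifier names are invented, not taken from this commit):

#include <stdio.h>

EXEC SQL BEGIN DECLARE SECTION;
int			relid;
EXEC SQL END DECLARE SECTION;

int
main(void)
{
	EXEC SQL CONNECT TO testdb;			/* assumed database name */
	EXEC SQL DECLARE cur CURSOR FOR SELECT oid FROM pg_class;
	EXEC SQL OPEN cur;					/* preprocessed into an ECPGopen() call */
	EXEC SQL FETCH cur INTO :relid;		/* preprocessed into an ECPGfetch() call */
	printf("first oid: %d\n", relid);
	EXEC SQL CLOSE cur;					/* preprocessed into an ECPGclose() call */
	EXEC SQL DISCONNECT ALL;
	return 0;
}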

View File

@ -603,8 +603,7 @@ set_desc_attr(struct descriptor_item *desc_item, struct variable *var,
desc_item->data_len = variable->len; desc_item->data_len = variable->len;
} }
ecpg_free(desc_item->data); /* free() takes care of a ecpg_free(desc_item->data); /* free() takes care of a potential NULL value */
* potential NULL value */
desc_item->data = (char *) tobeinserted; desc_item->data = (char *) tobeinserted;
} }
@ -875,8 +874,8 @@ ECPGdescribe(int line, int compat, bool input, const char *connection_name, cons
if (real_connection_name == NULL) if (real_connection_name == NULL)
{ {
/* /*
* If can't get the connection name by declared name then using connection name * If can't get the connection name by declared name then using
* coming from the parameter connection_name * connection name coming from the parameter connection_name
*/ */
real_connection_name = connection_name; real_connection_name = connection_name;
} }

View File

@ -107,7 +107,7 @@ struct prepared_statement
struct cursor_statement struct cursor_statement
{ {
char *name; /*cursor name*/ char *name; /* cursor name */
struct cursor_statement *next; struct cursor_statement *next;
}; };
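cursor_statement is a singly linked list node keyed by cursor name. A hedged sketch of the kind of list walk find_cursor() presumably performs (function and variable names are assumptions, not the library's actual code):

#include <string.h>

/* hypothetical lookup: walk the per-connection cursor list by name */
static struct cursor_statement *
lookup_cursor(struct cursor_statement *head, const char *name)
{
	struct cursor_statement *cur;

	for (cur = head; cur != NULL; cur = cur->next)
	{
		if (strcmp(cur->name, name) == 0)
			return cur;			/* found the matching entry */
	}
	return NULL;				/* no cursor with that name */
}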

Some files were not shown because too many files have changed in this diff