diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c
index e84b5edc035..f6aced03d37 100644
--- a/src/backend/access/spgist/spgdoinsert.c
+++ b/src/backend/access/spgist/spgdoinsert.c
@@ -395,7 +395,6 @@ moveLeafs(Relation index, SpGistState *state,
 				size;
 	Buffer		nbuf;
 	Page		npage;
-	SpGistLeafTuple it;
 	OffsetNumber r = InvalidOffsetNumber,
 				startOffset = InvalidOffsetNumber;
 	bool		replaceDead = false;
@@ -467,6 +466,8 @@ moveLeafs(Relation index, SpGistState *state,
 	{
 		for (i = 0; i < nDelete; i++)
 		{
+			SpGistLeafTuple it;
+
 			it = (SpGistLeafTuple) PageGetItem(current->page,
 											   PageGetItemId(current->page, toDelete[i]));
 			Assert(it->tupstate == SPGIST_LIVE);
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 62a09fb131b..7661e004a93 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -1149,7 +1149,6 @@ CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
 		PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
 		List	   *idxs = NIL;
 		List	   *childTbls = NIL;
-		ListCell   *l;
 		int			i;
 		MemoryContext oldcxt,
 					perChildCxt;
@@ -1181,6 +1180,7 @@ CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
 		for (i = 0; i < partdesc->nparts; i++)
 		{
 			Oid			indexOnChild = InvalidOid;
+			ListCell   *l;
 			ListCell   *l2;
 			CreateTrigStmt *childStmt;
 			Relation	childTbl;
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index c48d92259f9..841896c7781 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -1080,7 +1080,6 @@ static void
 ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
 {
 	ParallelHashJoinState *pstate = hashtable->parallel_state;
-	int			i;
 
 	Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER);
 
@@ -1244,7 +1243,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
 			ExecParallelHashTableSetCurrentBatch(hashtable, 0);
 
 			/* Are any of the new generation of batches exhausted? */
-			for (i = 0; i < hashtable->nbatch; ++i)
+			for (int i = 0; i < hashtable->nbatch; ++i)
 			{
 				ParallelHashJoinBatch *batch = hashtable->batches[i].shared;
 
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index cf9e0a74dbf..d929ce34171 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -1981,7 +1981,6 @@ preprocess_grouping_sets(PlannerInfo *root)
 	Query	   *parse = root->parse;
 	List	   *sets;
 	int			maxref = 0;
-	ListCell   *lc;
 	ListCell   *lc_set;
 	grouping_sets_data *gd = palloc0(sizeof(grouping_sets_data));
 
@@ -2024,6 +2023,7 @@ preprocess_grouping_sets(PlannerInfo *root)
 	if (!bms_is_empty(gd->unsortable_refs))
 	{
 		List	   *sortable_sets = NIL;
+		ListCell   *lc;
 
 		foreach(lc, parse->groupingSets)
 		{
diff --git a/src/backend/tsearch/ts_typanalyze.c b/src/backend/tsearch/ts_typanalyze.c
index e771a7cd62d..e2d2ec18c90 100644
--- a/src/backend/tsearch/ts_typanalyze.c
+++ b/src/backend/tsearch/ts_typanalyze.c
@@ -161,7 +161,6 @@ compute_tsvector_stats(VacAttrStats *stats,
 	int			vector_no,
 				lexeme_no;
 	LexemeHashKey hash_key;
-	TrackItem  *item;
 
 	/*
 	 * We want statistics_target * 10 lexemes in the MCELEM array. This
@@ -240,6 +239,7 @@ compute_tsvector_stats(VacAttrStats *stats,
 		curentryptr = ARRPTR(vector);
 		for (j = 0; j < vector->size; j++)
 		{
+			TrackItem  *item;
 			bool		found;
 
 			/*
@@ -296,6 +296,7 @@ compute_tsvector_stats(VacAttrStats *stats,
 		int			nonnull_cnt = samplerows - null_cnt;
 		int			i;
 		TrackItem **sort_table;
+		TrackItem  *item;
 		int			track_len;
 		int			cutoff_freq;
 		int			minfreq,
diff --git a/src/backend/utils/adt/levenshtein.c b/src/backend/utils/adt/levenshtein.c
index 3026cc24311..2fdb3b808bd 100644
--- a/src/backend/utils/adt/levenshtein.c
+++ b/src/backend/utils/adt/levenshtein.c
@@ -81,8 +81,7 @@ varstr_levenshtein(const char *source, int slen,
 	int		   *prev;
 	int		   *curr;
 	int		   *s_char_len = NULL;
-	int			i,
-				j;
+	int			j;
 	const char *y;
 
 	/*
@@ -217,7 +216,7 @@ varstr_levenshtein(const char *source, int slen,
 	 * To transform the first i characters of s into the first 0 characters of
 	 * t, we must perform i deletions.
 	 */
-	for (i = START_COLUMN; i < STOP_COLUMN; i++)
+	for (int i = START_COLUMN; i < STOP_COLUMN; i++)
 		prev[i] = i * del_c;
 
 	/* Loop through rows of the notional array */
@@ -226,6 +225,7 @@ varstr_levenshtein(const char *source, int slen,
 		int		   *temp;
 		const char *x = source;
 		int			y_char_len = n != tlen + 1 ? pg_mblen(y) : 1;
+		int			i;
 
 #ifdef LEVENSHTEIN_LESS_EQUAL
 
diff --git a/src/backend/utils/adt/rangetypes_gist.c b/src/backend/utils/adt/rangetypes_gist.c
index fbf39dbf303..777fdf0e2e9 100644
--- a/src/backend/utils/adt/rangetypes_gist.c
+++ b/src/backend/utils/adt/rangetypes_gist.c
@@ -1322,8 +1322,7 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
 	ConsiderSplitContext context;
 	OffsetNumber i,
 				maxoff;
-	RangeType  *range,
-			   *left_range = NULL,
+	RangeType  *left_range = NULL,
 			   *right_range = NULL;
 	int			common_entries_count;
 	NonEmptyRange *by_lower,
@@ -1518,6 +1517,7 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
 	 */
 	for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
 	{
+		RangeType  *range;
 		RangeBound	lower,
 					upper;
 		bool		empty;
@@ -1593,6 +1593,7 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
 	 */
 	for (i = 0; i < common_entries_count; i++)
 	{
+		RangeType  *range;
 		int			idx = common_entries[i].index;
 
 		range = DatumGetRangeTypeP(entryvec->vector[idx].key);
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 8964f73b929..8280711f7ef 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -1615,7 +1615,6 @@ pg_get_statisticsobj_worker(Oid statextid, bool columns_only, bool missing_ok)
 	ArrayType  *arr;
 	char	   *enabled;
 	Datum		datum;
-	bool		isnull;
 	bool		ndistinct_enabled;
 	bool		dependencies_enabled;
 	bool		mcv_enabled;
@@ -1668,6 +1667,8 @@ pg_get_statisticsobj_worker(Oid statextid, bool columns_only, bool missing_ok)
 
 	if (!columns_only)
 	{
+		bool		isnull;
+
 		nsp = get_namespace_name_or_temp(statextrec->stxnamespace);
 		appendStringInfo(&buf, "CREATE STATISTICS %s",
 						 quote_qualified_identifier(nsp,
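
Every hunk above applies the same refactoring: a variable declared at function
scope but used in only one block or loop is moved into that block, or folded
into the for statement itself, as the nodeHash.c and levenshtein.c hunks do.
A minimal standalone sketch of the before/after shape, not part of the patch
and using hypothetical names throughout:

    #include <stdio.h>

    /* Before: i and item are visible to the whole function. */
    static int
    sum_before(const int *items, int nitems)
    {
        int     i;
        int     item;
        int     total = 0;

        for (i = 0; i < nitems; i++)
        {
            item = items[i];
            total += item;
        }
        return total;
    }

    /* After: i lives in the for statement (C99); item in the loop body. */
    static int
    sum_after(const int *items, int nitems)
    {
        int     total = 0;

        for (int i = 0; i < nitems; i++)
        {
            int     item = items[i];

            total += item;
        }
        return total;
    }

    int
    main(void)
    {
        int     xs[] = {1, 2, 3};

        /* Both variants compute the same sum: 6 6 */
        printf("%d %d\n", sum_before(xs, 3), sum_after(xs, 3));
        return 0;
    }

Narrowing scope this way makes each variable's lifetime visible at its
declaration site and lets the compiler reject accidental uses outside the
intended block.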