
Further -Wshadow=compatible-local warning fixes

These should have been included in 421892a19, as these shadowed-variable
warnings can likewise be fixed by narrowing the scope of the shadowed
variable, i.e., moving its declaration into an inner scope.

This is part of the same effort as f01592f91.

By my count, this takes the warning count from 114 down to 106.

Author: David Rowley and Justin Pryzby
Discussion: https://postgr.es/m/CAApHDvrwLGBP%2BYw9vriayyf%3DXR4uPWP5jr6cQhP9au_kaDUhbA%40mail.gmail.com
Committer: David Rowley
Date: 2022-08-24 22:04:28 +12:00
Parent: 161355ee6d
Commit: f959bf9a5b

8 changed files with 15 additions and 12 deletions
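For illustration, here is a minimal, hypothetical sketch (not code from this
commit) of the pattern the hunks below follow: a variable declared at function
scope but used only inside one block gets shadowed by a same-named local of a
compatible type elsewhere in the function, which GCC's
-Wshadow=compatible-local flags. Narrowing the outer declaration to the block
that actually uses it removes the shadowing.

/* Before: gcc -Wshadow=compatible-local warns on the second "item". */
static int
demo_before(const int *vals, int nvals)
{
	int			item;			/* used only by the first loop */
	int			total = 0;

	for (int i = 0; i < nvals; i++)
	{
		item = vals[i];
		total += item;
	}

	for (int i = 0; i < nvals; i++)
	{
		int			item = vals[i] * 2; /* warning: shadows outer "item" */

		total += item;
	}

	return total;
}

/*
 * After: the declaration moves into the loop that needs it, so the
 * second "item" no longer shadows anything.
 */
static int
demo_after(const int *vals, int nvals)
{
	int			total = 0;

	for (int i = 0; i < nvals; i++)
	{
		int			item = vals[i];

		total += item;
	}

	for (int i = 0; i < nvals; i++)
	{
		int			item = vals[i] * 2;

		total += item;
	}

	return total;
}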


@@ -395,7 +395,6 @@ moveLeafs(Relation index, SpGistState *state,
 				size;
 	Buffer		nbuf;
 	Page		npage;
-	SpGistLeafTuple it;
 	OffsetNumber r = InvalidOffsetNumber,
 				startOffset = InvalidOffsetNumber;
 	bool		replaceDead = false;
@@ -467,6 +466,8 @@ moveLeafs(Relation index, SpGistState *state,
 	{
 		for (i = 0; i < nDelete; i++)
 		{
+			SpGistLeafTuple it;
+
 			it = (SpGistLeafTuple) PageGetItem(current->page,
 											   PageGetItemId(current->page, toDelete[i]));
 			Assert(it->tupstate == SPGIST_LIVE);


@@ -1149,7 +1149,6 @@ CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
 		PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
 		List	   *idxs = NIL;
 		List	   *childTbls = NIL;
-		ListCell   *l;
 		int			i;
 		MemoryContext oldcxt,
 					perChildCxt;
@@ -1181,6 +1180,7 @@ CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
 		for (i = 0; i < partdesc->nparts; i++)
 		{
 			Oid			indexOnChild = InvalidOid;
+			ListCell   *l;
 			ListCell   *l2;
 			CreateTrigStmt *childStmt;
 			Relation	childTbl;


@@ -1080,7 +1080,6 @@ static void
 ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
 {
 	ParallelHashJoinState *pstate = hashtable->parallel_state;
-	int			i;
 
 	Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER);
@@ -1244,7 +1243,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
 				ExecParallelHashTableSetCurrentBatch(hashtable, 0);
 
 				/* Are any of the new generation of batches exhausted? */
-				for (i = 0; i < hashtable->nbatch; ++i)
+				for (int i = 0; i < hashtable->nbatch; ++i)
 				{
 					ParallelHashJoinBatch *batch = hashtable->batches[i].shared;
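This hunk shows the commit's other variant of the fix: when the shadowed
variable is a loop counter, removing the function-scope declaration and
declaring the counter in the for-initializer (C99) confines it to the loop,
so there is nothing left for other locals named "i" to collide with. A
hypothetical sketch, not taken from this commit:

/* "i" exists only inside the loop, so no other local "i" can conflict. */
static long
sum_batches(const long *batch_sizes, int nbatch)
{
	long		total = 0;

	for (int i = 0; i < nbatch; ++i)
		total += batch_sizes[i];

	return total;
}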


@@ -1981,7 +1981,6 @@ preprocess_grouping_sets(PlannerInfo *root)
 	Query	   *parse = root->parse;
 	List	   *sets;
 	int			maxref = 0;
-	ListCell   *lc;
 	ListCell   *lc_set;
 	grouping_sets_data *gd = palloc0(sizeof(grouping_sets_data));
@@ -2024,6 +2023,7 @@ preprocess_grouping_sets(PlannerInfo *root)
 	if (!bms_is_empty(gd->unsortable_refs))
 	{
 		List	   *sortable_sets = NIL;
+		ListCell   *lc;
 
 		foreach(lc, parse->groupingSets)
 		{


@@ -161,7 +161,6 @@ compute_tsvector_stats(VacAttrStats *stats,
 	int			vector_no,
 				lexeme_no;
 	LexemeHashKey hash_key;
-	TrackItem  *item;
 
 	/*
 	 * We want statistics_target * 10 lexemes in the MCELEM array.  This
@@ -240,6 +239,7 @@ compute_tsvector_stats(VacAttrStats *stats,
 		curentryptr = ARRPTR(vector);
 		for (j = 0; j < vector->size; j++)
 		{
+			TrackItem  *item;
 			bool		found;
 
 			/*
@@ -296,6 +296,7 @@ compute_tsvector_stats(VacAttrStats *stats,
 		int			nonnull_cnt = samplerows - null_cnt;
 		int			i;
 		TrackItem **sort_table;
+		TrackItem  *item;
 		int			track_len;
 		int			cutoff_freq;
 		int			minfreq,


@@ -81,8 +81,7 @@ varstr_levenshtein(const char *source, int slen,
 	int		   *prev;
 	int		   *curr;
 	int		   *s_char_len = NULL;
-	int			i,
-				j;
+	int			j;
 	const char *y;
 
 	/*
@@ -217,7 +216,7 @@ varstr_levenshtein(const char *source, int slen,
 	 * To transform the first i characters of s into the first 0 characters of
 	 * t, we must perform i deletions.
 	 */
-	for (i = START_COLUMN; i < STOP_COLUMN; i++)
+	for (int i = START_COLUMN; i < STOP_COLUMN; i++)
 		prev[i] = i * del_c;
 
 	/* Loop through rows of the notional array */
@@ -226,6 +225,7 @@ varstr_levenshtein(const char *source, int slen,
 		int		   *temp;
 		const char *x = source;
 		int			y_char_len = n != tlen + 1 ? pg_mblen(y) : 1;
+		int			i;
 
 #ifdef LEVENSHTEIN_LESS_EQUAL


@@ -1322,8 +1322,7 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
 	ConsiderSplitContext context;
 	OffsetNumber i,
 				maxoff;
-	RangeType  *range,
-			   *left_range = NULL,
+	RangeType  *left_range = NULL,
 			   *right_range = NULL;
 	int			common_entries_count;
 	NonEmptyRange *by_lower,
@@ -1518,6 +1517,7 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
 	 */
 	for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
 	{
+		RangeType  *range;
 		RangeBound	lower,
 					upper;
 		bool		empty;
@@ -1593,6 +1593,7 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
 	 */
 	for (i = 0; i < common_entries_count; i++)
 	{
+		RangeType  *range;
 		int			idx = common_entries[i].index;
 
 		range = DatumGetRangeTypeP(entryvec->vector[idx].key);


@@ -1615,7 +1615,6 @@ pg_get_statisticsobj_worker(Oid statextid, bool columns_only, bool missing_ok)
 	ArrayType  *arr;
 	char	   *enabled;
 	Datum		datum;
-	bool		isnull;
 	bool		ndistinct_enabled;
 	bool		dependencies_enabled;
 	bool		mcv_enabled;
@@ -1668,6 +1667,8 @@ pg_get_statisticsobj_worker(Oid statextid, bool columns_only, bool missing_ok)
 	if (!columns_only)
 	{
+		bool		isnull;
+
 		nsp = get_namespace_name_or_temp(statextrec->stxnamespace);
 		appendStringInfo(&buf, "CREATE STATISTICS %s",
 						 quote_qualified_identifier(nsp,