Remove shadowed local variables that are new in v15
Compiling with -Wshadow=compatible-local yields quite a few warnings about local variables being shadowed by compatible local variables in an inner scope. Of course, this is perfectly valid in C, but we have had bugs in the past as a result of developers failing to notice this. af7d270dd is a recent example.

Here we clean up the warnings we receive from -Wshadow=compatible-local for code which is new to PostgreSQL 15. We've yet to have the discussion about whether we actually ever want to run with that as a standard compilation flag. We'll need to at least get the number of warnings down to something easier to manage before we can realistically consider whether we want this or not. This commit is the first step towards reducing the warnings.

The changes being made here are all fairly trivial. Because of that, and because v15 is still in beta, this is being back-patched into 15. It seems riskier not to do so: leaving the branches out of sync would create additional conflicts for any future bug fixes touching the same areas, and with that a higher risk of future bugs.

Author: Justin Pryzby
Discussion: https://postgr.es/m/20220817145434.GC26426%40telsasoft.com
Backpatch-through: 15
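To make the warning concrete, here is a small, hypothetical C example (illustrative only, not code from the PostgreSQL tree) of the pattern described above: an inner-scope local that shadows a compatible outer local is valid C and compiles silently by default, but gcc's -Wshadow=compatible-local reports it, and the pattern makes it easy to update the wrong variable.

/*
 * Minimal sketch of compatible-local shadowing.
 * Build with:  gcc -Wshadow=compatible-local shadow_demo.c
 */
#include <stdio.h>

static int
sum_twice(const int *vals, int nvals)
{
    int     total = 0;

    for (int i = 0; i < nvals; i++)
        total += vals[i];

    {
        /* shadows the outer "total"; -Wshadow=compatible-local warns here */
        int     total = 0;

        for (int i = 0; i < nvals; i++)
            total += vals[i];

        /* easy to assume this updated the outer variable, but it did not */
        printf("inner total: %d\n", total);
    }

    return total;           /* still reflects only the first loop */
}

int
main(void)
{
    int     vals[] = {1, 2, 3};

    printf("result: %d\n", sum_twice(vals, 3));
    return 0;
}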
This commit is contained in:
parent 4496020e6d
commit 24f457aa2b
@@ -62,7 +62,7 @@ BaseBackupAddTarget(char *name,
                    void *(*check_detail) (char *, char *),
                    bbsink *(*get_sink) (bbsink *, void *))
 {
-    BaseBackupTargetType *ttype;
+    BaseBackupTargetType *newtype;
     MemoryContext oldcontext;
     ListCell   *lc;
 
@@ -96,11 +96,11 @@ BaseBackupAddTarget(char *name,
      * name into a newly-allocated chunk of memory.
      */
     oldcontext = MemoryContextSwitchTo(TopMemoryContext);
-    ttype = palloc(sizeof(BaseBackupTargetType));
-    ttype->name = pstrdup(name);
-    ttype->check_detail = check_detail;
-    ttype->get_sink = get_sink;
-    BaseBackupTargetTypeList = lappend(BaseBackupTargetTypeList, ttype);
+    newtype = palloc(sizeof(BaseBackupTargetType));
+    newtype->name = pstrdup(name);
+    newtype->check_detail = check_detail;
+    newtype->get_sink = get_sink;
+    BaseBackupTargetTypeList = lappend(BaseBackupTargetTypeList, newtype);
     MemoryContextSwitchTo(oldcontext);
 }
 
@@ -341,13 +341,13 @@ transformJsonTableChildPlan(JsonTableContext *cxt, JsonTablePlan *plan,
     /* transform all nested columns into cross/union join */
     foreach(lc, columns)
     {
-        JsonTableColumn *jtc = castNode(JsonTableColumn, lfirst(lc));
+        JsonTableColumn *col = castNode(JsonTableColumn, lfirst(lc));
         Node       *node;
 
-        if (jtc->coltype != JTC_NESTED)
+        if (col->coltype != JTC_NESTED)
             continue;
 
-        node = transformNestedJsonTableColumn(cxt, jtc, plan);
+        node = transformNestedJsonTableColumn(cxt, col, plan);
 
         /* join transformed node with previous sibling nodes */
         res = res ? makeJsonTableSiblingJoin(cross, res, node) : node;
@@ -707,7 +707,6 @@ fetch_remote_table_info(char *nspname, char *relname,
     bool        isnull;
     int         natt;
     ListCell   *lc;
-    bool        first;
     Bitmapset  *included_cols = NULL;
 
     lrel->nspname = nspname;
@@ -759,18 +758,15 @@ fetch_remote_table_info(char *nspname, char *relname,
     if (walrcv_server_version(LogRepWorkerWalRcvConn) >= 150000)
     {
         WalRcvExecResult *pubres;
-        TupleTableSlot *slot;
+        TupleTableSlot *tslot;
         Oid         attrsRow[] = {INT2VECTOROID};
         StringInfoData pub_names;
-        bool        first = true;
-
         initStringInfo(&pub_names);
         foreach(lc, MySubscription->publications)
         {
-            if (!first)
+            if (foreach_current_index(lc) > 0)
                 appendStringInfo(&pub_names, ", ");
             appendStringInfoString(&pub_names, quote_literal_cstr(strVal(lfirst(lc))));
-            first = false;
         }
 
         /*
@@ -819,10 +815,10 @@ fetch_remote_table_info(char *nspname, char *relname,
          * If we find a NULL value, it means all the columns should be
          * replicated.
          */
-        slot = MakeSingleTupleTableSlot(pubres->tupledesc, &TTSOpsMinimalTuple);
-        if (tuplestore_gettupleslot(pubres->tuplestore, true, false, slot))
+        tslot = MakeSingleTupleTableSlot(pubres->tupledesc, &TTSOpsMinimalTuple);
+        if (tuplestore_gettupleslot(pubres->tuplestore, true, false, tslot))
         {
-            Datum       cfval = slot_getattr(slot, 1, &isnull);
+            Datum       cfval = slot_getattr(tslot, 1, &isnull);
 
             if (!isnull)
             {
@@ -838,9 +834,9 @@ fetch_remote_table_info(char *nspname, char *relname,
                     included_cols = bms_add_member(included_cols, elems[natt]);
             }
 
-            ExecClearTuple(slot);
+            ExecClearTuple(tslot);
         }
-        ExecDropSingleTupleTableSlot(slot);
+        ExecDropSingleTupleTableSlot(tslot);
 
         walrcv_clear_result(pubres);
 
@@ -950,14 +946,11 @@ fetch_remote_table_info(char *nspname, char *relname,
 
     /* Build the pubname list. */
     initStringInfo(&pub_names);
-    first = true;
     foreach(lc, MySubscription->publications)
     {
         char       *pubname = strVal(lfirst(lc));
 
-        if (first)
-            first = false;
-        else
+        if (foreach_current_index(lc) > 0)
             appendStringInfoString(&pub_names, ", ");
 
         appendStringInfoString(&pub_names, quote_literal_cstr(pubname));
@@ -3109,10 +3109,10 @@ JsonItemFromDatum(Datum val, Oid typid, int32 typmod, JsonbValue *res)
 
     if (JsonContainerIsScalar(&jb->root))
     {
-        bool        res PG_USED_FOR_ASSERTS_ONLY;
+        bool        result PG_USED_FOR_ASSERTS_ONLY;
 
-        res = JsonbExtractScalar(&jb->root, jbv);
-        Assert(res);
+        result = JsonbExtractScalar(&jb->root, jbv);
+        Assert(result);
     }
     else
         JsonbInitBinary(jbv, jb);
@@ -3142,10 +3142,10 @@ dumpDatabase(Archive *fout)
     PQExpBuffer loFrozenQry = createPQExpBuffer();
     PQExpBuffer loOutQry = createPQExpBuffer();
     PQExpBuffer loHorizonQry = createPQExpBuffer();
-    int         i_relfrozenxid,
-                i_relfilenode,
-                i_oid,
-                i_relminmxid;
+    int         ii_relfrozenxid,
+                ii_relfilenode,
+                ii_oid,
+                ii_relminmxid;
 
     /*
      * pg_largeobject
@@ -3163,10 +3163,10 @@ dumpDatabase(Archive *fout)
 
     lo_res = ExecuteSqlQuery(fout, loFrozenQry->data, PGRES_TUPLES_OK);
 
-    i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
-    i_relminmxid = PQfnumber(lo_res, "relminmxid");
-    i_relfilenode = PQfnumber(lo_res, "relfilenode");
-    i_oid = PQfnumber(lo_res, "oid");
+    ii_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
+    ii_relminmxid = PQfnumber(lo_res, "relminmxid");
+    ii_relfilenode = PQfnumber(lo_res, "relfilenode");
+    ii_oid = PQfnumber(lo_res, "oid");
 
     appendPQExpBufferStr(loHorizonQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
     appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, preserve pg_largeobject and index relfilenodes\n");
@@ -3178,12 +3178,12 @@ dumpDatabase(Archive *fout)
         appendPQExpBuffer(loHorizonQry, "UPDATE pg_catalog.pg_class\n"
                           "SET relfrozenxid = '%u', relminmxid = '%u'\n"
                           "WHERE oid = %u;\n",
-                          atooid(PQgetvalue(lo_res, i, i_relfrozenxid)),
-                          atooid(PQgetvalue(lo_res, i, i_relminmxid)),
-                          atooid(PQgetvalue(lo_res, i, i_oid)));
+                          atooid(PQgetvalue(lo_res, i, ii_relfrozenxid)),
+                          atooid(PQgetvalue(lo_res, i, ii_relminmxid)),
+                          atooid(PQgetvalue(lo_res, i, ii_oid)));
 
-        oid = atooid(PQgetvalue(lo_res, i, i_oid));
-        relfilenode = atooid(PQgetvalue(lo_res, i, i_relfilenode));
+        oid = atooid(PQgetvalue(lo_res, i, ii_oid));
+        relfilenode = atooid(PQgetvalue(lo_res, i, ii_relfilenode));
 
         if (oid == LargeObjectRelationId)
             appendPQExpBuffer(loOutQry,
@@ -7072,21 +7072,21 @@ getConstraints(Archive *fout, TableInfo tblinfo[], int numTables)
     appendPQExpBufferChar(tbloids, '{');
     for (int i = 0; i < numTables; i++)
     {
-        TableInfo  *tbinfo = &tblinfo[i];
+        TableInfo  *tinfo = &tblinfo[i];
 
         /*
         * For partitioned tables, foreign keys have no triggers so they must
         * be included anyway in case some foreign keys are defined.
         */
-        if ((!tbinfo->hastriggers &&
-             tbinfo->relkind != RELKIND_PARTITIONED_TABLE) ||
-            !(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
+        if ((!tinfo->hastriggers &&
+             tinfo->relkind != RELKIND_PARTITIONED_TABLE) ||
+            !(tinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
             continue;
 
         /* OK, we need info for this table */
         if (tbloids->len > 1)   /* do we have more than the '{'? */
             appendPQExpBufferChar(tbloids, ',');
-        appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
+        appendPQExpBuffer(tbloids, "%u", tinfo->dobj.catId.oid);
     }
     appendPQExpBufferChar(tbloids, '}');
 
@@ -16799,7 +16799,7 @@ dumpSequence(Archive *fout, const TableInfo *tbinfo)
      */
     if (OidIsValid(tbinfo->owning_tab) && !tbinfo->is_identity_sequence)
     {
-        TableInfo  *owning_tab = findTableByOid(tbinfo->owning_tab);
+        owning_tab = findTableByOid(tbinfo->owning_tab);
 
         if (owning_tab == NULL)
             pg_fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",