author     David Rowley <drowley@postgresql.org>   2022-08-20 11:40:44 +1200
committer  David Rowley <drowley@postgresql.org>   2022-08-20 11:40:44 +1200
commit     f01592f9157707c4de1f00a0e0dc5a7e8fa8f1d5 (patch)
tree       1995e280a06a798da836da8442464fc4fb9f149a
parent     3097bde7dd1d9e7b1127b24935e70ca4609e22b2 (diff)
download   postgresql-f01592f9157707c4de1f00a0e0dc5a7e8fa8f1d5.tar.gz
           postgresql-f01592f9157707c4de1f00a0e0dc5a7e8fa8f1d5.zip
Remove shadowed local variables that are new in v15
Compiling with -Wshadow=compatible-local yields quite a few warnings about local variables being shadowed by compatible local variables in an inner scope. Of course, this is perfectly valid in C, but we have had bugs in the past as a result of developers failing to notice this. af7d270dd is a recent example.

Here we do a cleanup of the warnings we receive from -Wshadow=compatible-local for code which is new to PostgreSQL 15. We've yet to have the discussion about whether we actually ever want to run with that as a standard compilation flag. We'll need to at least get the number of warnings down to something easier to manage before we can realistically consider whether we want this or not. This commit is the first step towards reducing the warnings.

The changes being made here are all fairly trivial. Because of that, and because v15 is still in beta, this is being back-patched into 15. It seems riskier not to do so: leaving the code as-is would create additional conflicts for any future bug fixes touching the same areas, and with them an increased risk of future bugs.

Author: Justin Pryzby
Discussion: https://postgr.es/m/20220817145434.GC26426%40telsasoft.com
Backpatch-through: 15
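For illustration, here is a minimal standalone example of the kind of shadowing this flag reports. The file name and variables are invented for the demo; this is not code from the tree:

	/* shadow.c -- compile with: gcc -Wshadow=compatible-local -c shadow.c */
	#include <stdio.h>

	int
	main(void)
	{
		int		count = 0;

		for (int i = 0; i < 3; i++)
		{
			int		count = i;	/* warning: declaration of 'count' shadows a
								 * previous local -- perfectly valid C, but
								 * easy to misread */

			printf("%d\n", count);
		}

		return count;			/* still 0; the loop only touched the shadow */
	}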
-rw-r--r--  src/backend/backup/basebackup_target.c       | 12
-rw-r--r--  src/backend/parser/parse_jsontable.c         |  6
-rw-r--r--  src/backend/replication/logical/tablesync.c  | 23
-rw-r--r--  src/backend/utils/adt/jsonpath_exec.c        |  6
-rw-r--r--  src/bin/pg_dump/pg_dump.c                    | 38
5 files changed, 39 insertions(+), 46 deletions(-)
diff --git a/src/backend/backup/basebackup_target.c b/src/backend/backup/basebackup_target.c
index 83928e32055..f280660a03f 100644
--- a/src/backend/backup/basebackup_target.c
+++ b/src/backend/backup/basebackup_target.c
@@ -62,7 +62,7 @@ BaseBackupAddTarget(char *name,
void *(*check_detail) (char *, char *),
bbsink *(*get_sink) (bbsink *, void *))
{
- BaseBackupTargetType *ttype;
+ BaseBackupTargetType *newtype;
MemoryContext oldcontext;
ListCell *lc;
@@ -96,11 +96,11 @@ BaseBackupAddTarget(char *name,
* name into a newly-allocated chunk of memory.
*/
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
- ttype = palloc(sizeof(BaseBackupTargetType));
- ttype->name = pstrdup(name);
- ttype->check_detail = check_detail;
- ttype->get_sink = get_sink;
- BaseBackupTargetTypeList = lappend(BaseBackupTargetTypeList, ttype);
+ newtype = palloc(sizeof(BaseBackupTargetType));
+ newtype->name = pstrdup(name);
+ newtype->check_detail = check_detail;
+ newtype->get_sink = get_sink;
+ BaseBackupTargetTypeList = lappend(BaseBackupTargetTypeList, newtype);
MemoryContextSwitchTo(oldcontext);
}
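The hunk above registers the new target type in TopMemoryContext so that it outlives whatever short-lived memory context the caller happens to be in. A minimal sketch of that idiom, assuming a PostgreSQL backend build; the registry and function names here are hypothetical, not part of the tree:

	#include "postgres.h"
	#include "nodes/pg_list.h"
	#include "utils/memutils.h"

	static List *registered_names = NIL;	/* hypothetical global registry */

	/* Register a copy of "name" that survives the current memory context. */
	static void
	register_name(const char *name)
	{
		MemoryContext oldcontext = MemoryContextSwitchTo(TopMemoryContext);

		registered_names = lappend(registered_names, pstrdup(name));
		MemoryContextSwitchTo(oldcontext);
	}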
diff --git a/src/backend/parser/parse_jsontable.c b/src/backend/parser/parse_jsontable.c
index bc3272017ef..3e94071248e 100644
--- a/src/backend/parser/parse_jsontable.c
+++ b/src/backend/parser/parse_jsontable.c
@@ -341,13 +341,13 @@ transformJsonTableChildPlan(JsonTableContext *cxt, JsonTablePlan *plan,
/* transform all nested columns into cross/union join */
foreach(lc, columns)
{
- JsonTableColumn *jtc = castNode(JsonTableColumn, lfirst(lc));
+ JsonTableColumn *col = castNode(JsonTableColumn, lfirst(lc));
Node *node;
- if (jtc->coltype != JTC_NESTED)
+ if (col->coltype != JTC_NESTED)
continue;
- node = transformNestedJsonTableColumn(cxt, jtc, plan);
+ node = transformNestedJsonTableColumn(cxt, col, plan);
/* join transformed node with previous sibling nodes */
res = res ? makeJsonTableSiblingJoin(cross, res, node) : node;
diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c
index bfcb80b4955..d37d8a0d74a 100644
--- a/src/backend/replication/logical/tablesync.c
+++ b/src/backend/replication/logical/tablesync.c
@@ -707,7 +707,6 @@ fetch_remote_table_info(char *nspname, char *relname,
bool isnull;
int natt;
ListCell *lc;
- bool first;
Bitmapset *included_cols = NULL;
lrel->nspname = nspname;
@@ -759,18 +758,15 @@ fetch_remote_table_info(char *nspname, char *relname,
if (walrcv_server_version(LogRepWorkerWalRcvConn) >= 150000)
{
WalRcvExecResult *pubres;
- TupleTableSlot *slot;
+ TupleTableSlot *tslot;
Oid attrsRow[] = {INT2VECTOROID};
StringInfoData pub_names;
- bool first = true;
-
initStringInfo(&pub_names);
foreach(lc, MySubscription->publications)
{
- if (!first)
+ if (foreach_current_index(lc) > 0)
appendStringInfo(&pub_names, ", ");
appendStringInfoString(&pub_names, quote_literal_cstr(strVal(lfirst(lc))));
- first = false;
}
/*
@@ -819,10 +815,10 @@ fetch_remote_table_info(char *nspname, char *relname,
* If we find a NULL value, it means all the columns should be
* replicated.
*/
- slot = MakeSingleTupleTableSlot(pubres->tupledesc, &TTSOpsMinimalTuple);
- if (tuplestore_gettupleslot(pubres->tuplestore, true, false, slot))
+ tslot = MakeSingleTupleTableSlot(pubres->tupledesc, &TTSOpsMinimalTuple);
+ if (tuplestore_gettupleslot(pubres->tuplestore, true, false, tslot))
{
- Datum cfval = slot_getattr(slot, 1, &isnull);
+ Datum cfval = slot_getattr(tslot, 1, &isnull);
if (!isnull)
{
@@ -838,9 +834,9 @@ fetch_remote_table_info(char *nspname, char *relname,
included_cols = bms_add_member(included_cols, elems[natt]);
}
- ExecClearTuple(slot);
+ ExecClearTuple(tslot);
}
- ExecDropSingleTupleTableSlot(slot);
+ ExecDropSingleTupleTableSlot(tslot);
walrcv_clear_result(pubres);
@@ -950,14 +946,11 @@ fetch_remote_table_info(char *nspname, char *relname,
/* Build the pubname list. */
initStringInfo(&pub_names);
- first = true;
foreach(lc, MySubscription->publications)
{
char *pubname = strVal(lfirst(lc));
- if (first)
- first = false;
- else
+ if (foreach_current_index(lc) > 0)
appendStringInfoString(&pub_names, ", ");
appendStringInfoString(&pub_names, quote_literal_cstr(pubname));
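Both tablesync.c hunks replace a hand-rolled "first" flag with foreach_current_index(), a pg_list.h macro that evaluates to the zero-based index of the current cell of a foreach loop. A sketch of the idiom, assuming a backend build; the function and its use of a List of C strings are invented for the example:

	#include "postgres.h"
	#include "lib/stringinfo.h"
	#include "nodes/pg_list.h"

	/* Append the members of a List of C strings to "buf", comma-separated. */
	static void
	append_comma_separated(StringInfo buf, List *names)
	{
		ListCell   *lc;

		foreach(lc, names)
		{
			/* zero on the first iteration, so no separate "first" flag */
			if (foreach_current_index(lc) > 0)
				appendStringInfoString(buf, ", ");
			appendStringInfoString(buf, (const char *) lfirst(lc));
		}
	}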
diff --git a/src/backend/utils/adt/jsonpath_exec.c b/src/backend/utils/adt/jsonpath_exec.c
index 5b6a4805721..9c381ae7271 100644
--- a/src/backend/utils/adt/jsonpath_exec.c
+++ b/src/backend/utils/adt/jsonpath_exec.c
@@ -3109,10 +3109,10 @@ JsonItemFromDatum(Datum val, Oid typid, int32 typmod, JsonbValue *res)
if (JsonContainerIsScalar(&jb->root))
{
- bool res PG_USED_FOR_ASSERTS_ONLY;
+ bool result PG_USED_FOR_ASSERTS_ONLY;
- res = JsonbExtractScalar(&jb->root, jbv);
- Assert(res);
+ result = JsonbExtractScalar(&jb->root, jbv);
+ Assert(result);
}
else
JsonbInitBinary(jbv, jb);
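In the hunk above, the inner "res" shadowed the function's JsonbValue *res parameter, hence the rename. PG_USED_FOR_ASSERTS_ONLY (from c.h) marks a variable that exists only to feed Assert(), so builds without assertions don't warn about it being set but unused. A sketch of the idiom, assuming a backend build; do_work() is hypothetical:

	#include "postgres.h"

	extern bool do_work(void);	/* hypothetical; must always succeed */

	static void
	run_checked(void)
	{
		bool		ok PG_USED_FOR_ASSERTS_ONLY;

		ok = do_work();
		Assert(ok);				/* compiled out unless assertions are enabled */
	}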
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index da6605175a0..2c689157329 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -3142,10 +3142,10 @@ dumpDatabase(Archive *fout)
PQExpBuffer loFrozenQry = createPQExpBuffer();
PQExpBuffer loOutQry = createPQExpBuffer();
PQExpBuffer loHorizonQry = createPQExpBuffer();
- int i_relfrozenxid,
- i_relfilenode,
- i_oid,
- i_relminmxid;
+ int ii_relfrozenxid,
+ ii_relfilenode,
+ ii_oid,
+ ii_relminmxid;
/*
* pg_largeobject
@@ -3163,10 +3163,10 @@ dumpDatabase(Archive *fout)
lo_res = ExecuteSqlQuery(fout, loFrozenQry->data, PGRES_TUPLES_OK);
- i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
- i_relminmxid = PQfnumber(lo_res, "relminmxid");
- i_relfilenode = PQfnumber(lo_res, "relfilenode");
- i_oid = PQfnumber(lo_res, "oid");
+ ii_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
+ ii_relminmxid = PQfnumber(lo_res, "relminmxid");
+ ii_relfilenode = PQfnumber(lo_res, "relfilenode");
+ ii_oid = PQfnumber(lo_res, "oid");
appendPQExpBufferStr(loHorizonQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, preserve pg_largeobject and index relfilenodes\n");
@@ -3178,12 +3178,12 @@ dumpDatabase(Archive *fout)
appendPQExpBuffer(loHorizonQry, "UPDATE pg_catalog.pg_class\n"
"SET relfrozenxid = '%u', relminmxid = '%u'\n"
"WHERE oid = %u;\n",
- atooid(PQgetvalue(lo_res, i, i_relfrozenxid)),
- atooid(PQgetvalue(lo_res, i, i_relminmxid)),
- atooid(PQgetvalue(lo_res, i, i_oid)));
+ atooid(PQgetvalue(lo_res, i, ii_relfrozenxid)),
+ atooid(PQgetvalue(lo_res, i, ii_relminmxid)),
+ atooid(PQgetvalue(lo_res, i, ii_oid)));
- oid = atooid(PQgetvalue(lo_res, i, i_oid));
- relfilenumber = atooid(PQgetvalue(lo_res, i, i_relfilenode));
+ oid = atooid(PQgetvalue(lo_res, i, ii_oid));
+ relfilenumber = atooid(PQgetvalue(lo_res, i, ii_relfilenode));
if (oid == LargeObjectRelationId)
appendPQExpBuffer(loOutQry,
@@ -7081,21 +7081,21 @@ getConstraints(Archive *fout, TableInfo tblinfo[], int numTables)
appendPQExpBufferChar(tbloids, '{');
for (int i = 0; i < numTables; i++)
{
- TableInfo *tbinfo = &tblinfo[i];
+ TableInfo *tinfo = &tblinfo[i];
/*
* For partitioned tables, foreign keys have no triggers so they must
* be included anyway in case some foreign keys are defined.
*/
- if ((!tbinfo->hastriggers &&
- tbinfo->relkind != RELKIND_PARTITIONED_TABLE) ||
- !(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
+ if ((!tinfo->hastriggers &&
+ tinfo->relkind != RELKIND_PARTITIONED_TABLE) ||
+ !(tinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
continue;
/* OK, we need info for this table */
if (tbloids->len > 1) /* do we have more than the '{'? */
appendPQExpBufferChar(tbloids, ',');
- appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
+ appendPQExpBuffer(tbloids, "%u", tinfo->dobj.catId.oid);
}
appendPQExpBufferChar(tbloids, '}');
@@ -16800,7 +16800,7 @@ dumpSequence(Archive *fout, const TableInfo *tbinfo)
*/
if (OidIsValid(tbinfo->owning_tab) && !tbinfo->is_identity_sequence)
{
- TableInfo *owning_tab = findTableByOid(tbinfo->owning_tab);
+ owning_tab = findTableByOid(tbinfo->owning_tab);
if (owning_tab == NULL)
pg_fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",