Diffstat (limited to 'src')
-rw-r--r--  src/backend/access/gin/ginbtree.c  2
-rw-r--r--  src/backend/access/heap/pruneheap.c  6
-rw-r--r--  src/backend/access/nbtree/nbtutils.c  4
-rw-r--r--  src/backend/access/rmgrdesc/xactdesc.c  2
-rw-r--r--  src/backend/catalog/pg_constraint.c  2
-rw-r--r--  src/backend/catalog/system_functions.sql  2
-rw-r--r--  src/backend/commands/amcmds.c  2
-rw-r--r--  src/backend/commands/copyfrom.c  2
-rw-r--r--  src/backend/commands/dbcommands.c  4
-rw-r--r--  src/backend/commands/tablecmds.c  2
-rw-r--r--  src/backend/commands/vacuumparallel.c  2
-rw-r--r--  src/backend/executor/execExpr.c  2
-rw-r--r--  src/backend/optimizer/path/equivclass.c  2
-rw-r--r--  src/backend/optimizer/path/pathkeys.c  2
-rw-r--r--  src/backend/optimizer/prep/prepunion.c  3
-rw-r--r--  src/backend/parser/parse_jsontable.c  2
-rw-r--r--  src/backend/parser/parse_utilcmd.c  4
-rw-r--r--  src/backend/partitioning/partbounds.c  6
-rw-r--r--  src/backend/partitioning/partprune.c  2
-rw-r--r--  src/backend/postmaster/launch_backend.c  2
-rw-r--r--  src/backend/postmaster/walsummarizer.c  2
-rw-r--r--  src/backend/replication/logical/slotsync.c  2
-rw-r--r--  src/backend/replication/walsender.c  2
-rw-r--r--  src/backend/statistics/dependencies.c  4
-rw-r--r--  src/backend/storage/aio/read_stream.c  6
-rw-r--r--  src/backend/storage/buffer/bufmgr.c  2
-rw-r--r--  src/backend/storage/lmgr/lock.c  2
-rw-r--r--  src/backend/storage/lmgr/proc.c  2
-rw-r--r--  src/backend/utils/adt/jsonpath_exec.c  4
-rw-r--r--  src/backend/utils/adt/multirangetypes.c  2
-rw-r--r--  src/backend/utils/adt/selfuncs.c  2
-rw-r--r--  src/backend/utils/mmgr/aset.c  4
-rw-r--r--  src/backend/utils/mmgr/bump.c  4
-rw-r--r--  src/backend/utils/mmgr/generation.c  4
-rw-r--r--  src/bin/pg_basebackup/bbstreamer_tar.c  2
-rw-r--r--  src/bin/pg_combinebackup/pg_combinebackup.c  2
-rw-r--r--  src/bin/pg_combinebackup/reconstruct.c  2
-rw-r--r--  src/bin/pg_upgrade/t/004_subscription.pl  2
-rw-r--r--  src/common/unicode_category.c  2
-rw-r--r--  src/include/access/heapam_xlog.h  10
-rw-r--r--  src/include/common/hashfn_unstable.h  2
-rw-r--r--  src/include/lib/radixtree.h  6
-rw-r--r--  src/include/nodes/pathnodes.h  2
-rw-r--r--  src/include/nodes/primnodes.h  2
-rw-r--r--  src/include/storage/proc.h  2
-rw-r--r--  src/interfaces/libpq/fe-cancel.c  2
-rw-r--r--  src/interfaces/libpq/fe-secure-openssl.c  2
-rw-r--r--  src/test/isolation/expected/temp-schema-cleanup.out  4
-rw-r--r--  src/test/isolation/specs/temp-schema-cleanup.spec  2
-rw-r--r--  src/test/modules/test_resowner/test_resowner_many.c  2
-rw-r--r--  src/test/regress/expected/aggregates.out  2
-rw-r--r--  src/test/regress/expected/copy.out  4
-rw-r--r--  src/test/regress/expected/foreign_key.out  2
-rw-r--r--  src/test/regress/expected/publication.out  4
-rw-r--r--  src/test/regress/expected/tsdicts.out  2
-rw-r--r--  src/test/regress/sql/aggregates.sql  2
-rw-r--r--  src/test/regress/sql/copy.sql  4
-rw-r--r--  src/test/regress/sql/foreign_key.sql  2
-rw-r--r--  src/test/regress/sql/publication.sql  4
-rw-r--r--  src/test/regress/sql/tsdicts.sql  2
-rw-r--r--  src/test/subscription/t/004_sync.pl  2
-rw-r--r--  src/test/subscription/t/026_stats.pl  2
62 files changed, 88 insertions, 87 deletions
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 86f938686c3..b7a5013896a 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -766,7 +766,7 @@ ginFinishSplit(GinBtree btree, GinBtreeStack *stack, bool freestack,
/*
* An entry point to ginFinishSplit() that is used when we stumble upon an
* existing incompletely split page in the tree, as opposed to completing a
- * split that we just made outselves. The difference is that stack->buffer may
+ * split that we just made ourselves. The difference is that stack->buffer may
* be merely share-locked on entry, and will be upgraded to exclusive mode.
*
* Note: Upgrading the lock momentarily releases it. Doing that in a scan
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index d2eecaf7ebc..3cdfc5b7f1b 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -33,7 +33,7 @@
typedef struct
{
/*-------------------------------------------------------
- * Arguments passed to heap_page_and_freeze()
+ * Arguments passed to heap_page_prune_and_freeze()
*-------------------------------------------------------
*/
@@ -306,7 +306,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
* If the HEAP_PRUNE_FREEZE option is set, we will also freeze tuples if it's
* required in order to advance relfrozenxid / relminmxid, or if it's
* considered advantageous for overall system performance to do so now. The
- * 'cutoffs', 'presult', 'new_refrozen_xid' and 'new_relmin_mxid' arguments
+ * 'cutoffs', 'presult', 'new_relfrozen_xid' and 'new_relmin_mxid' arguments
* are required when freezing. When HEAP_PRUNE_FREEZE option is set, we also
* set presult->all_visible and presult->all_frozen on exit, to indicate if
* the VM bits can be set. They are always set to false when the
@@ -337,7 +337,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
* off_loc is the offset location required by the caller to use in error
* callback.
*
- * new_relfrozen_xid and new_relmin_xid must provided by the caller if the
+ * new_relfrozen_xid and new_relmin_mxid must be provided by the caller if the
* HEAP_PRUNE_FREEZE option is set. On entry, they contain the oldest XID and
* multi-XID seen on the relation so far. They will be updated with oldest
* values present on the page after pruning. After processing the whole
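
The corrected comment describes a caller-side contract: new_relfrozen_xid and new_relmin_mxid are running accumulators that each pruned page may tighten. A minimal sketch of that pattern, using hypothetical types and a stub page routine rather than PostgreSQL's actual API (real XID comparisons are modular, so the plain '<' below is a simplification):

#include <stdint.h>

typedef uint32_t TransactionId;
typedef uint32_t MultiXactId;

/* Hypothetical per-page result: oldest values seen on one page. */
typedef struct PageResult
{
    TransactionId oldest_xid;
    MultiXactId   oldest_mxid;
} PageResult;

/* Stand-in for pruning/freezing one page; fills in the oldest values. */
static void
prune_one_page(int blkno, PageResult *res)
{
    res->oldest_xid = 100 + (TransactionId) blkno;  /* dummy data */
    res->oldest_mxid = 10;
}

/* The caller owns the "oldest so far" values; each page updates them. */
static void
scan_relation(int nblocks, TransactionId *new_relfrozen_xid,
              MultiXactId *new_relmin_mxid)
{
    for (int blkno = 0; blkno < nblocks; blkno++)
    {
        PageResult res;

        prune_one_page(blkno, &res);
        if (res.oldest_xid < *new_relfrozen_xid)
            *new_relfrozen_xid = res.oldest_xid;
        if (res.oldest_mxid < *new_relmin_mxid)
            *new_relmin_mxid = res.oldest_mxid;
    }
}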
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 498b8d20358..ecbbc2466d2 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -1756,7 +1756,7 @@ _bt_start_prim_scan(IndexScanDesc scan, ScanDirection dir)
*
* (The rules are the same for backwards scans, except that the operators are
* flipped: just replace the precondition's >= operator with a <=, and the
- * postcondition's <= operator with with a >=. In other words, just swap the
+ * postcondition's <= operator with a >=. In other words, just swap the
* precondition with the postcondition.)
*
* We also deal with "advancing" non-required arrays here. Callers whose
@@ -4133,7 +4133,7 @@ _bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate,
else
{
/*
- * Failure -- "ahead" tuple is too far ahead (we were too aggresive).
+ * Failure -- "ahead" tuple is too far ahead (we were too aggressive).
*
* Reset the number of rechecks, and aggressively reduce the target
* distance (we're much more aggressive here than we were when the
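
The failure branch above belongs to an adaptive look-ahead heuristic: each miss resets the recheck counter and sharply shrinks the jump distance. A sketch of that shape with invented constants, not nbtutils.c's actual tuning:

static int look_ahead_distance = 8;     /* hypothetical starting jump */
static int rechecks = 0;

/* On a miss, reset the recheck counter and cut the jump aggressively. */
static void
look_ahead_failed(void)
{
    rechecks = 0;
    look_ahead_distance /= 4;           /* aggressive reduction on failure */
    if (look_ahead_distance < 1)
        look_ahead_distance = 1;
}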
diff --git a/src/backend/access/rmgrdesc/xactdesc.c b/src/backend/access/rmgrdesc/xactdesc.c
index 41b842d80ec..dccca201e05 100644
--- a/src/backend/access/rmgrdesc/xactdesc.c
+++ b/src/backend/access/rmgrdesc/xactdesc.c
@@ -25,7 +25,7 @@
* Parse the WAL format of an xact commit and abort records into an easier to
* understand format.
*
- * This routines are in xactdesc.c because they're accessed in backend (when
+ * These routines are in xactdesc.c because they're accessed in backend (when
* replaying WAL) and frontend (pg_waldump) code. This file is the only xact
* specific one shared between both. They're complicated enough that
* duplication would be bothersome.
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index 778b7c381df..45a99af774e 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -1668,7 +1668,7 @@ DeconstructFkConstraintRow(HeapTuple tuple, int *numfks,
}
/*
- * FindFkPeriodOpers -
+ * FindFKPeriodOpers -
*
* Looks up the operator oids used for the PERIOD part of a temporal foreign key.
* The opclass should be the opclass of that PERIOD element.
diff --git a/src/backend/catalog/system_functions.sql b/src/backend/catalog/system_functions.sql
index fe2bb50f46d..ae099e328c2 100644
--- a/src/backend/catalog/system_functions.sql
+++ b/src/backend/catalog/system_functions.sql
@@ -5,7 +5,7 @@
*
* src/backend/catalog/system_functions.sql
*
- * This file redefines certain built-in functions that it's impractical
+ * This file redefines certain built-in functions that are impractical
* to fully define in pg_proc.dat. In most cases that's because they use
* SQL-standard function bodies and/or default expressions. The node
* tree representations of those are too unreadable, platform-dependent,
diff --git a/src/backend/commands/amcmds.c b/src/backend/commands/amcmds.c
index 10e386288a6..aaa0f9a1dc8 100644
--- a/src/backend/commands/amcmds.c
+++ b/src/backend/commands/amcmds.c
@@ -167,7 +167,7 @@ get_index_am_oid(const char *amname, bool missing_ok)
/*
* get_table_am_oid - given an access method name, look up its OID
- * and verify it corresponds to an table AM.
+ * and verify it corresponds to a table AM.
*/
Oid
get_table_am_oid(const char *amname, bool missing_ok)
diff --git a/src/backend/commands/copyfrom.c b/src/backend/commands/copyfrom.c
index 06bc14636d3..ce4d62e707c 100644
--- a/src/backend/commands/copyfrom.c
+++ b/src/backend/commands/copyfrom.c
@@ -996,7 +996,7 @@ CopyFrom(CopyFromState cstate)
cstate->escontext->error_occurred)
{
/*
- * Soft error occured, skip this tuple and deal with error
+ * Soft error occurred, skip this tuple and deal with error
* information according to ON_ERROR.
*/
if (cstate->opts.on_error == COPY_ON_ERROR_IGNORE)
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 65464fac8e5..8229dfa1f22 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -3312,7 +3312,7 @@ dbase_redo(XLogReaderState *record)
*/
FlushDatabaseBuffers(xlrec->src_db_id);
- /* Close all sgmr fds in all backends. */
+ /* Close all smgr fds in all backends. */
WaitForProcSignalBarrier(EmitProcSignalBarrier(PROCSIGNAL_BARRIER_SMGRRELEASE));
/*
@@ -3378,7 +3378,7 @@ dbase_redo(XLogReaderState *record)
/* Clean out the xlog relcache too */
XLogDropDatabase(xlrec->db_id);
- /* Close all sgmr fds in all backends. */
+ /* Close all smgr fds in all backends. */
WaitForProcSignalBarrier(EmitProcSignalBarrier(PROCSIGNAL_BARRIER_SMGRRELEASE));
for (i = 0; i < xlrec->ntablespaces; i++)
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index f72b2dcadfb..fbffaef1966 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -5687,7 +5687,7 @@ ATParseTransformCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
case AT_AddIndex:
/*
- * A primary key on a inheritance parent needs supporting NOT
+ * A primary key on an inheritance parent needs supporting NOT
* NULL constraint on its children; enqueue commands to create
* those or mark them inherited if they already exist.
*/
diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c
index 5174a4e9753..f26070bff2a 100644
--- a/src/backend/commands/vacuumparallel.c
+++ b/src/backend/commands/vacuumparallel.c
@@ -8,7 +8,7 @@
*
* In a parallel vacuum, we perform both index bulk deletion and index cleanup
* with parallel worker processes. Individual indexes are processed by one
- * vacuum process. ParalleVacuumState contains shared information as well as
+ * vacuum process. ParallelVacuumState contains shared information as well as
* the memory space for storing dead items allocated in the DSA area. We
* launch parallel worker processes at the start of parallel index
* bulk-deletion and index cleanup and once all indexes are processed, the
diff --git a/src/backend/executor/execExpr.c b/src/backend/executor/execExpr.c
index 79087cc6d63..eb5ac208248 100644
--- a/src/backend/executor/execExpr.c
+++ b/src/backend/executor/execExpr.c
@@ -4400,7 +4400,7 @@ ExecInitJsonExpr(JsonExpr *jsexpr, ExprState *state,
/*
* Add a special step, if needed, to check if the coercion evaluation ran
* into an error but was not thrown because the ON ERROR behavior is not
- * ERROR. It will set jsesestate->error if an error did occur.
+ * ERROR. It will set jsestate->error if an error did occur.
*/
if (jsestate->jump_eval_coercion >= 0 && escontext != NULL)
{
diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c
index 1d6bedb399a..21ce1ae2e13 100644
--- a/src/backend/optimizer/path/equivclass.c
+++ b/src/backend/optimizer/path/equivclass.c
@@ -2885,7 +2885,7 @@ add_child_join_rel_equivalences(PlannerInfo *root,
/*
* add_setop_child_rel_equivalences
* Add equivalence members for each non-resjunk target in 'child_tlist'
- * to the EquivalenceClass in the corresponding setop_pathkey's pk_class.
+ * to the EquivalenceClass in the corresponding setop_pathkey's pk_eclass.
*
* 'root' is the PlannerInfo belonging to the top-level set operation.
* 'child_rel' is the RelOptInfo of the child relation we're adding
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index 1d61881a6b6..8b258cbef92 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -384,7 +384,7 @@ group_keys_reorder_by_pathkeys(List *pathkeys, List **group_pathkeys,
* *group_pathkeys containing grouping pathkeys altogether with aggregate
* pathkeys. If we process aggregate pathkeys we could get an invalid
* result of get_sortgroupref_clause_noerr(), because their
- * pathkey->pk_eclass->ec_sortref doesn't referece query targetlist. So,
+ * pathkey->pk_eclass->ec_sortref doesn't reference query targetlist. So,
* we allocate a separate list of pathkeys for lookups.
*/
grouping_pathkeys = list_copy_head(*group_pathkeys, num_groupby_pathkeys);
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index afcb5c0f0f0..3f14e90a45b 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -214,7 +214,8 @@ set_operation_ordered_results_useful(SetOperationStmt *setop)
*
* Returns a RelOptInfo for the subtree, as well as these output parameters:
* *pTargetList: receives the fully-fledged tlist for the subtree's top plan
- * *istrivial_tlist: true iif datatypes between parent and child match.
+ * *istrivial_tlist: true if, and only if, datatypes between parent and child
+ * match.
*
* The pTargetList output parameter is mostly redundant with the pathtarget
* of the returned RelOptInfo, but for the moment we need it because much of
diff --git a/src/backend/parser/parse_jsontable.c b/src/backend/parser/parse_jsontable.c
index 37f2cba0ef0..b2519c2f329 100644
--- a/src/backend/parser/parse_jsontable.c
+++ b/src/backend/parser/parse_jsontable.c
@@ -70,7 +70,7 @@ static JsonTablePlan *makeJsonTableSiblingJoin(JsonTablePlan *lplan,
* (jt->context_item) and the column-generating expressions (jt->columns) to
* populate TableFunc.docexpr and TableFunc.colvalexprs, respectively. Also,
* the PASSING values (jt->passing) are transformed and added into
- * TableFunc.passvalexprs.
+ * TableFunc.passingvalexprs.
*/
ParseNamespaceItem *
transformJsonTable(ParseState *pstate, JsonTable *jt)
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 7fd8fbc0b4b..fef084f5d52 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -3451,7 +3451,7 @@ checkPartition(Relation rel, Oid partRelOid)
/*
* transformPartitionCmdForSplit
- * Analyze the ALTER TABLLE ... SPLIT PARTITION command
+ * Analyze the ALTER TABLE ... SPLIT PARTITION command
*
* For each new partition sps->bound is set to the transformed value of bound.
* Does checks for bounds of new partitions.
@@ -3490,7 +3490,7 @@ transformPartitionCmdForSplit(CreateStmtContext *cxt, PartitionCmd *partcmd)
/*
* transformPartitionCmdForMerge
- * Analyze the ALTER TABLLE ... MERGE PARTITIONS command
+ * Analyze the ALTER TABLE ... MERGE PARTITIONS command
*
* Does simple checks for merged partitions. Calculates bound of resulting
* partition.
diff --git a/src/backend/partitioning/partbounds.c b/src/backend/partitioning/partbounds.c
index 0dbacf39c05..b08edf87a69 100644
--- a/src/backend/partitioning/partbounds.c
+++ b/src/backend/partitioning/partbounds.c
@@ -5146,7 +5146,7 @@ get_partition_bound_spec(Oid partOid, RangeVar *name)
* the first of new partitions) then lower bound of "spec" should be equal (or
* greater than or equal in case defaultPart=true) to lower bound of split
* partition. If last=true (this means that "spec" is the last of new
- * partitions) then upper bound of of "spec" should be equal (or less than or
+ * partitions) then upper bound of "spec" should be equal (or less than or
* equal in case defaultPart=true) to upper bound of split partition.
*
* parent: partitioned table
@@ -5245,8 +5245,8 @@ check_partition_bounds_for_split_range(Relation parent,
false, split_upper);
/*
- * Upper bound of of "spec" should be equal (or less than or equal
- * in case defaultPart=true) to upper bound of split partition.
+ * Upper bound of "spec" should be equal (or less than or equal in
+ * case defaultPart=true) to upper bound of split partition.
*/
if ((!defaultPart && cmpval) || (defaultPart && cmpval > 0))
overlap = true;
diff --git a/src/backend/partitioning/partprune.c b/src/backend/partitioning/partprune.c
index 9006afd9d21..9a1a7faac7a 100644
--- a/src/backend/partitioning/partprune.c
+++ b/src/backend/partitioning/partprune.c
@@ -1825,7 +1825,7 @@ match_clause_to_partition_key(GeneratePruningStepsContext *context,
BooleanTest *new_booltest = (BooleanTest *) copyObject(clause);
NullTest *nulltest;
- /* We expect 'noteq' to only be set to true for BooleanTests */
+ /* We expect 'notclause' to only be set to true for BooleanTests */
Assert(IsA(clause, BooleanTest));
/* reverse the bool test */
diff --git a/src/backend/postmaster/launch_backend.c b/src/backend/postmaster/launch_backend.c
index cb0c3e2f8ab..4e9dde1517b 100644
--- a/src/backend/postmaster/launch_backend.c
+++ b/src/backend/postmaster/launch_backend.c
@@ -187,7 +187,7 @@ child_process_kind child_process_kinds[] = {
/*
* WAL senders start their life as regular backend processes, and change
* their type after authenticating the client for replication. We list it
- * here forPostmasterChildName() but cannot launch them directly.
+ * here for PostmasterChildName() but cannot launch them directly.
*/
[B_WAL_SENDER] = {"wal sender", NULL, true},
[B_SLOTSYNC_WORKER] = {"slot sync worker", ReplSlotSyncWorkerMain, true},
diff --git a/src/backend/postmaster/walsummarizer.c b/src/backend/postmaster/walsummarizer.c
index 0cd5080fa78..72f6c04478d 100644
--- a/src/backend/postmaster/walsummarizer.c
+++ b/src/backend/postmaster/walsummarizer.c
@@ -108,7 +108,7 @@ static WalSummarizerData *WalSummarizerCtl;
/*
* When we reach end of WAL and need to read more, we sleep for a number of
- * milliseconds that is a integer multiple of MS_PER_SLEEP_QUANTUM. This is
+ * milliseconds that is an integer multiple of MS_PER_SLEEP_QUANTUM. This is
* the multiplier. It should vary between 1 and MAX_SLEEP_QUANTA, depending
* on system activity. See summarizer_wait_for_wal() for how we adjust this.
*/
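
The scheme the corrected comment refers to is a quantized backoff: the sleep is always sleep_quanta * MS_PER_SLEEP_QUANTUM, with the multiplier kept between 1 and MAX_SLEEP_QUANTA. A hedged sketch; the constants and the adjustment rule here are assumptions, not walsummarizer.c's actual logic:

#include <stdbool.h>

#define MS_PER_SLEEP_QUANTUM 200        /* illustrative value */
#define MAX_SLEEP_QUANTA 10

static int sleep_quanta = 1;

/* Returns the next sleep duration in ms and adapts the multiplier. */
static int
next_sleep_ms(bool found_new_wal)
{
    if (found_new_wal)
        sleep_quanta = 1;               /* busy: poll again quickly */
    else if (sleep_quanta < MAX_SLEEP_QUANTA)
        sleep_quanta++;                 /* idle: back off gradually */

    return sleep_quanta * MS_PER_SLEEP_QUANTUM;
}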
diff --git a/src/backend/replication/logical/slotsync.c b/src/backend/replication/logical/slotsync.c
index bda0de52db9..cb39adcd0ea 100644
--- a/src/backend/replication/logical/slotsync.c
+++ b/src/backend/replication/logical/slotsync.c
@@ -88,7 +88,7 @@
* overwrites.
*
* The 'last_start_time' is needed by postmaster to start the slot sync worker
- * once per SLOTSYNC_RESTART_INTERVAL_SEC. In cases where a immediate restart
+ * once per SLOTSYNC_RESTART_INTERVAL_SEC. In cases where an immediate restart
* is expected (e.g., slot sync GUCs change), slot sync worker will reset
* last_start_time before exiting, so that postmaster can start the worker
* without waiting for SLOTSYNC_RESTART_INTERVAL_SEC.
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index bc40c454de4..9bf7c67f37d 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -3493,7 +3493,7 @@ WalSndDone(WalSndSendDataCallback send_data)
* Returns the latest point in WAL that has been safely flushed to disk.
* This should only be called when in recovery.
*
- * This is called either by cascading walsender to find WAL postion to be sent
+ * This is called either by cascading walsender to find WAL position to be sent
* to a cascaded standby or by slot synchronization operation to validate remote
* slot's lsn before syncing it locally.
*
diff --git a/src/backend/statistics/dependencies.c b/src/backend/statistics/dependencies.c
index 5eda06839ea..8d01a93b309 100644
--- a/src/backend/statistics/dependencies.c
+++ b/src/backend/statistics/dependencies.c
@@ -794,7 +794,7 @@ dependency_is_compatible_clause(Node *clause, Index relid, AttrNumber *attnum)
}
else if (IsA(clause, ScalarArrayOpExpr))
{
- /* If it's an scalar array operator, check for Var IN Const. */
+ /* If it's a scalar array operator, check for Var IN Const. */
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
/*
@@ -1222,7 +1222,7 @@ dependency_is_compatible_expression(Node *clause, Index relid, List *statlist, N
}
else if (IsA(clause, ScalarArrayOpExpr))
{
- /* If it's an scalar array operator, check for Var IN Const. */
+ /* If it's a scalar array operator, check for Var IN Const. */
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
/*
diff --git a/src/backend/storage/aio/read_stream.c b/src/backend/storage/aio/read_stream.c
index f54dacdd914..634cf4f0d10 100644
--- a/src/backend/storage/aio/read_stream.c
+++ b/src/backend/storage/aio/read_stream.c
@@ -541,9 +541,9 @@ read_stream_begin_relation(int flags,
stream->distance = 1;
/*
- * Since we always always access the same relation, we can initialize
- * parts of the ReadBuffersOperation objects and leave them that way, to
- * avoid wasting CPU cycles writing to them for each read.
+ * Since we always access the same relation, we can initialize parts of
+ * the ReadBuffersOperation objects and leave them that way, to avoid
+ * wasting CPU cycles writing to them for each read.
*/
for (int i = 0; i < max_ios; ++i)
{
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 901b7230fb9..49637284f91 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -1073,7 +1073,7 @@ PinBufferForBlock(Relation rel,
/*
* If there is no Relation it usually implies recovery and thus permanent,
- * but we take an argmument because CreateAndCopyRelationData can reach us
+ * but we take an argument because CreateAndCopyRelationData can reach us
* with only an SMgrRelation for an unlogged relation that we don't want
* to flag with BM_PERMANENT.
*/
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 5022a50dd7b..5154353c844 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -1032,7 +1032,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
/*
* Sleep till someone wakes me up. We do this even in the dontWait
- * case, beause while trying to go to sleep, we may discover that we
+ * case, because while trying to go to sleep, we may discover that we
* can acquire the lock immediately after all.
*/
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 162b1f919db..e4f256c63c7 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -1047,7 +1047,7 @@ AuxiliaryPidGetProc(int pid)
* called, because it could be that when we try to find a position at which
* to insert ourself into the wait queue, we discover that we must be inserted
* ahead of everyone who wants a lock that conflict with ours. In that case,
- * we get the lock immediately. Beause of this, it's sensible for this function
+ * we get the lock immediately. Because of this, it's sensible for this function
* to have a dontWait argument, despite the name.
*
* The lock table's partition lock must be held at entry, and will be held
diff --git a/src/backend/utils/adt/jsonpath_exec.c b/src/backend/utils/adt/jsonpath_exec.c
index 4daf1a68d9d..8a0a2dbc850 100644
--- a/src/backend/utils/adt/jsonpath_exec.c
+++ b/src/backend/utils/adt/jsonpath_exec.c
@@ -4221,7 +4221,7 @@ JsonTableSetDocument(TableFuncScanState *state, Datum value)
}
/*
- * Evaluate a JsonTablePlan's jsonpath to get a new row pattren from
+ * Evaluate a JsonTablePlan's jsonpath to get a new row pattern from
* the given context item
*/
static void
@@ -4339,7 +4339,7 @@ JsonTablePlanScanNextRow(JsonTablePlanState *planstate)
/*
* Now fetch the nested plan's current row to be joined against the
* parent row. Any further nested plans' paths will be re-evaluated
- * reursively, level at a time, after setting each nested plan's
+ * recursively, level at a time, after setting each nested plan's
* current row.
*/
(void) JsonTablePlanNextRow(planstate->nested);
diff --git a/src/backend/utils/adt/multirangetypes.c b/src/backend/utils/adt/multirangetypes.c
index 8f12c953cad..558c6c18c34 100644
--- a/src/backend/utils/adt/multirangetypes.c
+++ b/src/backend/utils/adt/multirangetypes.c
@@ -330,7 +330,7 @@ multirange_out(PG_FUNCTION_ARGS)
}
/*
- * Binary representation: First a int32-sized count of ranges, followed by
+ * Binary representation: First an int32-sized count of ranges, followed by
* ranges in their native binary representation.
*/
Datum
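
As a reading aid for the corrected comment: the receive side consumes a big-endian int32 count and then the ranges one by one. A minimal, hypothetical decoder for just the count framing (the per-range encoding is type-specific and omitted; this is not the backend's pq_* API):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>          /* ntohl */

/* Reads the leading big-endian int32 range count, advances the cursor. */
static const uint8_t *
read_range_count(const uint8_t *buf, int32_t *count)
{
    uint32_t raw;

    memcpy(&raw, buf, sizeof(raw));
    *count = (int32_t) ntohl(raw);      /* wire data is network order */
    return buf + sizeof(raw);
}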
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 35f8f306ee4..5f5d7959d8e 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -6968,7 +6968,7 @@ btcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
* btree scans, making the top-level scan look like a continuous scan
* (as opposed to num_sa_scans-many primitive index scans). After
* all, btree scans mostly work like that at runtime. However, such a
- * scheme would badly bias genericcostestimate's simplistic appraoch
+ * scheme would badly bias genericcostestimate's simplistic approach
* to calculating numIndexPages through prorating.
*
* Stick with the approach taken by non-native SAOP scans for now.
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index 751cc3408c5..dede30dd86a 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -979,8 +979,8 @@ AllocSetAlloc(MemoryContext context, Size size, int flags)
Assert(set->blocks != NULL);
/*
- * If requested size exceeds maximum for chunks we hand the the request
- * off to AllocSetAllocLarge().
+ * If requested size exceeds maximum for chunks we hand the request off to
+ * AllocSetAllocLarge().
*/
if (size > set->allocChunkLimit)
return AllocSetAllocLarge(context, size, flags);
diff --git a/src/backend/utils/mmgr/bump.c b/src/backend/utils/mmgr/bump.c
index a98bafbcc03..c60c9c131e3 100644
--- a/src/backend/utils/mmgr/bump.c
+++ b/src/backend/utils/mmgr/bump.c
@@ -505,8 +505,8 @@ BumpAlloc(MemoryContext context, Size size, int flags)
#endif
/*
- * If requested size exceeds maximum for chunks we hand the the request
- * off to BumpAllocLarge().
+ * If requested size exceeds maximum for chunks we hand the request off to
+ * BumpAllocLarge().
*/
if (chunk_size > set->allocChunkLimit)
return BumpAllocLarge(context, size, flags);
diff --git a/src/backend/utils/mmgr/generation.c b/src/backend/utils/mmgr/generation.c
index 5d81af1f947..b858b8d0f7b 100644
--- a/src/backend/utils/mmgr/generation.c
+++ b/src/backend/utils/mmgr/generation.c
@@ -541,8 +541,8 @@ GenerationAlloc(MemoryContext context, Size size, int flags)
#endif
/*
- * If requested size exceeds maximum for chunks we hand the the request
- * off to GenerationAllocLarge().
+ * If requested size exceeds maximum for chunks we hand the request off to
+ * GenerationAllocLarge().
*/
if (chunk_size > set->allocChunkLimit)
return GenerationAllocLarge(context, size, flags);
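
aset.c, bump.c and generation.c all carry the same dispatch comment fixed above: requests larger than the context's chunk limit skip the chunk machinery and get a dedicated block. Reduced to a sketch with invented names and an invented fixed threshold (the real allocChunkLimit is computed per context):

#include <stddef.h>
#include <stdlib.h>

#define CHUNK_LIMIT 8192                /* hypothetical threshold */

/* Stand-in for the normal chunked path. */
static void *
alloc_from_chunks(size_t size)
{
    return malloc(size);                /* placeholder implementation */
}

/* Oversized requests get their own block, bypassing chunk machinery. */
static void *
alloc_large(size_t size)
{
    return malloc(size);
}

static void *
alloc_dispatch(size_t size)
{
    if (size > CHUNK_LIMIT)
        return alloc_large(size);
    return alloc_from_chunks(size);
}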
diff --git a/src/bin/pg_basebackup/bbstreamer_tar.c b/src/bin/pg_basebackup/bbstreamer_tar.c
index a778e620cc5..dec71ea65b3 100644
--- a/src/bin/pg_basebackup/bbstreamer_tar.c
+++ b/src/bin/pg_basebackup/bbstreamer_tar.c
@@ -345,7 +345,7 @@ bbstreamer_tar_parser_free(bbstreamer *streamer)
}
/*
- * Create an bbstreamer that can generate a tar archive.
+ * Create a bbstreamer that can generate a tar archive.
*
* This is intended to be usable either for generating a brand-new tar archive
* or for modifying one on the fly. The input should be a series of typed
diff --git a/src/bin/pg_combinebackup/pg_combinebackup.c b/src/bin/pg_combinebackup/pg_combinebackup.c
index cfeb6ebe02f..b26c5324451 100644
--- a/src/bin/pg_combinebackup/pg_combinebackup.c
+++ b/src/bin/pg_combinebackup/pg_combinebackup.c
@@ -1177,7 +1177,7 @@ remember_to_cleanup_directory(char *target_path, bool rmtopdir)
}
/*
- * Empty out the list of directories scheduled for cleanup a exit.
+ * Empty out the list of directories scheduled for cleanup at exit.
*
* We want to remove the output directories only on a failure, so call this
* function when we know that the operation has succeeded.
diff --git a/src/bin/pg_combinebackup/reconstruct.c b/src/bin/pg_combinebackup/reconstruct.c
index 15f62c18df8..d481a5c565f 100644
--- a/src/bin/pg_combinebackup/reconstruct.c
+++ b/src/bin/pg_combinebackup/reconstruct.c
@@ -756,7 +756,7 @@ write_block(int fd, char *output_filename,
}
/*
- * Read a block of data (BLCKSZ bytes) into the the buffer.
+ * Read a block of data (BLCKSZ bytes) into the buffer.
*/
static void
read_block(rfile *s, off_t off, uint8 *buffer)
diff --git a/src/bin/pg_upgrade/t/004_subscription.pl b/src/bin/pg_upgrade/t/004_subscription.pl
index 48918e8c294..a038928fe72 100644
--- a/src/bin/pg_upgrade/t/004_subscription.pl
+++ b/src/bin/pg_upgrade/t/004_subscription.pl
@@ -241,7 +241,7 @@ my $tab_upgraded2_oid = $old_sub->safe_psql('postgres',
$old_sub->stop;
-# Change configuration so that initial table sync sync does not get started
+# Change configuration so that initial table sync does not get started
# automatically
$new_sub->append_conf('postgresql.conf',
"max_logical_replication_workers = 0");
diff --git a/src/common/unicode_category.c b/src/common/unicode_category.c
index bece7334f5b..359e82ec316 100644
--- a/src/common/unicode_category.c
+++ b/src/common/unicode_category.c
@@ -23,7 +23,7 @@
/*
* Create bitmasks from pg_unicode_category values for efficient comparison of
* multiple categories. For instance, PG_U_MN_MASK is a bitmask representing
- * the general cateogry Mn; and PG_U_M_MASK represents general categories Mn,
+ * the general category Mn; and PG_U_M_MASK represents general categories Mn,
* Me, and Mc.
*
* The number of Unicode General Categories should never grow, so a 32-bit
diff --git a/src/include/access/heapam_xlog.h b/src/include/access/heapam_xlog.h
index 551a0370aa2..22a1747c4de 100644
--- a/src/include/access/heapam_xlog.h
+++ b/src/include/access/heapam_xlog.h
@@ -287,7 +287,7 @@ typedef struct xl_heap_prune
uint8 flags;
/*
- * If XLHP_HAS_CONFLICT_HORIZON is set, the conflict horzion XID follows,
+ * If XLHP_HAS_CONFLICT_HORIZON is set, the conflict horizon XID follows,
* unaligned
*/
} xl_heap_prune;
@@ -322,7 +322,7 @@ typedef struct xl_heap_prune
#define XLHP_HAS_FREEZE_PLANS (1 << 4)
/*
- * XLHP_HAS_REDIRECTIONS, XLHP_HAS_DEAD_ITEMS, and XLHP_HAS_NOW_UNUSED
+ * XLHP_HAS_REDIRECTIONS, XLHP_HAS_DEAD_ITEMS, and XLHP_HAS_NOW_UNUSED_ITEMS
* indicate that xlhp_prune_items sub-records with redirected, dead, and
* unused item offsets are present.
*/
@@ -354,9 +354,9 @@ typedef struct xlhp_freeze_plan
*
* The backup block's data contains an array of xlhp_freeze_plan structs (with
* nplans elements). The individual item offsets are located in an array at
- * the end of the entire record with with nplans * (each plan's ntuples)
- * members. Those offsets are in the same order as the plans. The REDO
- * routine uses the offsets to freeze the corresponding heap tuples.
+ * the end of the entire record with nplans * (each plan's ntuples) members.
+ * Those offsets are in the same order as the plans. The REDO routine uses
+ * the offsets to freeze the corresponding heap tuples.
*
* (As of PostgreSQL 17, XLOG_HEAP2_PRUNE_VACUUM_SCAN records replace the
* separate XLOG_HEAP2_FREEZE_PAGE records.)
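
The layout fixed above is easiest to follow as code: one offsets array serves all freeze plans and is consumed in plan order, each plan taking its ntuples entries. A simplified walk with stand-in types, not the actual WAL structs:

#include <stdint.h>

typedef uint16_t OffsetNumber;

typedef struct freeze_plan          /* stand-in for xlhp_freeze_plan */
{
    uint16_t ntuples;               /* offsets belonging to this plan */
} freeze_plan;

static void
apply_freeze_plans(const freeze_plan *plans, int nplans,
                   const OffsetNumber *offsets)
{
    const OffsetNumber *cur = offsets;

    for (int i = 0; i < nplans; i++)
    {
        for (int j = 0; j < plans[i].ntuples; j++)
        {
            OffsetNumber off = *cur++;

            (void) off;             /* REDO would freeze the tuple here */
        }
    }
}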
diff --git a/src/include/common/hashfn_unstable.h b/src/include/common/hashfn_unstable.h
index 7b647470aba..0adb0f82f9a 100644
--- a/src/include/common/hashfn_unstable.h
+++ b/src/include/common/hashfn_unstable.h
@@ -73,7 +73,7 @@
*
* For longer or variable-length input, fasthash_accum() is a more
* flexible, but more verbose method. The standalone functions use this
- * internally, so see fasthash64() for an an example of this.
+ * internally, so see fasthash64() for an example of this.
*
* After all inputs have been mixed in, finalize the hash:
*
diff --git a/src/include/lib/radixtree.h b/src/include/lib/radixtree.h
index dc4c00d38a6..d9f545d491a 100644
--- a/src/include/lib/radixtree.h
+++ b/src/include/lib/radixtree.h
@@ -64,7 +64,7 @@
* small enough.
*
* There are two other techniques described in the paper that are not
- * impemented here:
+ * implemented here:
* - path compression "...removes all inner nodes that have only a single child."
* - lazy path expansion "...inner nodes are only created if they are required
* to distinguish at least two leaf nodes."
@@ -385,7 +385,7 @@ typedef struct RT_NODE
/*
* Number of children. uint8 is sufficient for all node kinds, because
- * nodes shrink when this number gets lower than some thresold. Since
+ * nodes shrink when this number gets lower than some threshold. Since
* node256 cannot possibly have zero children, we let the counter overflow
* and we interpret zero as "256" for this node kind.
*/
@@ -1581,7 +1581,7 @@ RT_EXTEND_UP(RT_RADIX_TREE * tree, uint64 key)
Assert(shift < target_shift);
- /* Grow tree upwards until start shift can accomodate the key */
+ /* Grow tree upwards until start shift can accommodate the key */
while (shift < target_shift)
{
RT_CHILD_PTR node;
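
The loop whose comment is corrected above grows the tree from the top: new roots are pushed until the root's shift covers the key's highest set bits. A hedged, self-contained sketch of the idea (single-slot inner nodes for brevity; not radixtree.h's RT_EXTEND_UP):

#include <stdint.h>
#include <stdlib.h>

#define SPAN 8                      /* bits consumed per tree level */

typedef struct node
{
    struct node *child;             /* simplified single-slot inner node */
} node;

static void
extend_up(node **root, int *shift, uint64_t key)
{
    int target_shift = 0;

    /* Highest level at which the key still has bits set. */
    while (target_shift + SPAN < 64 && (key >> (target_shift + SPAN)) != 0)
        target_shift += SPAN;

    /* Push new roots until the start shift can accommodate the key. */
    while (*shift < target_shift)
    {
        node *new_root = calloc(1, sizeof(node));

        new_root->child = *root;    /* old root becomes the first child */
        *root = new_root;
        *shift += SPAN;
    }
}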
diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h
index 6c71098f2d8..b8141f141aa 100644
--- a/src/include/nodes/pathnodes.h
+++ b/src/include/nodes/pathnodes.h
@@ -2867,7 +2867,7 @@ typedef struct PlaceHolderVar
* are not set meaningfully within such structs.
*
* We also create transient SpecialJoinInfos for child joins during
- * partiotionwise join planning, which are also not present in join_info_list.
+ * partitionwise join planning, which are also not present in join_info_list.
*/
#ifndef HAVE_SPECIALJOININFO_TYPEDEF
typedef struct SpecialJoinInfo SpecialJoinInfo;
diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h
index 0052c1f0ee2..719c4b7b615 100644
--- a/src/include/nodes/primnodes.h
+++ b/src/include/nodes/primnodes.h
@@ -1791,7 +1791,7 @@ typedef struct JsonExpr
/* Format of the above expression needed by ruleutils.c */
JsonFormat *format;
- /* jsopath-valued expression containing the query pattern */
+ /* jsonpath-valued expression containing the query pattern */
Node *path_spec;
/* Expected type/format of the output. */
diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h
index 18891a86fb9..9488bf1857c 100644
--- a/src/include/storage/proc.h
+++ b/src/include/storage/proc.h
@@ -202,7 +202,7 @@ struct PGPROC
LocalTransactionId lxid; /* local id of top-level transaction
* currently being executed by this
* proc, if running; else
- * InvalidLocaltransactionId */
+ * InvalidLocalTransactionId */
} vxid;
/* These fields are zero while a backend is still starting up: */
diff --git a/src/interfaces/libpq/fe-cancel.c b/src/interfaces/libpq/fe-cancel.c
index 954dce54317..4f00a91b51e 100644
--- a/src/interfaces/libpq/fe-cancel.c
+++ b/src/interfaces/libpq/fe-cancel.c
@@ -210,7 +210,7 @@ PQcancelPoll(PGcancelConn *cancelConn)
int n;
/*
- * We leave most of the connection establishement to PQconnectPoll, since
+ * We leave most of the connection establishment to PQconnectPoll, since
* it's very similar to normal connection establishment. But once we get
* to the CONNECTION_AWAITING_RESPONSE we need to start doing our own
* thing.
diff --git a/src/interfaces/libpq/fe-secure-openssl.c b/src/interfaces/libpq/fe-secure-openssl.c
index c98cfa60848..e7a4d006e19 100644
--- a/src/interfaces/libpq/fe-secure-openssl.c
+++ b/src/interfaces/libpq/fe-secure-openssl.c
@@ -1655,7 +1655,7 @@ pgtls_close(PGconn *conn)
{
/*
* In the non-SSL case, just remove the crypto callbacks if the
- * connection has then loaded. This code path has no dependency on
+ * connection has them loaded. This code path has no dependency on
* any pending SSL calls.
*/
if (conn->crypto_loaded)
diff --git a/src/test/isolation/expected/temp-schema-cleanup.out b/src/test/isolation/expected/temp-schema-cleanup.out
index 35b91d9e450..d10aee53a80 100644
--- a/src/test/isolation/expected/temp-schema-cleanup.out
+++ b/src/test/isolation/expected/temp-schema-cleanup.out
@@ -9,7 +9,7 @@ step s1_create_temp_objects:
CREATE OR REPLACE FUNCTION pg_temp.long() RETURNS text LANGUAGE sql AS $body$ SELECT %L; $body$$outer$,
(SELECT string_agg(g.i::text||':'||random()::text, '|') FROM generate_series(1, 100) g(i))));
- -- The above bug requirs function removal to happen after a catalog
+ -- The above bug requires function removal to happen after a catalog
-- invalidation. dependency.c sorts objects in descending oid order so
-- that newer objects are deleted before older objects, so create a
-- table after.
@@ -66,7 +66,7 @@ step s1_create_temp_objects:
CREATE OR REPLACE FUNCTION pg_temp.long() RETURNS text LANGUAGE sql AS $body$ SELECT %L; $body$$outer$,
(SELECT string_agg(g.i::text||':'||random()::text, '|') FROM generate_series(1, 100) g(i))));
- -- The above bug requirs function removal to happen after a catalog
+ -- The above bug requires function removal to happen after a catalog
-- invalidation. dependency.c sorts objects in descending oid order so
-- that newer objects are deleted before older objects, so create a
-- table after.
diff --git a/src/test/isolation/specs/temp-schema-cleanup.spec b/src/test/isolation/specs/temp-schema-cleanup.spec
index a9417b7e905..72decba6cbf 100644
--- a/src/test/isolation/specs/temp-schema-cleanup.spec
+++ b/src/test/isolation/specs/temp-schema-cleanup.spec
@@ -30,7 +30,7 @@ step s1_create_temp_objects {
CREATE OR REPLACE FUNCTION pg_temp.long() RETURNS text LANGUAGE sql AS $body$ SELECT %L; $body$$outer$,
(SELECT string_agg(g.i::text||':'||random()::text, '|') FROM generate_series(1, 100) g(i))));
- -- The above bug requirs function removal to happen after a catalog
+ -- The above bug requires function removal to happen after a catalog
-- invalidation. dependency.c sorts objects in descending oid order so
-- that newer objects are deleted before older objects, so create a
-- table after.
diff --git a/src/test/modules/test_resowner/test_resowner_many.c b/src/test/modules/test_resowner/test_resowner_many.c
index 4722c66267a..7808c9e5df3 100644
--- a/src/test/modules/test_resowner/test_resowner_many.c
+++ b/src/test/modules/test_resowner/test_resowner_many.c
@@ -196,7 +196,7 @@ GetTotalResourceCount(ManyTestResourceKind *kinds, int nkinds)
* Remember lots of resources, belonging to 'nkinds' different resource types
* with different priorities. Then forget some of them, and finally, release
* the resource owner. We use a custom resource type that performs various
- * sanity checks to verify that all the the resources are released, and in the
+ * sanity checks to verify that all the resources are released, and in the
* correct order.
*/
PG_FUNCTION_INFO_V1(test_resowner_many);
diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out
index 50695d83a2b..56c361ccef7 100644
--- a/src/test/regress/expected/aggregates.out
+++ b/src/test/regress/expected/aggregates.out
@@ -2889,7 +2889,7 @@ GROUP BY c1.w, c1.z;
RESET enable_nestloop;
RESET enable_hashjoin;
DROP TABLE group_agg_pk;
--- Test the case where the the ordering of scan matches the ordering within the
+-- Test the case where the ordering of the scan matches the ordering within the
-- aggregate but cannot be found in the group-by list
CREATE TABLE agg_sort_order (c1 int PRIMARY KEY, c2 int);
CREATE UNIQUE INDEX agg_sort_order_c2_idx ON agg_sort_order(c2);
diff --git a/src/test/regress/expected/copy.out b/src/test/regress/expected/copy.out
index b48365ec981..44114089a6d 100644
--- a/src/test/regress/expected/copy.out
+++ b/src/test/regress/expected/copy.out
@@ -276,8 +276,8 @@ CREATE TABLE parted_si_p_even PARTITION OF parted_si FOR VALUES IN (0);
CREATE TABLE parted_si_p_odd PARTITION OF parted_si FOR VALUES IN (1);
-- Test that bulk relation extension handles reusing a single BulkInsertState
-- across partitions. Without the fix applied, this reliably reproduces
--- #18130 unless shared_buffers is extremely small (preventing any use use of
--- bulk relation extension). See
+-- #18130 unless shared_buffers is extremely small (preventing any use of bulk
+-- relation extension). See
-- https://postgr.es/m/18130-7a86a7356a75209d%40postgresql.org
-- https://postgr.es/m/257696.1695670946%40sss.pgh.pa.us
\set filename :abs_srcdir '/data/desc.data'
diff --git a/src/test/regress/expected/foreign_key.out b/src/test/regress/expected/foreign_key.out
index af2a878dd67..0b55167ac87 100644
--- a/src/test/regress/expected/foreign_key.out
+++ b/src/test/regress/expected/foreign_key.out
@@ -2734,7 +2734,7 @@ UPDATE fkpart10.tbl1 SET f1 = 2 WHERE f1 = 1;
INSERT INTO fkpart10.tbl1 VALUES (0), (1);
COMMIT;
-- test that cross-partition updates correctly enforces the foreign key
--- restriction (specifically testing INITIAILLY DEFERRED)
+-- restriction (specifically testing INITIALLY DEFERRED)
BEGIN;
UPDATE fkpart10.tbl1 SET f1 = 3 WHERE f1 = 0;
UPDATE fkpart10.tbl3 SET f1 = f1 * -1;
diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out
index 0c5521d2aa9..09a8d8221ce 100644
--- a/src/test/regress/expected/publication.out
+++ b/src/test/regress/expected/publication.out
@@ -945,10 +945,10 @@ ALTER TABLE rf_tbl_abcd_part_pk ATTACH PARTITION rf_tbl_abcd_part_pk_1 FOR VALUE
SET client_min_messages = 'ERROR';
CREATE PUBLICATION testpub6 FOR TABLE rf_tbl_abcd_pk (a, b);
RESET client_min_messages;
--- ok - (a,b) coverts all PK cols
+-- ok - (a,b) covers all PK cols
UPDATE rf_tbl_abcd_pk SET a = 1;
ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a, b, c);
--- ok - (a,b,c) coverts all PK cols
+-- ok - (a,b,c) covers all PK cols
UPDATE rf_tbl_abcd_pk SET a = 1;
ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a);
-- fail - "b" is missing from the column list
diff --git a/src/test/regress/expected/tsdicts.out b/src/test/regress/expected/tsdicts.out
index 4eff85da793..0bbf2ff4ca2 100644
--- a/src/test/regress/expected/tsdicts.out
+++ b/src/test/regress/expected/tsdicts.out
@@ -689,7 +689,7 @@ CREATE TEXT SEARCH DICTIONARY tsdict_case
ERROR: unrecognized Ispell parameter: "DictFile"
-- Test grammar for configurations
CREATE TEXT SEARCH CONFIGURATION dummy_tst (COPY=english);
--- Overriden mapping change with duplicated tokens.
+-- Overridden mapping change with duplicated tokens.
ALTER TEXT SEARCH CONFIGURATION dummy_tst
ALTER MAPPING FOR word, word WITH ispell;
-- Not a token supported by the configuration's parser, fails.
diff --git a/src/test/regress/sql/aggregates.sql b/src/test/regress/sql/aggregates.sql
index 2905848eadc..d28338ba3da 100644
--- a/src/test/regress/sql/aggregates.sql
+++ b/src/test/regress/sql/aggregates.sql
@@ -1257,7 +1257,7 @@ RESET enable_nestloop;
RESET enable_hashjoin;
DROP TABLE group_agg_pk;
--- Test the case where the the ordering of scan matches the ordering within the
+-- Test the case where the ordering of the scan matches the ordering within the
-- aggregate but cannot be found in the group-by list
CREATE TABLE agg_sort_order (c1 int PRIMARY KEY, c2 int);
CREATE UNIQUE INDEX agg_sort_order_c2_idx ON agg_sort_order(c2);
diff --git a/src/test/regress/sql/copy.sql b/src/test/regress/sql/copy.sql
index 43d2e906dd9..e2dd24cb351 100644
--- a/src/test/regress/sql/copy.sql
+++ b/src/test/regress/sql/copy.sql
@@ -306,8 +306,8 @@ CREATE TABLE parted_si_p_odd PARTITION OF parted_si FOR VALUES IN (1);
-- Test that bulk relation extension handles reusing a single BulkInsertState
-- across partitions. Without the fix applied, this reliably reproduces
--- #18130 unless shared_buffers is extremely small (preventing any use use of
--- bulk relation extension). See
+-- #18130 unless shared_buffers is extremely small (preventing any use of bulk
+-- relation extension). See
-- https://postgr.es/m/18130-7a86a7356a75209d%40postgresql.org
-- https://postgr.es/m/257696.1695670946%40sss.pgh.pa.us
\set filename :abs_srcdir '/data/desc.data'
diff --git a/src/test/regress/sql/foreign_key.sql b/src/test/regress/sql/foreign_key.sql
index 22e177f89b3..f5e09389997 100644
--- a/src/test/regress/sql/foreign_key.sql
+++ b/src/test/regress/sql/foreign_key.sql
@@ -1943,7 +1943,7 @@ INSERT INTO fkpart10.tbl1 VALUES (0), (1);
COMMIT;
-- test that cross-partition updates correctly enforces the foreign key
--- restriction (specifically testing INITIAILLY DEFERRED)
+-- restriction (specifically testing INITIALLY DEFERRED)
BEGIN;
UPDATE fkpart10.tbl1 SET f1 = 3 WHERE f1 = 0;
UPDATE fkpart10.tbl3 SET f1 = f1 * -1;
diff --git a/src/test/regress/sql/publication.sql b/src/test/regress/sql/publication.sql
index 8ba8036bfbd..479d4f32644 100644
--- a/src/test/regress/sql/publication.sql
+++ b/src/test/regress/sql/publication.sql
@@ -603,10 +603,10 @@ ALTER TABLE rf_tbl_abcd_part_pk ATTACH PARTITION rf_tbl_abcd_part_pk_1 FOR VALUE
SET client_min_messages = 'ERROR';
CREATE PUBLICATION testpub6 FOR TABLE rf_tbl_abcd_pk (a, b);
RESET client_min_messages;
--- ok - (a,b) coverts all PK cols
+-- ok - (a,b) covers all PK cols
UPDATE rf_tbl_abcd_pk SET a = 1;
ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a, b, c);
--- ok - (a,b,c) coverts all PK cols
+-- ok - (a,b,c) covers all PK cols
UPDATE rf_tbl_abcd_pk SET a = 1;
ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a);
-- fail - "b" is missing from the column list
diff --git a/src/test/regress/sql/tsdicts.sql b/src/test/regress/sql/tsdicts.sql
index 6a2b00369ce..cf08410bb2d 100644
--- a/src/test/regress/sql/tsdicts.sql
+++ b/src/test/regress/sql/tsdicts.sql
@@ -254,7 +254,7 @@ CREATE TEXT SEARCH DICTIONARY tsdict_case
-- Test grammar for configurations
CREATE TEXT SEARCH CONFIGURATION dummy_tst (COPY=english);
--- Overriden mapping change with duplicated tokens.
+-- Overridden mapping change with duplicated tokens.
ALTER TEXT SEARCH CONFIGURATION dummy_tst
ALTER MAPPING FOR word, word WITH ispell;
-- Not a token supported by the configuration's parser, fails.
diff --git a/src/test/subscription/t/004_sync.pl b/src/test/subscription/t/004_sync.pl
index e077e255fc0..a2d9462395a 100644
--- a/src/test/subscription/t/004_sync.pl
+++ b/src/test/subscription/t/004_sync.pl
@@ -165,7 +165,7 @@ $node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
# When DROP SUBSCRIPTION tries to drop the tablesync slot, the slot may not
# have been created, which causes the slot to be created after the DROP
-# SUSCRIPTION finishes. Such slots eventually get dropped at walsender exit
+# SUBSCRIPTION finishes. Such slots eventually get dropped at walsender exit
# time. So, to prevent being affected by such ephemeral tablesync slots, we
# wait until all the slots have been cleaned.
ok( $node_publisher->poll_query_until(
diff --git a/src/test/subscription/t/026_stats.pl b/src/test/subscription/t/026_stats.pl
index bac1cf39837..d1d68fad9af 100644
--- a/src/test/subscription/t/026_stats.pl
+++ b/src/test/subscription/t/026_stats.pl
@@ -271,7 +271,7 @@ is( $node_subscriber->safe_psql(
my $sub2_oid = $node_subscriber->safe_psql($db,
qq(SELECT oid FROM pg_subscription WHERE subname = '$sub2_name'));
-# Diassociate the subscription 2 from its replication slot and drop it
+# Disassociate the subscription 2 from its replication slot and drop it
$node_subscriber->safe_psql(
$db,
qq(