about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
authorPeter Eisentraut <peter_e@gmx.net>2017-03-14 11:38:30 -0400
committerPeter Eisentraut <peter_e@gmx.net>2017-03-14 12:58:39 -0400
commitf97a028d8ee3e7d64a93285707af94b612c47651 (patch)
tree493c190e8470843ca2cd73ac06f86476d5721062 /src
parent5ed6fff6b729c3cce55d4abc8f695da93aa40a0d (diff)
downloadpostgresql-f97a028d8ee3e7d64a93285707af94b612c47651.tar.gz
postgresql-f97a028d8ee3e7d64a93285707af94b612c47651.zip
Spelling fixes in code comments
From: Josh Soref <jsoref@gmail.com>
Diffstat (limited to 'src')
-rw-r--r--src/backend/access/transam/twophase.c2
-rw-r--r--src/backend/commands/copy.c4
-rw-r--r--src/backend/commands/tablecmds.c2
-rw-r--r--src/backend/foreign/foreign.c2
-rw-r--r--src/backend/optimizer/plan/createplan.c2
-rw-r--r--src/backend/parser/parse_collate.c4
-rw-r--r--src/backend/parser/parse_param.c2
-rw-r--r--src/backend/regex/regc_pg_locale.c2
-rw-r--r--src/backend/storage/ipc/procarray.c4
-rw-r--r--src/backend/storage/lmgr/lock.c4
-rw-r--r--src/backend/tsearch/dict_thesaurus.c2
-rw-r--r--src/backend/utils/adt/selfuncs.c2
-rw-r--r--src/backend/utils/cache/lsyscache.c2
-rw-r--r--src/backend/utils/cache/plancache.c4
-rw-r--r--src/backend/utils/error/elog.c6
-rw-r--r--src/backend/utils/fmgr/fmgr.c2
-rw-r--r--src/backend/utils/hash/dynahash.c2
-rw-r--r--src/backend/utils/misc/README2
-rw-r--r--src/bin/pg_dump/parallel.c2
-rw-r--r--src/fe_utils/string_utils.c2
-rw-r--r--src/include/utils/jsonapi.h2
-rw-r--r--src/interfaces/libpq/fe-connect.c2
-rw-r--r--src/interfaces/libpq/fe-exec.c2
-rw-r--r--src/pl/plperl/ppport.h6
-rw-r--r--src/pl/plpgsql/src/pl_exec.c2
-rw-r--r--src/pl/plpython/plpy_procedure.c2
-rw-r--r--src/pl/plpython/plpy_typeio.c2
-rw-r--r--src/test/regress/expected/errors.out2
-rw-r--r--src/test/regress/expected/groupingsets.out2
-rw-r--r--src/test/regress/sql/errors.sql2
-rw-r--r--src/test/regress/sql/groupingsets.sql2
31 files changed, 40 insertions, 40 deletions
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 5cefc43bfe3..83ca6e04081 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -1612,7 +1612,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
*
* It's also possible to move I/O out of the lock, but on every error we
* should check whether somebody committed our transaction in different
- * backend. Let's leave this optimisation for future, if somebody will
+ * backend. Let's leave this optimization for future, if somebody will
* spot that this place cause bottleneck.
*
* Note that it isn't possible for there to be a GXACT with a
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 3102ab18c57..ba89b292d1e 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -2406,7 +2406,7 @@ CopyFrom(CopyState cstate)
* earlier scan or command. This ensures that if this subtransaction
* aborts then the frozen rows won't be visible after xact cleanup. Note
* that the stronger test of exactly which subtransaction created it is
- * crucial for correctness of this optimisation.
+ * crucial for correctness of this optimization.
*/
if (cstate->freeze)
{
@@ -2973,7 +2973,7 @@ BeginCopyFrom(ParseState *pstate,
* the special case of when the default expression is the
* nextval() of a sequence which in this specific case is
* known to be safe for use with the multi-insert
- * optimisation. Hence we use this special case function
+ * optimization. Hence we use this special case function
* checker rather than the standard check for
* contain_volatile_functions().
*/
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 1ddb72d1647..86329e5f9f2 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -3182,7 +3182,7 @@ AlterTableGetLockLevel(List *cmds)
break;
/*
- * Changing foreign table options may affect optimisation.
+ * Changing foreign table options may affect optimization.
*/
case AT_GenericOptions:
case AT_AlterColumnGenericOptions:
diff --git a/src/backend/foreign/foreign.c b/src/backend/foreign/foreign.c
index fdb4f712534..5a45558b915 100644
--- a/src/backend/foreign/foreign.c
+++ b/src/backend/foreign/foreign.c
@@ -724,7 +724,7 @@ GetExistingLocalJoinPath(RelOptInfo *joinrel)
Path *path = (Path *) lfirst(lc);
JoinPath *joinpath = NULL;
- /* Skip parameterised paths. */
+ /* Skip parameterized paths. */
if (path->param_info != NULL)
continue;
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 4629ca27ee5..89e1946fc26 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -4582,7 +4582,7 @@ fix_indexqual_operand(Node *node, IndexOptInfo *index, int indexcol)
}
}
- /* Ooops... */
+ /* Oops... */
elog(ERROR, "index key does not match expected index column");
return NULL; /* keep compiler quiet */
}
diff --git a/src/backend/parser/parse_collate.c b/src/backend/parser/parse_collate.c
index 52ac7227909..cc235d422f5 100644
--- a/src/backend/parser/parse_collate.c
+++ b/src/backend/parser/parse_collate.c
@@ -802,7 +802,7 @@ merge_collation_state(Oid collation,
else if (collation != DEFAULT_COLLATION_OID)
{
/*
- * Ooops, we have a conflict. We cannot throw error
+ * Oops, we have a conflict. We cannot throw error
* here, since the conflict could be resolved by a
* later sibling CollateExpr, or the parent might not
* care about collation anyway. Return enough info to
@@ -821,7 +821,7 @@ merge_collation_state(Oid collation,
if (collation != context->collation)
{
/*
- * Ooops, we have a conflict of explicit COLLATE clauses.
+ * Oops, we have a conflict of explicit COLLATE clauses.
* Here we choose to throw error immediately; that is what
* the SQL standard says to do, and there's no good reason
* to be less strict.
diff --git a/src/backend/parser/parse_param.c b/src/backend/parser/parse_param.c
index 2575e02325e..20fd83f095e 100644
--- a/src/backend/parser/parse_param.c
+++ b/src/backend/parser/parse_param.c
@@ -210,7 +210,7 @@ variable_coerce_param_hook(ParseState *pstate, Param *param,
}
else
{
- /* Ooops */
+ /* Oops */
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_PARAMETER),
errmsg("inconsistent types deduced for parameter $%d",
diff --git a/src/backend/regex/regc_pg_locale.c b/src/backend/regex/regc_pg_locale.c
index afa3a7d613c..0121cbb2ada 100644
--- a/src/backend/regex/regc_pg_locale.c
+++ b/src/backend/regex/regc_pg_locale.c
@@ -224,7 +224,7 @@ static const unsigned char pg_char_properties[128] = {
* pg_set_regex_collation: set collation for these functions to obey
*
* This is called when beginning compilation or execution of a regexp.
- * Since there's no need for re-entrancy of regexp operations, it's okay
+ * Since there's no need for reentrancy of regexp operations, it's okay
* to store the results in static variables.
*/
void
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index cd14667c16c..c724a0e9525 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -280,7 +280,7 @@ ProcArrayAdd(PGPROC *proc)
if (arrayP->numProcs >= arrayP->maxProcs)
{
/*
- * Ooops, no room. (This really shouldn't happen, since there is a
+ * Oops, no room. (This really shouldn't happen, since there is a
* fixed supply of PGPROC structs too, and so we should have failed
* earlier.)
*/
@@ -370,7 +370,7 @@ ProcArrayRemove(PGPROC *proc, TransactionId latestXid)
}
}
- /* Ooops */
+ /* Oops */
LWLockRelease(ProcArrayLock);
elog(LOG, "failed to find proc %p in ProcArray", proc);
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index ad64a79fa1d..4315be40773 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -1125,7 +1125,7 @@ SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
&found);
if (!proclock)
{
- /* Ooops, not enough shmem for the proclock */
+ /* Oops, not enough shmem for the proclock */
if (lock->nRequested == 0)
{
/*
@@ -4046,7 +4046,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
&found);
if (!proclock)
{
- /* Ooops, not enough shmem for the proclock */
+ /* Oops, not enough shmem for the proclock */
if (lock->nRequested == 0)
{
/*
diff --git a/src/backend/tsearch/dict_thesaurus.c b/src/backend/tsearch/dict_thesaurus.c
index ee23fcfac8b..9a01075d4e1 100644
--- a/src/backend/tsearch/dict_thesaurus.c
+++ b/src/backend/tsearch/dict_thesaurus.c
@@ -23,7 +23,7 @@
/*
- * Temporay we use TSLexeme.flags for inner use...
+ * Temporary we use TSLexeme.flags for inner use...
*/
#define DT_USEASIS 0x1000
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 5f28a1a7c58..04bd9b95b27 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -4312,7 +4312,7 @@ get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
return true;
}
- /* Ooops, clause has wrong structure (probably var op var) */
+ /* Oops, clause has wrong structure (probably var op var) */
ReleaseVariableStats(*vardata);
ReleaseVariableStats(rdata);
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index 1b04c098d06..b891f388e5d 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -2332,7 +2332,7 @@ get_typavgwidth(Oid typid, int32 typmod)
}
/*
- * Ooops, we have no idea ... wild guess time.
+ * Oops, we have no idea ... wild guess time.
*/
return 32;
}
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index dffc92762bc..fa023748115 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -621,7 +621,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
return NIL;
}
- /* Ooops, the race case happened. Release useless locks. */
+ /* Oops, the race case happened. Release useless locks. */
AcquirePlannerLocks(plansource->query_list, false);
}
@@ -845,7 +845,7 @@ CheckCachedPlan(CachedPlanSource *plansource)
return true;
}
- /* Ooops, the race case happened. Release useless locks. */
+ /* Oops, the race case happened. Release useless locks. */
AcquireExecutorLocks(plan->stmt_list, false);
}
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index 40e023c7d74..6e83cbedabf 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -318,7 +318,7 @@ errstart(int elevel, const char *filename, int lineno,
*/
if (ErrorContext == NULL)
{
- /* Ooops, hard crash time; very little we can do safely here */
+ /* Oops, hard crash time; very little we can do safely here */
write_stderr("error occurred at %s:%d before error message processing is available\n",
filename ? filename : "(unknown file)", lineno);
exit(2);
@@ -331,7 +331,7 @@ errstart(int elevel, const char *filename, int lineno,
if (recursion_depth++ > 0 && elevel >= ERROR)
{
/*
- * Ooops, error during error processing. Clear ErrorContext as
+ * Oops, error during error processing. Clear ErrorContext as
* discussed at top of file. We will not return to the original
* error's reporter or handler, so we don't need it.
*/
@@ -1302,7 +1302,7 @@ elog_start(const char *filename, int lineno, const char *funcname)
/* Make sure that memory context initialization has finished */
if (ErrorContext == NULL)
{
- /* Ooops, hard crash time; very little we can do safely here */
+ /* Oops, hard crash time; very little we can do safely here */
write_stderr("error occurred at %s:%d before error message processing is available\n",
filename ? filename : "(unknown file)", lineno);
exit(2);
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index 3976496aef6..8c00da6c6d6 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -877,7 +877,7 @@ struct fmgr_security_definer_cache
* To execute a call, we temporarily replace the flinfo with the cached
* and looked-up one, while keeping the outer fcinfo (which contains all
* the actual arguments, etc.) intact. This is not re-entrant, but then
- * the fcinfo itself can't be used re-entrantly anyway.
+ * the fcinfo itself can't be used reentrantly anyway.
*/
static Datum
fmgr_security_definer(PG_FUNCTION_ARGS)
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index d370b727191..12b1658c9ad 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -72,7 +72,7 @@
* when combined with HASH_DEBUG, these are displayed by hdestroy().
*
* Problems & fixes to ejp@ausmelb.oz. WARNING: relies on pre-processor
- * concatenation property, in probably unnecessary code 'optimisation'.
+ * concatenation property, in probably unnecessary code 'optimization'.
*
* Modified margo@postgres.berkeley.edu February 1990
* added multiple table interface
diff --git a/src/backend/utils/misc/README b/src/backend/utils/misc/README
index 70244ced18d..6e294386f76 100644
--- a/src/backend/utils/misc/README
+++ b/src/backend/utils/misc/README
@@ -114,7 +114,7 @@ If a show_hook is provided, it points to a function of the signature
This hook allows variable-specific computation of the value displayed
by SHOW (and other SQL features for showing GUC variable values).
The return value can point to a static buffer, since show functions are
-not used re-entrantly.
+not used reentrantly.
Saving/Restoring GUC Variable Values
diff --git a/src/bin/pg_dump/parallel.c b/src/bin/pg_dump/parallel.c
index be8e018a964..feda575af85 100644
--- a/src/bin/pg_dump/parallel.c
+++ b/src/bin/pg_dump/parallel.c
@@ -92,7 +92,7 @@ struct ParallelSlot
/* These fields are valid if workerStatus == WRKR_WORKING: */
ParallelCompletionPtr callback; /* function to call on completion */
- void *callback_data; /* passthru data for it */
+ void *callback_data; /* passthrough data for it */
ArchiveHandle *AH; /* Archive data worker is using */
diff --git a/src/fe_utils/string_utils.c b/src/fe_utils/string_utils.c
index 08fe765b5af..d1a9ddc4c6c 100644
--- a/src/fe_utils/string_utils.c
+++ b/src/fe_utils/string_utils.c
@@ -173,7 +173,7 @@ fmtQualifiedId(int remoteVersion, const char *schema, const char *id)
* returned by PQserverVersion()) as a string. This exists mainly to
* encapsulate knowledge about two-part vs. three-part version numbers.
*
- * For re-entrancy, caller must supply the buffer the string is put in.
+ * For reentrancy, caller must supply the buffer the string is put in.
* Recommended size of the buffer is 32 bytes.
*
* Returns address of 'buf', as a notational convenience.
diff --git a/src/include/utils/jsonapi.h b/src/include/utils/jsonapi.h
index 6962f1a6be6..8f132d732be 100644
--- a/src/include/utils/jsonapi.h
+++ b/src/include/utils/jsonapi.h
@@ -127,7 +127,7 @@ extern JsonLexContext *makeJsonLexContextCstringLen(char *json,
/*
* Utility function to check if a string is a valid JSON number.
*
- * str agrument does not need to be nul-terminated.
+ * str argument does not need to be nul-terminated.
*/
extern bool IsValidJsonNumber(const char *str, int len);
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index c1814f5fe43..27155f8578d 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -2136,7 +2136,7 @@ keep_going: /* We will come back to here until there is
} /* loop over addresses */
/*
- * Ooops, no more addresses. An appropriate error message is
+ * Oops, no more addresses. An appropriate error message is
* already set up, so just set the right status.
*/
goto error_return;
diff --git a/src/interfaces/libpq/fe-exec.c b/src/interfaces/libpq/fe-exec.c
index b5518757ecf..9decd5339e2 100644
--- a/src/interfaces/libpq/fe-exec.c
+++ b/src/interfaces/libpq/fe-exec.c
@@ -2334,7 +2334,7 @@ PQputCopyEnd(PGconn *conn, const char *errormsg)
{
if (errormsg)
{
- /* Ooops, no way to do this in 2.0 */
+ /* Oops, no way to do this in 2.0 */
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("function requires at least protocol version 3.0\n"));
return -1;
diff --git a/src/pl/plperl/ppport.h b/src/pl/plperl/ppport.h
index 31d06cb3b0b..8c2365674f9 100644
--- a/src/pl/plperl/ppport.h
+++ b/src/pl/plperl/ppport.h
@@ -6205,10 +6205,10 @@ DPPP_(my_grok_number)(pTHX_ const char *pv, STRLEN len, UV *valuep)
/* UVs are at least 32 bits, so the first 9 decimal digits cannot
overflow. */
UV value = *s - '0';
- /* This construction seems to be more optimiser friendly.
+ /* This construction seems to be more optimizer friendly.
(without it gcc does the isDIGIT test and the *s - '0' separately)
With it gcc on arm is managing 6 instructions (6 cycles) per digit.
- In theory the optimiser could deduce how far to unroll the loop
+ In theory the optimizer could deduce how far to unroll the loop
before checking for overflow. */
if (++s < send) {
int digit = *s - '0';
@@ -6606,7 +6606,7 @@ DPPP_(my_grok_oct)(pTHX_ const char *start, STRLEN *len_p, I32 *flags, NV *resul
bool overflowed = FALSE;
for (; len-- && *s; s++) {
- /* gcc 2.95 optimiser not smart enough to figure that this subtraction
+ /* gcc 2.95 optimizer not smart enough to figure that this subtraction
out front allows slicker code. */
int digit = *s - '0';
if (digit >= 0 && digit <= 7) {
diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c
index 196e518e0df..49a4e622ffd 100644
--- a/src/pl/plpgsql/src/pl_exec.c
+++ b/src/pl/plpgsql/src/pl_exec.c
@@ -5543,7 +5543,7 @@ exec_eval_simple_expr(PLpgSQL_execstate *estate,
exec_check_rw_parameter(expr, expr->rwparam);
if (expr->expr_simple_expr == NULL)
{
- /* Ooops, release refcount and fail */
+ /* Oops, release refcount and fail */
ReleaseCachedPlan(cplan, true);
return false;
}
diff --git a/src/pl/plpython/plpy_procedure.c b/src/pl/plpython/plpy_procedure.c
index 2b249b029d2..e86117c8374 100644
--- a/src/pl/plpython/plpy_procedure.c
+++ b/src/pl/plpython/plpy_procedure.c
@@ -122,7 +122,7 @@ PLy_procedure_get(Oid fn_oid, Oid fn_rel, bool is_trigger)
}
PG_CATCH();
{
- /* Do not leave an uninitialised entry in the cache */
+ /* Do not leave an uninitialized entry in the cache */
if (use_cache)
hash_search(PLy_procedure_cache, &key, HASH_REMOVE, NULL);
PG_RE_THROW();
diff --git a/src/pl/plpython/plpy_typeio.c b/src/pl/plpython/plpy_typeio.c
index 2962d5a6266..34acec8501a 100644
--- a/src/pl/plpython/plpy_typeio.c
+++ b/src/pl/plpython/plpy_typeio.c
@@ -827,7 +827,7 @@ PLyObject_ToComposite(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inar
/*
* This will set up the dummy PLyTypeInfo's output conversion routines,
- * since we left is_rowtype as 2. A future optimisation could be caching
+ * since we left is_rowtype as 2. A future optimization could be caching
* that info instead of looking it up every time a tuple is returned from
* the function.
*/
diff --git a/src/test/regress/expected/errors.out b/src/test/regress/expected/errors.out
index 210e5ff39cb..ce473a03efd 100644
--- a/src/test/regress/expected/errors.out
+++ b/src/test/regress/expected/errors.out
@@ -1,7 +1,7 @@
--
-- ERRORS
--
--- bad in postquel, but ok in postsql
+-- bad in postquel, but ok in PostgreSQL
select 1;
?column?
----------
diff --git a/src/test/regress/expected/groupingsets.out b/src/test/regress/expected/groupingsets.out
index 260ccd52c87..b0886da8d70 100644
--- a/src/test/regress/expected/groupingsets.out
+++ b/src/test/regress/expected/groupingsets.out
@@ -375,7 +375,7 @@ select *
ERROR: aggregate functions are not allowed in FROM clause of their own query level
LINE 3: lateral (select a, b, sum(v.x) from gstest_data(v.x) ...
^
--- min max optimisation should still work with GROUP BY ()
+-- min max optimization should still work with GROUP BY ()
explain (costs off)
select min(unique1) from tenk1 GROUP BY ();
QUERY PLAN
diff --git a/src/test/regress/sql/errors.sql b/src/test/regress/sql/errors.sql
index cd370b4781e..14bc723a52a 100644
--- a/src/test/regress/sql/errors.sql
+++ b/src/test/regress/sql/errors.sql
@@ -2,7 +2,7 @@
-- ERRORS
--
--- bad in postquel, but ok in postsql
+-- bad in postquel, but ok in PostgreSQL
select 1;
diff --git a/src/test/regress/sql/groupingsets.sql b/src/test/regress/sql/groupingsets.sql
index 71cc0ec9007..cc557cc552d 100644
--- a/src/test/regress/sql/groupingsets.sql
+++ b/src/test/regress/sql/groupingsets.sql
@@ -140,7 +140,7 @@ select *
from (values (1),(2)) v(x),
lateral (select a, b, sum(v.x) from gstest_data(v.x) group by rollup (a,b)) s;
--- min max optimisation should still work with GROUP BY ()
+-- min max optimization should still work with GROUP BY ()
explain (costs off)
select min(unique1) from tenk1 GROUP BY ();