Diffstat (limited to 'src')
 src/backend/access/nbtree/nbtsearch.c |  2 +-
 src/backend/catalog/catalog.c         |  2 +-
 src/backend/commands/copy.c           |  2 +-
 src/backend/libpq/be-secure-gssapi.c  |  2 +-
 src/backend/parser/parse_target.c     |  2 +-
 src/backend/postmaster/checkpointer.c | 11 +++++------
 src/backend/utils/cache/lsyscache.c   |  2 +-
 src/port/pg_bitutils.c                |  4 ++--
 8 files changed, 13 insertions(+), 14 deletions(-)
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index 1f809c24a11..c655dadb963 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -424,7 +424,7 @@ _bt_binsrch(Relation rel,
/*
*
- * bt_binsrch_insert() -- Cacheable, incremental leaf page binary search.
+ * _bt_binsrch_insert() -- Cacheable, incremental leaf page binary search.
*
* Like _bt_binsrch(), but with support for caching the binary search
* bounds. Only used during insertion, and only on the leaf page that it
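[Editor's note: the comment in this hunk describes caching binary-search
bounds so that a repeat search of the same leaf page can reuse the
already-narrowed range instead of starting over. A minimal sketch of that
idea, with illustrative names only, not PostgreSQL's implementation:

#include <stdbool.h>

typedef struct SearchBounds
{
    bool    valid;      /* have bounds been cached yet? */
    int     low;        /* cached lower bound (inclusive) */
    int     high;       /* cached upper bound (exclusive) */
} SearchBounds;

/*
 * Return the first index in items[0..nitems) with items[index] >= key,
 * starting from cached bounds when available and caching the result.
 */
static int
binsrch_cached(const int *items, int nitems, int key, SearchBounds *bounds)
{
    int     low = bounds->valid ? bounds->low : 0;
    int     high = bounds->valid ? bounds->high : nitems;

    while (low < high)
    {
        int     mid = low + (high - low) / 2;

        if (items[mid] < key)
            low = mid + 1;
        else
            high = mid;
    }

    /* Bounds are now fully collapsed; a repeat call returns at once. */
    bounds->valid = true;
    bounds->low = low;
    bounds->high = high;
    return low;
}
]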
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index 11936a65713..a065419cdb2 100644
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -383,7 +383,7 @@ GetNewOidWithIndex(Relation relation, Oid indexId, AttrNumber oidcolumn)
* is also an unused OID within pg_class. If the result is to be used only
* as a relfilenode for an existing relation, pass NULL for pg_class.
*
- * As with GetNewObjectIdWithIndex(), there is some theoretical risk of a race
+ * As with GetNewOidWithIndex(), there is some theoretical risk of a race
* condition, but it doesn't seem worth worrying about.
*
* Note: we don't support using this in bootstrap mode. All relations
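[Editor's note: the race the corrected comment refers to is the unlocked
window between probing the index for a candidate OID and inserting the row
that finally claims it. A generic sketch of that allocate-probe-retry loop,
with all names hypothetical:

typedef unsigned int Oid;

extern Oid  next_oid_counter(void);         /* hypothetical counter */
extern int  oid_in_use(Oid candidate);      /* hypothetical index probe */

static Oid
get_new_id(void)
{
    Oid     candidate;

    /* Advance the counter until the probe finds no existing user. */
    do
    {
        candidate = next_oid_counter();
    } while (oid_in_use(candidate));

    /*
     * Race window: another process could pick the same candidate between
     * our probe and the eventual insertion.  As the comment above says,
     * the risk is judged too small to worry about.
     */
    return candidate;
}
]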
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 84c54fbc708..ac86f3d5bea 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -2626,7 +2626,7 @@ CopyMultiInsertInfoNextFreeSlot(CopyMultiInsertInfo *miinfo,
/*
* Record the previously reserved TupleTableSlot that was reserved by
- * MultiInsertInfoNextFreeSlot as being consumed.
+ * CopyMultiInsertInfoNextFreeSlot as being consumed.
*/
static inline void
CopyMultiInsertInfoStore(CopyMultiInsertInfo *miinfo, ResultRelInfo *rri,
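[Editor's note: the two functions named in this hunk implement a
reserve-then-confirm protocol: NextFreeSlot hands out the next slot without
advancing the used count, and Store marks it consumed. A stripped-down
sketch of the same protocol, with illustrative names:

#include <assert.h>

#define MAX_SLOTS 1000

typedef struct SlotBuffer
{
    void   *slots[MAX_SLOTS];
    int     nused;          /* slots confirmed as consumed */
} SlotBuffer;

/* Reserve the next slot without consuming it. */
static void **
buffer_next_free_slot(SlotBuffer *buf)
{
    assert(buf->nused < MAX_SLOTS);
    return &buf->slots[buf->nused];
}

/* Confirm that the slot returned by the call above has been filled. */
static void
buffer_store(SlotBuffer *buf)
{
    buf->nused++;
}
]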
diff --git a/src/backend/libpq/be-secure-gssapi.c b/src/backend/libpq/be-secure-gssapi.c
index 1673b103158..ba8c0cd0f05 100644
--- a/src/backend/libpq/be-secure-gssapi.c
+++ b/src/backend/libpq/be-secure-gssapi.c
@@ -400,7 +400,7 @@ read_or_wait(Port *port, ssize_t len)
{
/*
* If we got back less than zero, indicating an error, and that
- * wasn't just a EWOULDBOCK/EAGAIN, then give up.
+ * wasn't just a EWOULDBLOCK/EAGAIN, then give up.
*/
if (ret < 0 && !(errno == EWOULDBLOCK || errno == EAGAIN))
return -1;
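[Editor's note: the corrected comment describes the standard non-blocking
I/O convention: a negative return with errno set to EWOULDBLOCK or EAGAIN
means "no data yet, wait and retry", while any other errno is a hard
failure. A generic POSIX sketch of that distinction, not be-secure-gssapi.c
itself:

#include <errno.h>
#include <unistd.h>
#include <sys/types.h>

/*
 * Returns bytes read, 0 at EOF, -2 if the caller should wait for the
 * descriptor to become readable and retry, or -1 on a hard error.
 */
static ssize_t
read_nonblocking(int fd, void *buf, size_t len)
{
    ssize_t ret = read(fd, buf, len);

    if (ret < 0)
    {
        if (errno == EWOULDBLOCK || errno == EAGAIN)
            return -2;      /* transient: no data available yet */
        return -1;          /* anything else: give up */
    }
    return ret;
}
]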
diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c
index ef2f5b45d87..ba470366e10 100644
--- a/src/backend/parser/parse_target.c
+++ b/src/backend/parser/parse_target.c
@@ -38,7 +38,7 @@ static void markTargetListOrigin(ParseState *pstate, TargetEntry *tle,
static Node *transformAssignmentIndirection(ParseState *pstate,
Node *basenode,
const char *targetName,
- bool targetIsArray,
+ bool targetIsSubscripting,
Oid targetTypeId,
int32 targetTypMod,
Oid targetCollation,
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index 13f152b4731..11bbe2c397f 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -1213,12 +1213,11 @@ CompactCheckpointerRequestQueue(void)
* backwards from the end of the queue and check whether a request is
* *preceded* by an earlier, identical request, in the hopes of doing less
* copying. But that might change the semantics, if there's an
- * intervening FORGET_RELATION_FSYNC or FORGET_DATABASE_FSYNC request, so
- * we do it this way. It would be possible to be even smarter if we made
- * the code below understand the specific semantics of such requests (it
- * could blow away preceding entries that would end up being canceled
- * anyhow), but it's not clear that the extra complexity would buy us
- * anything.
+ * intervening SYNC_FORGET_REQUEST or SYNC_FILTER_REQUEST, so we do it
+ * this way. It would be possible to be even smarter if we made the code
+ * below understand the specific semantics of such requests (it could blow
+ * away preceding entries that would end up being canceled anyhow), but
+ * it's not clear that the extra complexity would buy us anything.
*/
for (n = 0; n < CheckpointerShmem->num_requests; n++)
{
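[Editor's note: the rewritten comment explains why the queue is compacted
with a forward scan that keeps the first copy of each request: scanning
backwards and keeping later copies could move a duplicate across an
intervening SYNC_FORGET_REQUEST or SYNC_FILTER_REQUEST and change the
outcome. A quadratic but order-preserving sketch of that forward
de-duplication (the real code uses a hash table; names are illustrative):

#include <string.h>

typedef struct Request
{
    int     type;           /* e.g. sync, forget, filter */
    int     target;
} Request;

/* Compact reqs[] in place, keeping first occurrences; returns new count. */
static int
compact_requests(Request *reqs, int n)
{
    int     nkept = 0;

    for (int i = 0; i < n; i++)
    {
        int     dup = 0;

        for (int j = 0; j < nkept; j++)
        {
            if (memcmp(&reqs[j], &reqs[i], sizeof(Request)) == 0)
            {
                dup = 1;    /* identical earlier request already kept */
                break;
            }
        }
        if (!dup)
            reqs[nkept++] = reqs[i];
    }
    return nkept;
}
]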
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index b4f2d0f35ab..c13c08a97b4 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -1055,7 +1055,7 @@ get_opclass_input_type(Oid opclass)
}
/*
- * get_opclass_family_and_input_type
+ * get_opclass_opfamily_and_input_type
*
* Returns the OID of the operator family the opclass belongs to,
* the OID of the datatype the opclass indexes
diff --git a/src/port/pg_bitutils.c b/src/port/pg_bitutils.c
index 60fb55af539..7847e8a451c 100644
--- a/src/port/pg_bitutils.c
+++ b/src/port/pg_bitutils.c
@@ -28,7 +28,7 @@
* left-most the 7th bit. The 0th entry of the array should not be used.
*
* Note: this is not used by the functions in pg_bitutils.h when
- * HAVE_BUILTIN_CLZ is defined, but we provide it anyway, so that
+ * HAVE__BUILTIN_CLZ is defined, but we provide it anyway, so that
* extensions possibly compiled with a different compiler can use it.
*/
const uint8 pg_leftmost_one_pos[256] = {
@@ -56,7 +56,7 @@ const uint8 pg_leftmost_one_pos[256] = {
* left-most the 7th bit. The 0th entry of the array should not be used.
*
* Note: this is not used by the functions in pg_bitutils.h when
- * HAVE_BUILTIN_CTZ is defined, but we provide it anyway, so that
+ * HAVE__BUILTIN_CTZ is defined, but we provide it anyway, so that
* extensions possibly compiled with a different compiler can use it.
*/
const uint8 pg_rightmost_one_pos[256] = {
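[Editor's note: these tables back the portable fallback: when the compiler
builtin is unavailable, the word is scanned a byte at a time and finished
with one table lookup. A self-contained sketch of how a leftmost-one table
is built and used for 32-bit words; illustrative, not pg_bitutils.c itself:

#include <assert.h>
#include <stdint.h>

/* table[b] = bit position (0..7) of the highest set bit of byte b;
 * entry 0 is unused, as the comment above says. */
static uint8_t leftmost_one_pos[256];

static void
init_leftmost_table(void)
{
    for (int b = 1; b < 256; b++)
    {
        int     pos = 0;

        for (int v = b; v > 1; v >>= 1)
            pos++;
        leftmost_one_pos[b] = (uint8_t) pos;
    }
}

/* Bit position (0..31) of the highest set bit of a nonzero word. */
static int
leftmost_one_pos32(uint32_t word)
{
    int     shift = 24;

    assert(word != 0);
    while ((word >> shift) == 0)
        shift -= 8;         /* find the highest nonzero byte */
    return shift + leftmost_one_pos[(word >> shift) & 0xFF];
}
]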