author     Michael Paquier <michael@paquier.xyz>  2019-07-16 13:23:53 +0900
committer  Michael Paquier <michael@paquier.xyz>  2019-07-16 13:23:53 +0900
commit     0896ae561b6c799d45cb61d8a3b18fbb442130a7 (patch)
tree       cf4204ef18047e3c1eac4e9daa320156b106a374 /src/backend
parent     4c3d05d875dd173a81a995c6e14d69496b467eec (diff)
download   postgresql-0896ae561b6c799d45cb61d8a3b18fbb442130a7.tar.gz
           postgresql-0896ae561b6c799d45cb61d8a3b18fbb442130a7.zip
Fix inconsistencies and typos in the tree
This is numbered take 7, and addresses a set of issues around:
- Fixes for typos and incorrect reference names.
- Removal of unneeded comments.
- Removal of unreferenced functions and structures.
- Fixes regarding variable name consistency.

Author: Alexander Lakhin
Discussion: https://postgr.es/m/10bfd4ac-3e7c-40ab-2b2e-355ed15495e8@gmail.com
Diffstat (limited to 'src/backend')
-rw-r--r--  src/backend/access/gin/README              2
-rw-r--r--  src/backend/access/gin/ginfast.c           2
-rw-r--r--  src/backend/access/gist/README             2
-rw-r--r--  src/backend/access/gist/gist.c             2
-rw-r--r--  src/backend/access/nbtree/README           2
-rw-r--r--  src/backend/access/spgist/spgscan.c        5
-rw-r--r--  src/backend/access/transam/clog.c          2
-rw-r--r--  src/backend/bootstrap/bootstrap.c          2
-rw-r--r--  src/backend/catalog/namespace.c            2
-rw-r--r--  src/backend/executor/nodeAgg.c             2
-rw-r--r--  src/backend/optimizer/prep/prepunion.c     4
-rw-r--r--  src/backend/parser/parse_agg.c             2
-rw-r--r--  src/backend/rewrite/rewriteManip.c         2
-rw-r--r--  src/backend/storage/buffer/bufmgr.c        5
-rw-r--r--  src/backend/storage/file/buffile.c         2
-rw-r--r--  src/backend/storage/file/fd.c              2
-rw-r--r--  src/backend/storage/freespace/freespace.c  4
-rw-r--r--  src/backend/storage/lmgr/lock.c            12
-rw-r--r--  src/backend/storage/lmgr/predicate.c       6
-rw-r--r--  src/backend/utils/adt/formatting.c         2
-rw-r--r--  src/backend/utils/adt/inet_cidr_ntop.c     2
-rw-r--r--  src/backend/utils/adt/ruleutils.c          18
-rw-r--r--  src/backend/utils/mmgr/dsa.c               4
23 files changed, 41 insertions, 47 deletions
diff --git a/src/backend/access/gin/README b/src/backend/access/gin/README
index 30c0867829e..838fdc0d630 100644
--- a/src/backend/access/gin/README
+++ b/src/backend/access/gin/README
@@ -163,7 +163,7 @@ algorithms.
* The posting list can be accessed with GinGetPosting(itup)
-* If GinITupIsCompressed(itup), the posting list is stored in compressed
+* If GinItupIsCompressed(itup), the posting list is stored in compressed
format. Otherwise it is just an array of ItemPointers. New tuples are always
stored in compressed format, uncompressed items can be present if the
database was migrated from 9.3 or earlier version.
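
As context for the macro named above: a GIN leaf tuple's posting list is read
through these accessors. A minimal caller sketch, assuming the backend headers
(access/ginblock.h) are included; handle_compressed() and handle_plain() are
hypothetical callbacks, not PostgreSQL functions.

/* Sketch only: dispatch on the posting-list storage format of a GIN leaf
 * tuple, assuming access/ginblock.h is available. */
static void
read_posting_list(IndexTuple itup)
{
    Pointer plist = GinGetPosting(itup);

    if (GinItupIsCompressed(itup))
        handle_compressed(plist);   /* varbyte-encoded segments; new tuples
                                     * are always written this way */
    else
        handle_plain(plist);        /* plain ItemPointer array, possible only
                                     * in data migrated from 9.3 or earlier */
}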
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index 2b3dd1c677f..439a91b3e61 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -1013,7 +1013,7 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
/*
* As pending list pages can have a high churn rate, it is desirable to
- * recycle them immediately to the FreeSpace Map when ordinary backends
+ * recycle them immediately to the FreeSpaceMap when ordinary backends
* clean the list.
*/
if (fsm_vac && fill_fsm)
diff --git a/src/backend/access/gist/README b/src/backend/access/gist/README
index 84a4961d0c4..8cbca692967 100644
--- a/src/backend/access/gist/README
+++ b/src/backend/access/gist/README
@@ -170,7 +170,7 @@ it splits the page, and constructs the new downlink tuples for the split
pages. The caller must then call gistplacetopage() on the parent page to
insert the downlink tuples. The parent page that holds the downlink to
the child might have migrated as a result of concurrent splits of the
-parent, gistfindCorrectParent() is used to find the parent page.
+parent, gistFindCorrectParent() is used to find the parent page.
Splitting the root page works slightly differently. At root split,
gistplacetopage() allocates the new child pages and replaces the old root
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 470b121e7da..dfb51f609d8 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -821,7 +821,7 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace,
/*
* Leaf page. Insert the new key. We've already updated all the
* parents on the way down, but we might have to split the page if
- * it doesn't fit. gistinserthere() will take care of that.
+ * it doesn't fit. gistinserttuple() will take care of that.
*/
/*
diff --git a/src/backend/access/nbtree/README b/src/backend/access/nbtree/README
index c5b0a30e4eb..3d01b7854df 100644
--- a/src/backend/access/nbtree/README
+++ b/src/backend/access/nbtree/README
@@ -457,7 +457,7 @@ right sibling's left-link --- followed by a second WAL entry for the
insertion on the parent level (which might itself be a page split, requiring
an additional insertion above that, etc).
-For a root split, the followon WAL entry is a "new root" entry rather than
+For a root split, the follow-on WAL entry is a "new root" entry rather than
an "insertion" entry, but details are otherwise much the same.
Because splitting involves multiple atomic actions, it's possible that the
diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c
index 7bc5ec09bf9..557dd18d7e8 100644
--- a/src/backend/access/spgist/spgscan.c
+++ b/src/backend/access/spgist/spgscan.c
@@ -146,11 +146,6 @@ resetSpGistScanOpaque(SpGistScanOpaque so)
{
MemoryContext oldCtx;
- /*
- * clear traversal context before proceeding to the next scan; this must
- * not happen before the freeScanStack above, else we get double-free
- * crashes.
- */
MemoryContextReset(so->traversalCxt);
oldCtx = MemoryContextSwitchTo(so->traversalCxt);
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index 47db7a8a88c..d78f706ff7f 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -155,7 +155,7 @@ static void TransactionIdSetPageStatusInternal(TransactionId xid, int nsubxids,
* NB: this is a low-level routine and is NOT the preferred entry point
* for most uses; functions in transam.c are the intended callers.
*
- * XXX Think about issuing FADVISE_WILLNEED on pages that we will need,
+ * XXX Think about issuing POSIX_FADV_WILLNEED on pages that we will need,
* but aren't yet in cache, as well as hinting pages not to fall out of
* cache yet.
*/
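
The advice flag named in the corrected comment is standard POSIX. A
self-contained sketch of issuing it on an already-open file descriptor; the
file name is made up and the call is purely advisory:

#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
    int fd = open("0000", O_RDONLY);    /* hypothetical SLRU segment file */

    if (fd < 0)
        return 1;

    /* Hint that the first 8 kB will be needed soon; the kernel may prefetch
     * it, or ignore the hint entirely. */
#ifdef POSIX_FADV_WILLNEED
    (void) posix_fadvise(fd, 0, 8192, POSIX_FADV_WILLNEED);
#endif

    close(fd);
    return 0;
}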
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index 43627ab8f4e..9238fbe98d7 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -716,7 +716,7 @@ DefineAttr(char *name, char *type, int attnum, int nullness)
namestrcpy(&attrtypes[attnum]->attname, name);
elog(DEBUG4, "column %s %s", NameStr(attrtypes[attnum]->attname), type);
- attrtypes[attnum]->attnum = attnum + 1; /* fillatt */
+ attrtypes[attnum]->attnum = attnum + 1;
typeoid = gettype(type);
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 5fd9eba57ff..5cdd51cb5d8 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -2399,7 +2399,7 @@ TSParserIsVisible(Oid prsId)
/*
* get_ts_dict_oid - find a TS dictionary by possibly qualified name
*
- * If not found, returns InvalidOid if failOK, else throws error
+ * If not found, returns InvalidOid if missing_ok, else throws error
*/
Oid
get_ts_dict_oid(List *names, bool missing_ok)
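
The corrected comment reflects the usual missing_ok convention in catalog
lookups: quietly return InvalidOid when the flag is set, otherwise raise an
error. A self-contained sketch of the same contract with made-up names
(find_thing, lookup_thing):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef unsigned int Oid;
#define InvalidOid ((Oid) 0)

/* Hypothetical search routine: returns InvalidOid when nothing matches. */
static Oid
find_thing(const char *name)
{
    return InvalidOid;
}

/* Mirrors the get_ts_dict_oid() contract described above. */
static Oid
lookup_thing(const char *name, bool missing_ok)
{
    Oid oid = find_thing(name);

    if (oid == InvalidOid && !missing_ok)
    {
        /* the backend would use ereport(ERROR, ...) here */
        fprintf(stderr, "\"%s\" does not exist\n", name);
        exit(1);
    }
    return oid;     /* InvalidOid if not found and missing_ok */
}

int
main(void)
{
    printf("%u\n", lookup_thing("nosuch", true));   /* prints 0, no error */
    return 0;
}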
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 6b8ef40599b..cb4ab4b21e9 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -882,7 +882,7 @@ process_ordered_aggregate_multi(AggState *aggstate,
* This function handles only one grouping set (already set in
* aggstate->current_set).
*
- * The finalfunction will be run, and the result delivered, in the
+ * The finalfn will be run, and the result delivered, in the
* output-tuple context; caller's CurrentMemoryContext does not matter.
*
* The finalfn uses the state as set in the transno. This also might be
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 61120778516..f5f934ab5cf 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -362,8 +362,8 @@ recurse_set_operations(Node *setOp, PlannerInfo *root,
* fix_upper_expr() to the Result node's tlist. This would fail if the
* Vars generated by generate_setop_tlist() were not exactly equal()
* to the corresponding tlist entries of the subplan. However, since
- * the subplan was generated by generate_union_plan() or
- * generate_nonunion_plan(), and hence its tlist was generated by
+ * the subplan was generated by generate_union_paths() or
+ * generate_nonunion_paths(), and hence its tlist was generated by
* generate_append_tlist(), this will work. We just tell
* generate_setop_tlist() to use varno 0.
*/
diff --git a/src/backend/parser/parse_agg.c b/src/backend/parser/parse_agg.c
index 8dc3793b5fc..1ae4fb5b219 100644
--- a/src/backend/parser/parse_agg.c
+++ b/src/backend/parser/parse_agg.c
@@ -58,7 +58,7 @@ static int check_agg_arguments(ParseState *pstate,
static bool check_agg_arguments_walker(Node *node,
check_agg_arguments_context *context);
static void check_ungrouped_columns(Node *node, ParseState *pstate, Query *qry,
- List *groupClauses, List *groupClauseVars,
+ List *groupClauses, List *groupClauseCommonVars,
bool have_non_var_grouping,
List **func_grouped_rels);
static bool check_ungrouped_columns_walker(Node *node,
diff --git a/src/backend/rewrite/rewriteManip.c b/src/backend/rewrite/rewriteManip.c
index 6189a068535..93508c2a87e 100644
--- a/src/backend/rewrite/rewriteManip.c
+++ b/src/backend/rewrite/rewriteManip.c
@@ -1208,7 +1208,7 @@ replace_rte_variables_mutator(Node *node,
* a ConvertRowtypeExpr to map back to the rowtype expected by the expression.
* (Therefore, to_rowtype had better be a child rowtype of the rowtype of the
* RTE we're changing references to.) Callers that don't provide to_rowtype
- * should report an error if *found_row_type is true; we don't do that here
+ * should report an error if *found_whole_row is true; we don't do that here
* because we don't know exactly what wording for the error message would
* be most appropriate. The caller will be aware of the context.
*
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 7332e6b5903..6f3a4028547 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -438,7 +438,8 @@ static void PinBuffer_Locked(BufferDesc *buf);
static void UnpinBuffer(BufferDesc *buf, bool fixOwner);
static void BufferSync(int flags);
static uint32 WaitBufHdrUnlocked(BufferDesc *buf);
-static int SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *flush_context);
+static int SyncOneBuffer(int buf_id, bool skip_recently_used,
+ WritebackContext *wb_context);
static void WaitIO(BufferDesc *buf);
static bool StartBufferIO(BufferDesc *buf, bool forInput);
static void TerminateBufferIO(BufferDesc *buf, bool clear_dirty,
@@ -2346,7 +2347,7 @@ BgBufferSync(WritebackContext *wb_context)
* BUF_REUSABLE: buffer is available for replacement, ie, it has
* pin count 0 and usage count 0.
*
- * (BUF_WRITTEN could be set in error if FlushBuffers finds the buffer clean
+ * (BUF_WRITTEN could be set in error if FlushBuffer finds the buffer clean
* after locking it, but we don't care all that much.)
*
* Note: caller must have done ResourceOwnerEnlargeBuffers.
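
For context, BUF_WRITTEN and BUF_REUSABLE are bits OR'd into SyncOneBuffer()'s
return value. A self-contained sketch of consuming such a bitmask; the
constants are redefined locally for illustration and sync_one_buffer_stub() is
not the real function:

#include <stdio.h>

#define BUF_WRITTEN   0x01      /* buffer was dirty and has been flushed */
#define BUF_REUSABLE  0x02      /* pin count 0 and usage count 0 */

/* Hypothetical stand-in: always claims "written and reusable". */
static int
sync_one_buffer_stub(int buf_id)
{
    return BUF_WRITTEN | BUF_REUSABLE;
}

int
main(void)
{
    int flags = sync_one_buffer_stub(0);

    if (flags & BUF_WRITTEN)
        printf("counted as a write (may be spurious if the buffer was clean)\n");
    if (flags & BUF_REUSABLE)
        printf("candidate for replacement by the clock sweep\n");
    return 0;
}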
diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c
index b773a760499..b40e6f3fde9 100644
--- a/src/backend/storage/file/buffile.c
+++ b/src/backend/storage/file/buffile.c
@@ -664,7 +664,7 @@ BufFileSeek(BufFile *file, int fileno, off_t offset, int whence)
/*
* Relative seek considers only the signed offset, ignoring
- * fileno. Note that large offsets (> 1 gig) risk overflow in this
+ * fileno. Note that large offsets (> 1 GB) risk overflow in this
* add, unless we have 64-bit off_t.
*/
newFile = file->curFile;
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 7b49802b4e1..315c74c7456 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -897,7 +897,7 @@ count_usable_fds(int max_to_probe, int *usable_fds, int *already_open)
/*
* set_max_safe_fds
- * Determine number of filedescriptors that fd.c is allowed to use
+ * Determine number of file descriptors that fd.c is allowed to use
*/
void
set_max_safe_fds(void)
diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c
index c17b3f49dd0..2383094cfd1 100644
--- a/src/backend/storage/freespace/freespace.c
+++ b/src/backend/storage/freespace/freespace.c
@@ -223,7 +223,7 @@ XLogRecordPageWithFreeSpace(RelFileNode rnode, BlockNumber heapBlk,
}
/*
- * GetRecordedFreePage - return the amount of free space on a particular page,
+ * GetRecordedFreeSpace - return the amount of free space on a particular page,
* according to the FSM.
*/
Size
@@ -417,7 +417,7 @@ fsm_space_cat_to_avail(uint8 cat)
/*
* Which category does a page need to have, to accommodate x bytes of data?
- * While fsm_size_to_avail_cat() rounds down, this needs to round up.
+ * While fsm_space_avail_to_cat() rounds down, this needs to round up.
*/
static uint8
fsm_space_needed_to_cat(Size needed)
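
The distinction fixed in this comment is the rounding direction: mapping free
bytes to a category rounds down (a page never advertises more space than it
has), while mapping a request to a category that is guaranteed to satisfy it
rounds up. A self-contained sketch using a hypothetical 32-byte step rather
than the real FSM_CAT_STEP:

#include <stdio.h>

#define STEP 32     /* stand-in for the FSM category step; not the real value */

/* Free bytes -> category: round down, never overstate free space. */
static unsigned
avail_to_cat(unsigned avail)
{
    return avail / STEP;
}

/* Requested bytes -> smallest category guaranteed to fit it: round up. */
static unsigned
needed_to_cat(unsigned needed)
{
    return (needed + STEP - 1) / STEP;
}

int
main(void)
{
    /* A page with 40 free bytes is category 1 (only 32 guaranteed); a request
     * for 40 bytes must look for category 2 pages (at least 64 guaranteed). */
    printf("avail 40 -> %u, needed 40 -> %u\n",
           avail_to_cat(40), needed_to_cat(40));
    return 0;
}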
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 6745a2432ef..1b7053cb1cf 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -216,9 +216,9 @@ static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
/*
* To make the fast-path lock mechanism work, we must have some way of
- * preventing the use of the fast-path when a conflicting lock might be
- * present. We partition* the locktag space into FAST_PATH_HASH_BUCKETS
- * partitions, and maintain an integer count of the number of "strong" lockers
+ * preventing the use of the fast-path when a conflicting lock might be present.
+ * We partition* the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
+ * and maintain an integer count of the number of "strong" lockers
* in each partition. When any "strong" lockers are present (which is
* hopefully not very often), the fast-path mechanism can't be used, and we
* must fall back to the slower method of pushing matching locks directly
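
The mechanism the reworded comment describes keeps one counter per hash
partition of the locktag space; while a partition's counter is nonzero,
backends must skip the fast path for locktags hashing into it. A
self-contained sketch (the partition count and hash are made up, not lock.c's
FastPathStrongLockHashPartition):

#include <stdint.h>
#include <stdio.h>

#define N_PARTITIONS 1024   /* stand-in for FAST_PATH_STRONG_LOCK_HASH_PARTITIONS */

/* Protected by a spinlock in the real backend. */
static uint32_t strong_lock_counts[N_PARTITIONS];

static uint32_t
partition_for(uint32_t locktag_hash)
{
    return locktag_hash % N_PARTITIONS;
}

int
main(void)
{
    uint32_t h = 0xdeadbeef;    /* pretend locktag hash */

    /* A "strong" locker announces itself before scanning other backends. */
    strong_lock_counts[partition_for(h)]++;

    /* A would-be fast-path locker must check the counter first. */
    if (strong_lock_counts[partition_for(h)] != 0)
        printf("strong locker present: use the main lock table\n");
    else
        printf("fast path allowed\n");
    return 0;
}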
@@ -2709,7 +2709,7 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
}
/*
- * FastPathGetLockEntry
+ * FastPathGetRelationLockEntry
* Return the PROCLOCK for a lock originally taken via the fast-path,
* transferring it to the primary lock table if necessary.
*
@@ -2896,8 +2896,8 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
* the lock, then we needn't examine the individual relation IDs
* at all; none of them can be relevant.
*
- * See FastPathTransferLocks() for discussion of why we do this
- * test after acquiring the lock.
+ * See FastPathTransferRelationLocks() for discussion of why we do
+ * this test after acquiring the lock.
*/
if (proc->databaseId != locktag->locktag_field1)
{
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index 565c3ac4397..2d709420c3d 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -3405,8 +3405,8 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
*
* If this value is changing, we don't care that much whether we get the
* old or new value -- it is just used to determine how far
- * GlobalSerializableXmin must advance before this transaction can be
- * fully cleaned up. The worst that could happen is we wait for one more
+ * SxactGlobalXmin must advance before this transaction can be fully
+ * cleaned up. The worst that could happen is we wait for one more
* transaction to complete before freeing some RAM; correctness of visible
* behavior is not affected.
*/
@@ -4820,7 +4820,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
*
* If a dangerous structure is found, the pivot (the near conflict) is
* marked for death, because rolling back another transaction might mean
- * that we flail without ever making progress. This transaction is
+ * that we fail without ever making progress. This transaction is
* committing writes, so letting it commit ensures progress. If we
* canceled the far conflict, it might immediately fail again on retry.
*/
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 206576d4bd3..b3115e4bea8 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -282,8 +282,6 @@ static const char *const numth[] = {"st", "nd", "rd", "th", NULL};
#define ALL_UPPER 2 /* NAME */
#define ALL_LOWER 3 /* name */
-#define FULL_SIZ 0
-
#define MAX_MONTH_LEN 9
#define MAX_MON_LEN 3
#define MAX_DAY_LEN 9
diff --git a/src/backend/utils/adt/inet_cidr_ntop.c b/src/backend/utils/adt/inet_cidr_ntop.c
index 5fdc3ca2513..3000b1735d0 100644
--- a/src/backend/utils/adt/inet_cidr_ntop.c
+++ b/src/backend/utils/adt/inet_cidr_ntop.c
@@ -146,7 +146,7 @@ emsgsize:
/*
* static char *
- * inet_cidr_ntop_ipv6(src, bits, fakebits, dst, size)
+ * inet_cidr_ntop_ipv6(src, bits, dst, size)
* convert IPv6 network number from network to presentation format.
* generates CIDR style result always. Picks the shortest representation
* unless the IP is really IPv4.
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 3e17032e56a..4ca0ed2bbbd 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -474,7 +474,7 @@ static char *flatten_reloptions(Oid relid);
/* ----------
- * get_ruledef - Do it all and return a text
+ * pg_get_ruledef - Do it all and return a text
* that could be used as a statement
* to recreate the rule
* ----------
@@ -594,7 +594,7 @@ pg_get_ruledef_worker(Oid ruleoid, int prettyFlags)
/* ----------
- * get_viewdef - Mainly the same thing, but we
+ * pg_get_viewdef - Mainly the same thing, but we
* only return the SELECT part of a view
* ----------
*/
@@ -789,7 +789,7 @@ pg_get_viewdef_worker(Oid viewoid, int prettyFlags, int wrapColumn)
}
/* ----------
- * get_triggerdef - Get the definition of a trigger
+ * pg_get_triggerdef - Get the definition of a trigger
* ----------
*/
Datum
@@ -1083,7 +1083,7 @@ pg_get_triggerdef_worker(Oid trigid, bool pretty)
}
/* ----------
- * get_indexdef - Get the definition of an index
+ * pg_get_indexdef - Get the definition of an index
*
* In the extended version, there is a colno argument as well as pretty bool.
* if colno == 0, we want a complete index definition.
@@ -2342,7 +2342,7 @@ decompile_column_index_array(Datum column_index_array, Oid relId,
/* ----------
- * get_expr - Decompile an expression tree
+ * pg_get_expr - Decompile an expression tree
*
* Input: an expression tree in nodeToString form, and a relation OID
*
@@ -2440,7 +2440,7 @@ pg_get_expr_worker(text *expr, Oid relid, const char *relname, int prettyFlags)
/* ----------
- * get_userbyid - Get a user name by roleid and
+ * pg_get_userbyid - Get a user name by roleid and
* fallback to 'unknown (OID=n)'
* ----------
*/
@@ -6811,8 +6811,8 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
/*
* Deparse a Var which references OUTER_VAR, INNER_VAR, or INDEX_VAR. This
- * routine is actually a callback for get_special_varno, which handles finding
- * the correct TargetEntry. We get the expression contained in that
+ * routine is actually a callback for resolve_special_varno, which handles
+ * finding the correct TargetEntry. We get the expression contained in that
* TargetEntry and just need to deparse it, a job we can throw back on
* get_rule_expr.
*/
@@ -11254,7 +11254,7 @@ flatten_reloptions(Oid relid)
}
/*
- * get_one_range_partition_bound_string
+ * get_range_partbound_string
* A C string representation of one range partition bound
*/
char *
diff --git a/src/backend/utils/mmgr/dsa.c b/src/backend/utils/mmgr/dsa.c
index 900cd8357ca..6590e55a241 100644
--- a/src/backend/utils/mmgr/dsa.c
+++ b/src/backend/utils/mmgr/dsa.c
@@ -2235,8 +2235,8 @@ check_for_freed_segments(dsa_area *area)
/*
* Any other process that has freed a segment has incremented
- * free_segment_counter while holding an LWLock, and that must precede any
- * backend creating a new segment in the same slot while holding an
+ * freed_segment_counter while holding an LWLock, and that must precede
+ * any backend creating a new segment in the same slot while holding an
* LWLock, and that must precede the creation of any dsa_pointer pointing
* into the new segment which might reach us here, and the caller must
* have sent the dsa_pointer to this process using appropriate memory