author    Michael Paquier <michael@paquier.xyz>  2019-07-16 13:23:53 +0900
committer Michael Paquier <michael@paquier.xyz>  2019-07-16 13:23:53 +0900
commit    0896ae561b6c799d45cb61d8a3b18fbb442130a7 (patch)
tree      cf4204ef18047e3c1eac4e9daa320156b106a374 /src/backend/storage
parent    4c3d05d875dd173a81a995c6e14d69496b467eec (diff)
download  postgresql-0896ae561b6c799d45cb61d8a3b18fbb442130a7.tar.gz
          postgresql-0896ae561b6c799d45cb61d8a3b18fbb442130a7.zip
Fix inconsistencies and typos in the tree
This is numbered take 7, and addresses a set of issues around:
- Fixes for typos and incorrect reference names.
- Removal of unneeded comments.
- Removal of unreferenced functions and structures.
- Fixes regarding variable name consistency.

Author: Alexander Lakhin
Discussion: https://postgr.es/m/10bfd4ac-3e7c-40ab-2b2e-355ed15495e8@gmail.com
Diffstat (limited to 'src/backend/storage')
-rw-r--r--  src/backend/storage/buffer/bufmgr.c        5
-rw-r--r--  src/backend/storage/file/buffile.c         2
-rw-r--r--  src/backend/storage/file/fd.c              2
-rw-r--r--  src/backend/storage/freespace/freespace.c  4
-rw-r--r--  src/backend/storage/lmgr/lock.c           12
-rw-r--r--  src/backend/storage/lmgr/predicate.c       6
6 files changed, 16 insertions, 15 deletions
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 7332e6b5903..6f3a4028547 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -438,7 +438,8 @@ static void PinBuffer_Locked(BufferDesc *buf);
static void UnpinBuffer(BufferDesc *buf, bool fixOwner);
static void BufferSync(int flags);
static uint32 WaitBufHdrUnlocked(BufferDesc *buf);
-static int SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *flush_context);
+static int SyncOneBuffer(int buf_id, bool skip_recently_used,
+ WritebackContext *wb_context);
static void WaitIO(BufferDesc *buf);
static bool StartBufferIO(BufferDesc *buf, bool forInput);
static void TerminateBufferIO(BufferDesc *buf, bool clear_dirty,
@@ -2346,7 +2347,7 @@ BgBufferSync(WritebackContext *wb_context)
* BUF_REUSABLE: buffer is available for replacement, ie, it has
* pin count 0 and usage count 0.
*
- * (BUF_WRITTEN could be set in error if FlushBuffers finds the buffer clean
+ * (BUF_WRITTEN could be set in error if FlushBuffer finds the buffer clean
* after locking it, but we don't care all that much.)
*
* Note: caller must have done ResourceOwnerEnlargeBuffers.
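For context on the comment touched above: SyncOneBuffer reports its result as a
bitmask, and callers such as BgBufferSync test the BUF_WRITTEN and BUF_REUSABLE
bits. A minimal sketch of that flag pattern follows; the flag values and the
helper name consume_sync_result are illustrative, not copied from bufmgr.c.

/* Sketch of the SyncOneBuffer result-flag pattern (illustrative values). */
#define BUF_WRITTEN   0x01		/* buffer was written out */
#define BUF_REUSABLE  0x02		/* pin count 0 and usage count 0 */

static void
consume_sync_result(int sync_result, int *num_written, int *num_reusable)
{
	/*
	 * BUF_WRITTEN may be reported even if the buffer turned out to be clean
	 * after locking; callers only use it for statistics, which is why the
	 * comment above says we don't care all that much.
	 */
	if (sync_result & BUF_WRITTEN)
		(*num_written)++;
	if (sync_result & BUF_REUSABLE)
		(*num_reusable)++;
}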
diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c
index b773a760499..b40e6f3fde9 100644
--- a/src/backend/storage/file/buffile.c
+++ b/src/backend/storage/file/buffile.c
@@ -664,7 +664,7 @@ BufFileSeek(BufFile *file, int fileno, off_t offset, int whence)
/*
* Relative seek considers only the signed offset, ignoring
- * fileno. Note that large offsets (> 1 gig) risk overflow in this
+ * fileno. Note that large offsets (> 1 GB) risk overflow in this
* add, unless we have 64-bit off_t.
*/
newFile = file->curFile;
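To illustrate the overflow concern in the comment above: a relative seek adds the
signed offset to the current position within the current 1 GB segment, and with a
32-bit off_t that addition can wrap. The sketch below only names things loosely
after buffile.c; it is not the actual BufFileSeek implementation.

#include <sys/types.h>

/* Sketch of the SEEK_CUR arithmetic and its overflow hazard. */
static int
relative_seek(int curFile, off_t curOffset, off_t pos, off_t offset,
			  int *newFile, off_t *newOffset)
{
	*newFile = curFile;

	/*
	 * With a 32-bit off_t this addition can wrap once the operands approach
	 * 2 GB, which is why offsets beyond roughly 1 GB are risky unless off_t
	 * is 64 bits wide.
	 */
	*newOffset = (curOffset + pos) + offset;
	if (*newOffset < 0)			/* wrapped around, or before start of file */
		return -1;				/* report failure, much as BufFileSeek does */
	return 0;
}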
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 7b49802b4e1..315c74c7456 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -897,7 +897,7 @@ count_usable_fds(int max_to_probe, int *usable_fds, int *already_open)
/*
* set_max_safe_fds
- * Determine number of filedescriptors that fd.c is allowed to use
+ * Determine number of file descriptors that fd.c is allowed to use
*/
void
set_max_safe_fds(void)
diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c
index c17b3f49dd0..2383094cfd1 100644
--- a/src/backend/storage/freespace/freespace.c
+++ b/src/backend/storage/freespace/freespace.c
@@ -223,7 +223,7 @@ XLogRecordPageWithFreeSpace(RelFileNode rnode, BlockNumber heapBlk,
}
/*
- * GetRecordedFreePage - return the amount of free space on a particular page,
+ * GetRecordedFreeSpace - return the amount of free space on a particular page,
* according to the FSM.
*/
Size
@@ -417,7 +417,7 @@ fsm_space_cat_to_avail(uint8 cat)
/*
* Which category does a page need to have, to accommodate x bytes of data?
- * While fsm_size_to_avail_cat() rounds down, this needs to round up.
+ * While fsm_space_avail_to_cat() rounds down, this needs to round up.
*/
static uint8
fsm_space_needed_to_cat(Size needed)
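The round-down/round-up distinction referenced above can be made concrete: the FSM
maps free-space amounts to small byte-sized categories in fixed steps, so available
space must be rounded down (never overstate what a page offers) while a request must
be rounded up (never understate what it needs). The sketch below uses illustrative
constants and hypothetical function names; freespace.c derives the real step size
from the block size.

#include <stddef.h>
#include <stdint.h>

#define FSM_CATEGORIES	256
#define FSM_CAT_STEP	32		/* 32 bytes per category with 8 kB blocks */

/*
 * Available space -> category: round DOWN, so a stored category never
 * promises more free space than the page actually has.
 */
static uint8_t
space_avail_to_cat(size_t avail)
{
	size_t		cat = avail / FSM_CAT_STEP;

	return (cat >= FSM_CATEGORIES) ? FSM_CATEGORIES - 1 : (uint8_t) cat;
}

/*
 * Needed space -> category: round UP, so any page advertising the returned
 * category is guaranteed to fit the request.  (The real
 * fsm_space_needed_to_cat errors out when the request cannot fit on any
 * page; the clamp here just keeps the sketch self-contained.)
 */
static uint8_t
space_needed_to_cat(size_t needed)
{
	size_t		cat = (needed + FSM_CAT_STEP - 1) / FSM_CAT_STEP;

	return (cat >= FSM_CATEGORIES) ? FSM_CATEGORIES - 1 : (uint8_t) cat;
}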
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 6745a2432ef..1b7053cb1cf 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -216,9 +216,9 @@ static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
/*
* To make the fast-path lock mechanism work, we must have some way of
- * preventing the use of the fast-path when a conflicting lock might be
- * present. We partition* the locktag space into FAST_PATH_HASH_BUCKETS
- * partitions, and maintain an integer count of the number of "strong" lockers
+ * preventing the use of the fast-path when a conflicting lock might be present.
+ * We partition* the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
+ * and maintain an integer count of the number of "strong" lockers
* in each partition. When any "strong" lockers are present (which is
* hopefully not very often), the fast-path mechanism can't be used, and we
* must fall back to the slower method of pushing matching locks directly
@@ -2709,7 +2709,7 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
}
/*
- * FastPathGetLockEntry
+ * FastPathGetRelationLockEntry
* Return the PROCLOCK for a lock originally taken via the fast-path,
* transferring it to the primary lock table if necessary.
*
@@ -2896,8 +2896,8 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
* the lock, then we needn't examine the individual relation IDs
* at all; none of them can be relevant.
*
- * See FastPathTransferLocks() for discussion of why we do this
- * test after acquiring the lock.
+ * See FastPathTransferRelationLocks() for discussion of why we do
+ * this test after acquiring the lock.
*/
if (proc->databaseId != locktag->locktag_field1)
{
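The fast-path mechanism described in the rewritten comment near the top of this
file's diff can be sketched roughly as follows: the locktag space is hashed into
FAST_PATH_STRONG_LOCK_HASH_PARTITIONS counters, a "strong" locker bumps its
partition's counter before transferring any conflicting fast-path locks, and weak
lockers may use the fast path only while their partition's counter is zero. This is
an illustrative sketch with hypothetical lowercase names, not the lock.c
implementation (the real counters live in shared memory under a spinlock).

#include <stdbool.h>
#include <stdint.h>

/* Illustrative partition count; lock.c defines the real value. */
#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS 1024

static uint32_t strong_lock_counts[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];

static uint32_t
strong_lock_partition(uint32_t locktag_hash)
{
	return locktag_hash % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS;
}

/*
 * A "strong" locker advertises itself before looking for (and transferring)
 * conflicting fast-path locks held by other backends.
 */
static void
begin_strong_lock_acquire(uint32_t locktag_hash)
{
	strong_lock_counts[strong_lock_partition(locktag_hash)]++;
}

/*
 * A "weak" locker may use the fast path only while no strong locker is
 * present in its partition of the locktag space.
 */
static bool
eligible_for_fast_path(uint32_t locktag_hash)
{
	return strong_lock_counts[strong_lock_partition(locktag_hash)] == 0;
}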
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index 565c3ac4397..2d709420c3d 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -3405,8 +3405,8 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
*
* If this value is changing, we don't care that much whether we get the
* old or new value -- it is just used to determine how far
- * GlobalSerializableXmin must advance before this transaction can be
- * fully cleaned up. The worst that could happen is we wait for one more
+ * SxactGlobalXmin must advance before this transaction can be fully
+ * cleaned up. The worst that could happen is we wait for one more
* transaction to complete before freeing some RAM; correctness of visible
* behavior is not affected.
*/
@@ -4820,7 +4820,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
*
* If a dangerous structure is found, the pivot (the near conflict) is
* marked for death, because rolling back another transaction might mean
- * that we flail without ever making progress. This transaction is
+ * that we fail without ever making progress. This transaction is
* committing writes, so letting it commit ensures progress. If we
* canceled the far conflict, it might immediately fail again on retry.
*/