path: root/src/backend/access/heap/heapam.c
author    Jeff Davis <jdavis@postgresql.org>  2022-11-11 08:40:01 -0800
committer Jeff Davis <jdavis@postgresql.org>  2022-11-11 08:49:30 -0800
commit  3eb8eeccbee31597c5962de10dcb3930d780cb19 (patch)
tree    116bdea5309170ec3f37cebb5659c2d6ede10ee4 /src/backend/access/heap/heapam.c
parent  373679c4a82f04e6c16198cdffab1a6c56852956 (diff)
Remove obsolete comments and code from prior to f8f4227976.
XLogReadBufferForRedo() and XLogReadBufferForRedoExtended() only return BLK_NEEDS_REDO if the record LSN is greater than the page LSN, so the redo routine doesn't need to do the LSN check again.

Discussion: https://postgr.es/m/0c37b80e62b1f3007d5a6d1292bd8fa0c275627a.camel@j-davis.com
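For context, a minimal sketch of the redo-routine pattern the message refers to. XLogReadBufferForRedo(), BLK_NEEDS_REDO, BufferGetPage(), PageGetLSN(), PageSetLSN(), MarkBufferDirty(), BufferIsValid(), and UnlockReleaseBuffer() are real PostgreSQL APIs; the routine heap_xlog_example() and its body are hypothetical, shown only to illustrate why a per-routine LSN test is redundant inside the BLK_NEEDS_REDO branch:

/*
 * Hypothetical redo routine (not part of this commit) illustrating the
 * BLK_NEEDS_REDO contract described in the commit message.
 */
static void
heap_xlog_example(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    Buffer      buffer;

    /*
     * XLogReadBufferForRedo() already compares the record LSN against the
     * page LSN and returns BLK_NEEDS_REDO only when the record is newer,
     * so a second "if (lsn > PageGetLSN(page))" test inside this branch
     * would always be true.
     */
    if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    {
        Page    page = BufferGetPage(buffer);

        /* ... apply the logged change to the page ... */

        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);
}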
Diffstat (limited to 'src/backend/access/heap/heapam.c')
-rw-r--r--  src/backend/access/heap/heapam.c | 22
1 file changed, 2 insertions(+), 20 deletions(-)
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 12be87efed4..560f1c81a2c 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8827,12 +8827,6 @@ heap_xlog_visible(XLogReaderState *record)
* full-page writes. This exposes us to torn page hazards, but since
* we're not inspecting the existing page contents in any way, we
* don't care.
- *
- * However, all operations that clear the visibility map bit *do* bump
- * the LSN, and those operations will only be replayed if the XLOG LSN
- * follows the page LSN. Thus, if the page LSN has advanced past our
- * XLOG record's LSN, we mustn't mark the page all-visible, because
- * the subsequent update won't be replayed to clear the flag.
*/
page = BufferGetPage(buffer);
@@ -8901,20 +8895,8 @@ heap_xlog_visible(XLogReaderState *record)
reln = CreateFakeRelcacheEntry(rlocator);
visibilitymap_pin(reln, blkno, &vmbuffer);
- /*
- * Don't set the bit if replay has already passed this point.
- *
- * It might be safe to do this unconditionally; if replay has passed
- * this point, we'll replay at least as far this time as we did
- * before, and if this bit needs to be cleared, the record responsible
- * for doing so should be again replayed, and clear it. For right
- * now, out of an abundance of conservatism, we use the same test here
- * we did for the heap page. If this results in a dropped bit, no
- * real harm is done; and the next VACUUM will fix it.
- */
- if (lsn > PageGetLSN(vmpage))
- visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
- xlrec->cutoff_xid, xlrec->flags);
+ visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
+ xlrec->cutoff_xid, xlrec->flags);
ReleaseBuffer(vmbuffer);
FreeFakeRelcacheEntry(reln);