aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorSimon Riggs <simon@2ndQuadrant.com>2013-04-30 08:15:49 +0100
committerSimon Riggs <simon@2ndQuadrant.com>2013-04-30 08:15:49 +0100
commit730924397c8151c3cf34e633211cd0fe4a0db112 (patch)
tree52d6339ce9f3aa1038107ce24ee7d8993f790aa2 /src
parentfdea2530bd4afb3d512cb9700b1d8cd603fab0e7 (diff)
downloadpostgresql-730924397c8151c3cf34e633211cd0fe4a0db112.tar.gz
postgresql-730924397c8151c3cf34e633211cd0fe4a0db112.zip
Ensure we MarkBufferDirty before visibilitymap_set()
logs the heap page and sets the LSN. Otherwise a checkpoint could occur between those actions and leave us in an inconsistent state. Jeff Davis
Diffstat (limited to 'src')
-rw-r--r--src/backend/commands/vacuumlazy.c47
1 file changed, 26 insertions, 21 deletions
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 8a1ffcf0bd7..9d304153b8b 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -894,26 +894,25 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
freespace = PageGetHeapFreeSpace(page);
/* mark page all-visible, if appropriate */
- if (all_visible)
+ if (all_visible && !all_visible_according_to_vm)
{
- if (!PageIsAllVisible(page))
- {
- PageSetAllVisible(page);
- MarkBufferDirty(buf);
- visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
- vmbuffer, visibility_cutoff_xid);
- }
- else if (!all_visible_according_to_vm)
- {
- /*
- * It should never be the case that the visibility map page is
- * set while the page-level bit is clear, but the reverse is
- * allowed. Set the visibility map bit as well so that we get
- * back in sync.
- */
- visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
- vmbuffer, visibility_cutoff_xid);
- }
+ /*
+ * It should never be the case that the visibility map page is set
+ * while the page-level bit is clear, but the reverse is allowed
+ * (if checksums are not enabled). Regardless, set both bits
+ * so that we get back in sync.
+ *
+ * NB: If the heap page is all-visible but the VM bit is not set,
+ * we don't need to dirty the heap page. However, if checksums are
+ * enabled, we do need to make sure that the heap page is dirtied
+ * before passing it to visibilitymap_set(), because it may be
+ * logged. Given that this situation should only happen in rare
+ * cases after a crash, it is not worth optimizing.
+ */
+ PageSetAllVisible(page);
+ MarkBufferDirty(buf);
+ visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
+ vmbuffer, visibility_cutoff_xid);
}
/*
@@ -1139,6 +1138,14 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
PageRepairFragmentation(page);
/*
+ * Mark buffer dirty before we write WAL.
+ *
+ * If checksums are enabled, visibilitymap_set() may log the heap page, so
+ * we must mark heap buffer dirty before calling visibilitymap_set().
+ */
+ MarkBufferDirty(buffer);
+
+ /*
* Now that we have removed the dead tuples from the page, once again check
* if the page has become all-visible.
*/
@@ -1151,8 +1158,6 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
visibility_cutoff_xid);
}
- MarkBufferDirty(buffer);
-
/* XLOG stuff */
if (RelationNeedsWAL(onerel))
{