author    Simon Riggs <simon@2ndQuadrant.com>    2011-11-22 09:48:06 +0000
committer Simon Riggs <simon@2ndQuadrant.com>    2011-11-22 09:48:06 +0000
commit    2d2841a56c8fa37a5dd5c6d33488ba6ca37116ff (patch)
tree      729ecd63800d1ce98f9d0e472a0cf525c365de01
parent    a4ffcc8e115ed637f69ecb0295d78cc97f08a483 (diff)
Continue to allow VACUUM to mark the last block of an index dirty
even when there is no work to do. Further analysis is required. This reverts patch c1458cc495ff800cd176a1c2e56d8b62680d9b71.
-rw-r--r--  src/backend/access/nbtree/nbtpage.c | 12
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 6f6e676ec08..f9b3e1feafe 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -732,7 +732,7 @@ _bt_page_recyclable(Page page)
* and so must be scanned anyway during replay. We always write a WAL record
* for the last block in the index, whether or not it contained any items
* to be removed. This allows us to scan right up to end of index to
- * ensure correct locking. That is the only time we are called with nitems==0.
+ * ensure correct locking.
*/
void
_bt_delitems_vacuum(Relation rel, Buffer buf,
@@ -764,8 +764,7 @@ _bt_delitems_vacuum(Relation rel, Buffer buf,
*/
opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
- if (nitems > 0)
- MarkBufferDirty(buf);
+ MarkBufferDirty(buf);
/* XLOG stuff */
if (RelationNeedsWAL(rel))
@@ -805,11 +804,8 @@ _bt_delitems_vacuum(Relation rel, Buffer buf,
recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_VACUUM, rdata);
- if (nitems > 0)
- {
- PageSetLSN(page, recptr);
- PageSetTLI(page, ThisTimeLineID);
- }
+ PageSetLSN(page, recptr);
+ PageSetTLI(page, ThisTimeLineID);
}
END_CRIT_SECTION();
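
For context, the code below is a minimal standalone C sketch (not PostgreSQL source; Page, MarkBufferDirty, XLogInsert and PageSetLSN are simplified stand-ins) of the behaviour this revert restores in _bt_delitems_vacuum: the buffer is marked dirty and the page is stamped with the WAL record's LSN unconditionally, including the nitems == 0 call made for the last block of the index so that replay can scan right up to the end of the index.

/*
 * Standalone sketch (NOT PostgreSQL source). The types and helpers here
 * are simplified stand-ins used only to illustrate the control flow.
 */
#include <stdio.h>
#include <stdbool.h>

typedef struct
{
    bool          dirty;
    unsigned long lsn;
} Page;

static void MarkBufferDirty(Page *page) { page->dirty = true; }
static unsigned long XLogInsert(void) { static unsigned long next_lsn = 1; return next_lsn++; }
static void PageSetLSN(Page *page, unsigned long lsn) { page->lsn = lsn; }

/* Simplified stand-in for _bt_delitems_vacuum; nitems == 0 is the "last block" call. */
static void bt_delitems_vacuum(Page *page, int nitems)
{
    /* With the revert in place there is no "if (nitems > 0)" guard here ... */
    MarkBufferDirty(page);

    /* ... nor around stamping the page with the WAL record's LSN. */
    PageSetLSN(page, XLogInsert());

    printf("nitems=%d dirty=%d lsn=%lu\n", nitems, page->dirty, page->lsn);
}

int main(void)
{
    Page last_block = {false, 0};

    /* Last block of the index: no items to remove, but still dirtied and stamped. */
    bt_delitems_vacuum(&last_block, 0);
    return 0;
}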