Diffstat (limited to 'src/include')
-rw-r--r--  src/include/access/nbtree.h         3
-rw-r--r--  src/include/access/nbtxlog.h       31
-rw-r--r--  src/include/access/xlog_internal.h  2
3 files changed, 12 insertions(+), 24 deletions(-)
diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h
index 780b69442ef..ef1eba06026 100644
--- a/src/include/access/nbtree.h
+++ b/src/include/access/nbtree.h
@@ -779,8 +779,7 @@ extern bool _bt_page_recyclable(Page page);
extern void _bt_delitems_delete(Relation rel, Buffer buf,
OffsetNumber *itemnos, int nitems, Relation heapRel);
extern void _bt_delitems_vacuum(Relation rel, Buffer buf,
- OffsetNumber *itemnos, int nitems,
- BlockNumber lastBlockVacuumed);
+ OffsetNumber *deletable, int ndeletable);
extern int _bt_pagedel(Relation rel, Buffer buf);
/*
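For context, a minimal caller sketch (not part of this commit) of the new
_bt_delitems_vacuum() signature: the caller gathers the offsets of dead
leaf-page items and passes just the array and its length, with no
lastBlockVacuumed argument. The tuple_is_dead() helper is a hypothetical
stand-in for the real VACUUM reaping check.

#include "postgres.h"
#include "access/nbtree.h"

/* Hypothetical sketch: delete all dead items found on one btree leaf page. */
static void
vacuum_one_leaf_page(Relation rel, Buffer buf)
{
    Page         page = BufferGetPage(buf);
    OffsetNumber deletable[MaxIndexTuplesPerPage];
    int          ndeletable = 0;
    OffsetNumber offnum,
                 maxoff = PageGetMaxOffsetNumber(page);

    for (offnum = FirstOffsetNumber; offnum <= maxoff;
         offnum = OffsetNumberNext(offnum))
    {
        /* tuple_is_dead() stands in for the real dead-tuple check */
        if (tuple_is_dead(rel, page, offnum))
            deletable[ndeletable++] = offnum;
    }

    /* New signature: just the deletable offsets and their count */
    if (ndeletable > 0)
        _bt_delitems_vacuum(rel, buf, deletable, ndeletable);
}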
diff --git a/src/include/access/nbtxlog.h b/src/include/access/nbtxlog.h
index 91b9ee00cfc..260d4af85cc 100644
--- a/src/include/access/nbtxlog.h
+++ b/src/include/access/nbtxlog.h
@@ -134,7 +134,11 @@ typedef struct xl_btree_delete
#define SizeOfBtreeDelete (offsetof(xl_btree_delete, nitems) + sizeof(int))
/*
- * This is what we need to know about page reuse within btree.
+ * This is what we need to know about page reuse within btree. This record
+ * only exists to generate a conflict point for Hot Standby.
+ *
+ * Note that we must include a RelFileNode in the record because we don't
+ * actually register the buffer with the record.
*/
typedef struct xl_btree_reuse_page
{
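A sketch of how this record might be emitted, given the new comment above
(the struct's node/block/latestRemovedXid fields are not shown in this hunk
and are assumed here). Because no buffer is registered, the RelFileNode has
to travel in the record payload itself; replay uses the record only to
resolve Hot Standby conflicts.

#include "postgres.h"
#include "access/nbtxlog.h"
#include "access/xloginsert.h"
#include "utils/rel.h"

/* Sketch: log a page-reuse record purely as a Hot Standby conflict point. */
static void
log_btree_page_reuse(Relation rel, BlockNumber blkno,
                     TransactionId latestRemovedXid)
{
    xl_btree_reuse_page xlrec;

    /* Assumed field names; no buffer is registered with this record */
    xlrec.node = rel->rd_node;
    xlrec.block = blkno;
    xlrec.latestRemovedXid = latestRemovedXid;

    XLogBeginInsert();
    XLogRegisterData((char *) &xlrec, SizeOfBtreeReusePage);

    (void) XLogInsert(RM_BTREE_ID, XLOG_BTREE_REUSE_PAGE);
}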
@@ -150,32 +154,17 @@ typedef struct xl_btree_reuse_page
* The WAL record can represent deletion of any number of index tuples on a
* single index page when executed by VACUUM.
*
- * For MVCC scans, lastBlockVacuumed will be set to InvalidBlockNumber.
- * For a non-MVCC index scans there is an additional correctness requirement
- * for applying these changes during recovery, which is that we must do one
- * of these two things for every block in the index:
- * * lock the block for cleanup and apply any required changes
- * * EnsureBlockUnpinned()
- * The purpose of this is to ensure that no index scans started before we
- * finish scanning the index are still running by the time we begin to remove
- * heap tuples.
- *
- * Any changes to any one block are registered on just one WAL record. All
- * blocks that we need to run EnsureBlockUnpinned() are listed as a block range
- * starting from the last block vacuumed through until this one. Individual
- * block numbers aren't given.
- *
- * Note that the *last* WAL record in any vacuum of an index is allowed to
- * have a zero length array of offsets. Earlier records must have at least one.
+ * Note that the WAL record in any vacuum of an index must have at least one
+ * item to delete.
*/
typedef struct xl_btree_vacuum
{
- BlockNumber lastBlockVacuumed;
+ uint32 ndeleted;
- /* TARGET OFFSET NUMBERS FOLLOW */
+ /* DELETED TARGET OFFSET NUMBERS FOLLOW */
} xl_btree_vacuum;
-#define SizeOfBtreeVacuum (offsetof(xl_btree_vacuum, lastBlockVacuumed) + sizeof(BlockNumber))
+#define SizeOfBtreeVacuum (offsetof(xl_btree_vacuum, ndeleted) + sizeof(uint32))
/*
* This is what we need to know about marking an empty branch for deletion.
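For orientation, a sketch (under the generic WAL registration API, not the
literal _bt_delitems_vacuum body) of how the slimmed-down record could be
logged: the fixed-size header carries only ndeleted, and the deleted offsets
are attached to the registered leaf-page buffer, matching the "DELETED TARGET
OFFSET NUMBERS FOLLOW" layout. The caller is assumed to have already removed
the items and marked the buffer dirty inside a critical section.

#include "postgres.h"
#include "access/nbtxlog.h"
#include "access/xloginsert.h"
#include "storage/bufmgr.h"

/* Sketch: WAL-log the deletion of ndeletable items on one leaf page. */
static void
log_btree_vacuum_page(Buffer buf, OffsetNumber *deletable, int ndeletable)
{
    Page            page = BufferGetPage(buf);
    xl_btree_vacuum xlrec;
    XLogRecPtr      recptr;

    xlrec.ndeleted = (uint32) ndeletable;

    XLogBeginInsert();
    XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
    XLogRegisterData((char *) &xlrec, SizeOfBtreeVacuum);

    /* Deleted offsets ride along with the registered buffer's data */
    XLogRegisterBufData(0, (char *) deletable,
                        ndeletable * sizeof(OffsetNumber));

    recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_VACUUM);
    PageSetLSN(page, recptr);
}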
diff --git a/src/include/access/xlog_internal.h b/src/include/access/xlog_internal.h
index e295dc65fbe..b2772d4e4b8 100644
--- a/src/include/access/xlog_internal.h
+++ b/src/include/access/xlog_internal.h
@@ -31,7 +31,7 @@
/*
* Each page of XLOG file has a header like this:
*/
-#define XLOG_PAGE_MAGIC 0xD102 /* can be used as WAL version indicator */
+#define XLOG_PAGE_MAGIC 0xD103 /* can be used as WAL version indicator */
typedef struct XLogPageHeaderData
{
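The magic bump makes WAL written with the old record format unreadable by the
new recovery code. A minimal sketch of the kind of check page-header
validation applies (xlp_magic is an existing XLogPageHeaderData field; the
real validation in xlogreader.c does considerably more than this):

#include "postgres.h"
#include "access/xlog_internal.h"

/* Sketch: accept only WAL pages written with the current page magic. */
static bool
wal_page_magic_ok(XLogPageHeader hdr)
{
    return hdr->xlp_magic == XLOG_PAGE_MAGIC;
}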