about summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--	src/backend/access/heap/pruneheap.c	| 4
-rw-r--r--	src/backend/access/heap/vacuumlazy.c	| 11
-rw-r--r--	src/backend/storage/page/bufpage.c	| 35
3 files changed, 33 insertions(+), 17 deletions(-)
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 4656f1b3db4..98d31de0031 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -244,7 +244,9 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
* Prune and repair fragmentation in the specified page.
*
* Caller must have pin and buffer cleanup lock on the page. Note that we
- * don't update the FSM information for page on caller's behalf.
+ * don't update the FSM information for page on caller's behalf. Caller might
+ * also need to account for a reduction in the length of the line pointer
+ * array following array truncation by us.
*
* vistest is used to distinguish whether tuples are DEAD or RECENTLY_DEAD
* (see heap_prune_satisfies_vacuum and
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 092b739dda9..e1cac74e620 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -223,7 +223,7 @@ typedef struct LVRelState
*/
typedef struct LVPagePruneState
{
- bool hastup; /* Page is truncatable? */
+ bool hastup; /* Page prevents rel truncation? */
bool has_lpdead_items; /* includes existing LP_DEAD items */
/*
@@ -1393,7 +1393,7 @@ lazy_scan_skip(LVRelState *vacrel, Buffer *vmbuffer, BlockNumber next_block,
*
* It's necessary to consider new pages as a special case, since the rules for
* maintaining the visibility map and FSM with empty pages are a little
- * different (though new pages can be truncated based on the usual rules).
+ * different (though new pages can be truncated away during rel truncation).
*
* Empty pages are not really a special case -- they're just heap pages that
* have no allocated tuples (including even LP_UNUSED items). You might
@@ -1561,6 +1561,11 @@ lazy_scan_prune(LVRelState *vacrel,
Assert(BufferGetBlockNumber(buf) == blkno);
+ /*
+ * maxoff might be reduced following line pointer array truncation in
+ * heap_page_prune. That's safe for us to ignore, since the reclaimed
+ * space will continue to look like LP_UNUSED items below.
+ */
maxoff = PageGetMaxOffsetNumber(page);
retry:
@@ -1768,7 +1773,7 @@ retry:
* Check tuple left behind after pruning to see if needs to be frozen
* now.
*/
- prunestate->hastup = true; /* page won't be truncatable */
+ prunestate->hastup = true; /* page makes rel truncation unsafe */
if (heap_prepare_freeze_tuple(tuple.t_data,
vacrel->relfrozenxid,
vacrel->relminmxid,
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index 147ba4d9232..366d57ea7ac 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -688,22 +688,14 @@ compactify_tuples(itemIdCompact itemidbase, int nitems, Page page, bool presorte
*
* This routine is usable for heap pages only, but see PageIndexMultiDelete.
*
- * Never removes unused line pointers. PageTruncateLinePointerArray can
- * safely remove some unused line pointers. It ought to be safe for this
- * routine to free unused line pointers in roughly the same way, but it's not
- * clear that that would be beneficial.
- *
- * PageTruncateLinePointerArray is only called during VACUUM's second pass
- * over the heap. Any unused line pointers that it sees are likely to have
- * been set to LP_UNUSED (from LP_DEAD) immediately before the time it is
- * called. On the other hand, many tables have the vast majority of all
- * required pruning performed opportunistically (not during VACUUM). And so
- * there is, in general, a good chance that even large groups of unused line
- * pointers that we see here will be recycled quickly.
+ * This routine removes unused line pointers from the end of the line pointer
+ * array. This is possible when dead heap-only tuples get removed by pruning,
+ * especially when there were HOT chains with several tuples each beforehand.
*
* Caller had better have a full cleanup lock on page's buffer. As a side
* effect the page's PD_HAS_FREE_LINES hint bit will be set or unset as
- * needed.
+ * needed. Caller might also need to account for a reduction in the length of
+ * the line pointer array following array truncation.
*/
void
PageRepairFragmentation(Page page)
@@ -718,6 +710,7 @@ PageRepairFragmentation(Page page)
int nline,
nstorage,
nunused;
+ OffsetNumber finalusedlp = InvalidOffsetNumber;
int i;
Size totallen;
bool presorted = true; /* For now */
@@ -771,10 +764,13 @@ PageRepairFragmentation(Page page)
totallen += itemidptr->alignedlen;
itemidptr++;
}
+
+ finalusedlp = i; /* Could be the final non-LP_UNUSED item */
}
else
{
/* Unused entries should have lp_len = 0, but make sure */
+ Assert(!ItemIdHasStorage(lp));
ItemIdSetUnused(lp);
nunused++;
}
@@ -798,6 +794,19 @@ PageRepairFragmentation(Page page)
compactify_tuples(itemidbase, nstorage, page, presorted);
}
+ if (finalusedlp != nline)
+ {
+ /* The last line pointer is not the last used line pointer */
+ int nunusedend = nline - finalusedlp;
+
+ Assert(nunused >= nunusedend && nunusedend > 0);
+
+ /* remove trailing unused line pointers from the count */
+ nunused -= nunusedend;
+ /* truncate the line pointer array */
+ ((PageHeader) page)->pd_lower -= (sizeof(ItemIdData) * nunusedend);
+ }
+
/* Set hint bit for PageAddItemExtended */
if (nunused > 0)
PageSetHasFreeLinePointers(page);