Diffstat (limited to 'src/backend/access/heap')
-rw-r--r-- | src/backend/access/heap/heapam.c     | 22
-rw-r--r-- | src/backend/access/heap/pruneheap.c  |  4
-rw-r--r-- | src/backend/access/heap/vacuumlazy.c | 16
3 files changed, 35 insertions, 7 deletions
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 9cbc161d7a9..03d4abc938b 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -635,8 +635,15 @@ heapgettup(HeapScanDesc scan,
         }
         else
         {
+            /*
+             * The previous returned tuple may have been vacuumed since the
+             * previous scan when we use a non-MVCC snapshot, so we must
+             * re-establish the lineoff <= PageGetMaxOffsetNumber(dp)
+             * invariant
+             */
             lineoff =            /* previous offnum */
-                OffsetNumberPrev(ItemPointerGetOffsetNumber(&(tuple->t_self)));
+                Min(lines,
+                    OffsetNumberPrev(ItemPointerGetOffsetNumber(&(tuple->t_self))));
         }
         /* page and lineoff now reference the physically previous tid */
 
@@ -678,6 +685,13 @@ heapgettup(HeapScanDesc scan,
         lpp = PageGetItemId(dp, lineoff);
         for (;;)
         {
+            /*
+             * Only continue scanning the page while we have lines left.
+             *
+             * Note that this protects us from accessing line pointers past
+             * PageGetMaxOffsetNumber(); both for forward scans when we resume
+             * the table scan, and for when we start scanning a new page.
+             */
             while (linesleft > 0)
             {
                 if (ItemIdIsNormal(lpp))
@@ -8556,10 +8570,8 @@ heap_xlog_vacuum(XLogReaderState *record)
             ItemIdSetUnused(lp);
         }
 
-        /*
-         * Update the page's hint bit about whether it has free pointers
-         */
-        PageSetHasFreeLinePointers(page);
+        /* Attempt to truncate line pointer array now */
+        PageTruncateLinePointerArray(page);
 
         PageSetLSN(page, lsn);
         MarkBufferDirty(buffer);
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index f75502ca2c0..0c8e49d3e6c 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -962,6 +962,10 @@ heap_get_root_tuples(Page page, OffsetNumber *root_offsets)
          */
         for (;;)
         {
+            /* Sanity check */
+            if (nextoffnum < FirstOffsetNumber || nextoffnum > maxoff)
+                break;
+
             lp = PageGetItemId(page, nextoffnum);
 
             /* Check for broken chains */
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 446e3bc4523..1d55d0ecf9d 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -1444,7 +1444,11 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
         if (prunestate.has_lpdead_items && vacrel->do_index_vacuuming)
         {
             /*
-             * Wait until lazy_vacuum_heap_rel() to save free space.
+             * Wait until lazy_vacuum_heap_rel() to save free space. This
+             * doesn't just save us some cycles; it also allows us to record
+             * any additional free space that lazy_vacuum_heap_page() will
+             * make available in cases where it's possible to truncate the
+             * page's line pointer array.
              *
              * Note: The one-pass (no indexes) case is only supposed to make
              * it this far when there were no LP_DEAD items during pruning.
@@ -2033,6 +2037,13 @@ lazy_vacuum_all_indexes(LVRelState *vacrel)
  * Pages that never had lazy_scan_prune record LP_DEAD items are not visited
  * at all.
  *
+ * We may also be able to truncate the line pointer array of the heap pages we
+ * visit.  If there is a contiguous group of LP_UNUSED items at the end of the
+ * array, it can be reclaimed as free space.  These LP_UNUSED items usually
+ * start out as LP_DEAD items recorded by lazy_scan_prune (we set items from
+ * each page to LP_UNUSED, and then consider if it's possible to truncate the
+ * page's line pointer array).
+ *
  * Note: the reason for doing this as a second pass is we cannot remove the
  * tuples until we've removed their index entries, and we want to process
  * index entry removal in batches as large as possible.
@@ -2175,7 +2186,8 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
 
     Assert(uncnt > 0);
 
-    PageSetHasFreeLinePointers(page);
+    /* Attempt to truncate line pointer array now */
+    PageTruncateLinePointerArray(page);
 
     /*
      * Mark buffer dirty before we write WAL.
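
The common thread in the heap_xlog_vacuum() and lazy_vacuum_heap_page() hunks above is that, once the dead items have been set LP_UNUSED, they now call PageTruncateLinePointerArray() instead of merely setting the page's free-line-pointers hint, so a contiguous run of unused item identifiers at the end of the array is handed back as free space. Below is a minimal, self-contained sketch of that idea only; the LineItem/HeapPageModel types and the MODEL_* constants are invented stand-ins for illustration and do not reproduce PostgreSQL's bufpage.c implementation.

/*
 * Toy model of truncating a heap page's line pointer array: find the
 * last item identifier that is still in use and pull pd_lower back so
 * that every trailing unused slot becomes ordinary free space.
 *
 * LineItem/HeapPageModel are invented stand-ins, not PostgreSQL types.
 */
#include <stdint.h>
#include <stdio.h>

#define LP_UNUSED 0
#define LP_NORMAL 1

typedef struct
{
    uint16_t lp_off;            /* tuple offset on page (0 if unused) */
    uint16_t lp_flags;          /* LP_UNUSED or LP_NORMAL in this model */
} LineItem;

typedef struct
{
    uint16_t pd_lower;          /* end of line pointer array */
    uint16_t pd_upper;          /* start of tuple space */
    LineItem items[16];         /* line pointer array (toy size) */
} HeapPageModel;

#define MODEL_HEADER_SIZE   ((uint16_t) 4)
#define MODEL_ITEM_SIZE     ((uint16_t) sizeof(LineItem))

static uint16_t
model_max_offset(const HeapPageModel *page)
{
    return (page->pd_lower - MODEL_HEADER_SIZE) / MODEL_ITEM_SIZE;
}

static void
model_truncate_line_pointers(HeapPageModel *page)
{
    uint16_t nitems = model_max_offset(page);
    uint16_t nkeep = 0;

    for (uint16_t i = 0; i < nitems; i++)
    {
        if (page->items[i].lp_flags != LP_UNUSED)
            nkeep = i + 1;      /* highest offset still in use */
    }

    /* trailing LP_UNUSED slots fall off the end of the array */
    page->pd_lower = MODEL_HEADER_SIZE + nkeep * MODEL_ITEM_SIZE;
}

int
main(void)
{
    HeapPageModel page = {0};

    /* one live tuple followed by two items VACUUM just set LP_UNUSED */
    page.items[0] = (LineItem) {8100, LP_NORMAL};
    page.items[1] = (LineItem) {0, LP_UNUSED};
    page.items[2] = (LineItem) {0, LP_UNUSED};
    page.pd_lower = MODEL_HEADER_SIZE + 3 * MODEL_ITEM_SIZE;
    page.pd_upper = 8100;

    model_truncate_line_pointers(&page);
    printf("line pointers left: %u\n", (unsigned) model_max_offset(&page));    /* prints 1 */
    return 0;
}

The real routine handles locking and edge cases this toy ignores; the sketch is only meant to show why recording free space is deferred to lazy_vacuum_heap_rel(), which sees the page after any truncation has happened.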
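The heapgettup() hunks deal with a consequence of that truncation for scans using a non-MVCC snapshot: the offset remembered from the previous call may now point past the end of a page whose line pointer array has shrunk in the meantime, so the resume offset is clamped with Min() and the inner loop only runs while linesleft is positive. A tiny, hypothetical sketch of the clamping pattern follows (resume_backward and its parameters are illustrative names, not backend code):

#include <stdint.h>

#define Min(a, b)   ((a) < (b) ? (a) : (b))

typedef uint16_t OffsetNumber;

/*
 * Step back to the item before the previously returned offset, but never
 * beyond the page's current maximum offset, which may have moved down if
 * trailing line pointers were truncated away since the last call.
 */
OffsetNumber
resume_backward(OffsetNumber prev_offset, OffsetNumber max_offset)
{
    return Min(max_offset, (OffsetNumber) (prev_offset - 1));
}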