author     Andres Freund <andres@anarazel.de>    2025-04-02 14:25:17 -0400
committer  Andres Freund <andres@anarazel.de>    2025-04-02 14:54:20 -0400
commit  459e7bf8e2f8ab894dc613fa8555b74c4eef6969 (patch)
tree    d89ead863ddc22c0615d244c97ce26d3cf9cda32 /src
parent  0dca5d68d7bebf2c1036fd84875533afef6df992 (diff)
Remove HeapBitmapScan's skip_fetch optimization
The optimization does not take the removal of TIDs by a concurrent vacuum into
account. The concurrent vacuum can remove dead TIDs and make pages ALL_VISIBLE
while those dead TIDs are referenced in the bitmap. This can lead to a
skip_fetch scan returning too many tuples.

It likely would be possible to implement this optimization safely, but we
don't have the necessary infrastructure in place. Nor is it clear that it's
worth building that infrastructure, given how limited the skip_fetch
optimization is.

In the backbranches we just disable the optimization by always passing
need_tuples=true to table_beginscan_bm(). We can't perform API/ABI changes in
the backbranches and we want to make the change as minimal as possible.

Author: Matthias van de Meent <boekewurm+postgres@gmail.com>
Reported-By: Konstantin Knizhnik <knizhnik@garret.ru>
Discussion: https://postgr.es/m/CAEze2Wg3gXXZTr6_rwC+s4-o2ZVFB5F985uUSgJTsECx6AmGcQ@mail.gmail.com
Backpatch-through: 13
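[Editor's note] To make the failure mode concrete, here is a minimal
standalone simulation of the race. This is an illustration only, not
PostgreSQL code; the variable names and tuple counts are invented:

	/*
	 * Simulation of the skip_fetch race (not PostgreSQL code).
	 * The bitmap records 3 TIDs for a page; a concurrent vacuum then
	 * prunes the 2 dead ones and marks the page ALL_VISIBLE. The
	 * skip_fetch path trusts the bitmap's tuple count, so it emits 3
	 * NULL-filled tuples where a real heap fetch would return only 1.
	 */
	#include <stdio.h>
	#include <stdbool.h>

	int
	main(void)
	{
		int		bitmap_ntuples = 3;	/* TIDs recorded at bitmap build time,
									 * two of which point at dead tuples */
		int		live_ntuples = 1;	/* tuples a real heap fetch returns */
		bool	all_visible;

		/* concurrent vacuum: prunes dead TIDs, sets the VM bit */
		all_visible = true;

		/*
		 * skip_fetch path: page is all-visible and needs no recheck, so
		 * emit one NULL-filled tuple per bitmap entry instead of fetching.
		 */
		int		returned = all_visible ? bitmap_ntuples : live_ntuples;

		printf("skip_fetch returned %d tuples; a heap fetch returns %d\n",
			   returned, live_ntuples);
		return 0;
	}

Once the bitmap holds TIDs that vacuum has since pruned, the all-visible bit
no longer implies that every bitmap entry corresponds to a returnable tuple;
that is why the code removed below could emit too many NULL-filled rows.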
Diffstat (limited to 'src')
-rw-r--r--  src/backend/access/heap/heapam.c            61
-rw-r--r--  src/backend/access/heap/heapam_handler.c    46
-rw-r--r--  src/backend/executor/nodeBitmapHeapscan.c   15
-rw-r--r--  src/include/access/heapam.h                 12
-rw-r--r--  src/include/access/tableam.h                12
5 files changed, 13 insertions(+), 133 deletions(-)
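[Editor's note] On master the fix removes the need_tuple parameter from
table_beginscan_bm() outright. A hedged before/after sketch of a call site
follows (the call shape matches the diff below; the surrounding variables
are assumed):

	/* before this commit */
	scan = table_beginscan_bm(rel, snapshot, 0, NULL, need_tuples);

	/* after this commit: heap tuples are always fetched */
	scan = table_beginscan_bm(rel, snapshot, 0, NULL);

In the backbranches, where the signature must stay stable, the same effect
is achieved by the caller always passing need_tuples=true.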
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index cedaa195cb6..5b3fe4a1d3b 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -314,31 +314,6 @@ bitmapheap_stream_read_next(ReadStream *pgsr, void *private_data,
tbmres->blockno >= hscan->rs_nblocks)
continue;
- /*
- * We can skip fetching the heap page if we don't need any fields from
- * the heap, the bitmap entries don't need rechecking, and all tuples
- * on the page are visible to our transaction.
- */
- if (!(sscan->rs_flags & SO_NEED_TUPLES) &&
- !tbmres->recheck &&
- VM_ALL_VISIBLE(sscan->rs_rd, tbmres->blockno, &bscan->rs_vmbuffer))
- {
- OffsetNumber offsets[TBM_MAX_TUPLES_PER_PAGE];
- int noffsets;
-
- /* can't be lossy in the skip_fetch case */
- Assert(!tbmres->lossy);
- Assert(bscan->rs_empty_tuples_pending >= 0);
-
- /*
- * We throw away the offsets, but this is the easiest way to get a
- * count of tuples.
- */
- noffsets = tbm_extract_page_tuple(tbmres, offsets, TBM_MAX_TUPLES_PER_PAGE);
- bscan->rs_empty_tuples_pending += noffsets;
- continue;
- }
-
return tbmres->blockno;
}
@@ -1124,8 +1099,10 @@ heap_beginscan(Relation relation, Snapshot snapshot,
{
BitmapHeapScanDesc bscan = palloc(sizeof(BitmapHeapScanDescData));
- bscan->rs_vmbuffer = InvalidBuffer;
- bscan->rs_empty_tuples_pending = 0;
+ /*
+ * Bitmap Heap scans do not have any fields that a normal Heap Scan
+ * does not have, so no special initializations required here.
+ */
scan = (HeapScanDesc) bscan;
}
else
@@ -1280,23 +1257,10 @@ heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
scan->rs_cbuf = InvalidBuffer;
}
- if (scan->rs_base.rs_flags & SO_TYPE_BITMAPSCAN)
- {
- BitmapHeapScanDesc bscan = (BitmapHeapScanDesc) scan;
-
- /*
- * Reset empty_tuples_pending, a field only used by bitmap heap scan,
- * to avoid incorrectly emitting NULL-filled tuples from a previous
- * scan on rescan.
- */
- bscan->rs_empty_tuples_pending = 0;
-
- if (BufferIsValid(bscan->rs_vmbuffer))
- {
- ReleaseBuffer(bscan->rs_vmbuffer);
- bscan->rs_vmbuffer = InvalidBuffer;
- }
- }
+ /*
+ * SO_TYPE_BITMAPSCAN would be cleaned up here, but it does not hold any
+ * additional data vs a normal HeapScan
+ */
/*
* The read stream is reset on rescan. This must be done before
@@ -1325,15 +1289,6 @@ heap_endscan(TableScanDesc sscan)
if (BufferIsValid(scan->rs_cbuf))
ReleaseBuffer(scan->rs_cbuf);
- if (scan->rs_base.rs_flags & SO_TYPE_BITMAPSCAN)
- {
- BitmapHeapScanDesc bscan = (BitmapHeapScanDesc) sscan;
-
- bscan->rs_empty_tuples_pending = 0;
- if (BufferIsValid(bscan->rs_vmbuffer))
- ReleaseBuffer(bscan->rs_vmbuffer);
- }
-
/*
* Must free the read stream before freeing the BufferAccessStrategy.
*/
diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index 24d3765aa20..ac082fefa77 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -2138,32 +2138,6 @@ heapam_scan_bitmap_next_tuple(TableScanDesc scan,
while (hscan->rs_cindex >= hscan->rs_ntuples)
{
/*
- * Emit empty tuples before advancing to the next block
- */
- if (bscan->rs_empty_tuples_pending > 0)
- {
- /*
- * If we don't have to fetch the tuple, just return nulls.
- */
- ExecStoreAllNullTuple(slot);
- bscan->rs_empty_tuples_pending--;
-
- /*
- * We do not recheck all NULL tuples. Because the streaming read
- * API only yields TBMIterateResults for blocks actually fetched
- * from the heap, we must unset `recheck` ourselves here to ensure
- * correct results.
- *
- * Our read stream callback accrues a count of empty tuples to
- * emit and then emits them after emitting tuples from the next
- * fetched block. If no blocks need fetching, we'll emit the
- * accrued count at the end of the scan.
- */
- *recheck = false;
- return true;
- }
-
- /*
* Returns false if the bitmap is exhausted and there are no further
* blocks we need to scan.
*/
@@ -2516,24 +2490,8 @@ BitmapHeapScanNextBlock(TableScanDesc scan,
if (BufferIsInvalid(hscan->rs_cbuf))
{
- if (BufferIsValid(bscan->rs_vmbuffer))
- {
- ReleaseBuffer(bscan->rs_vmbuffer);
- bscan->rs_vmbuffer = InvalidBuffer;
- }
-
- /*
- * The bitmap is exhausted. Now emit any remaining empty tuples. The
- * read stream API only returns TBMIterateResults for blocks actually
- * fetched from the heap. Our callback will accrue a count of empty
- * tuples to emit for all blocks we skipped fetching. So, if we skip
- * fetching heap blocks at the end of the relation (or no heap blocks
- * are fetched) we need to ensure we emit empty tuples before ending
- * the scan. We don't recheck empty tuples so ensure `recheck` is
- * unset.
- */
- *recheck = false;
- return bscan->rs_empty_tuples_pending > 0;
+ /* the bitmap is exhausted */
+ return false;
}
Assert(per_buffer_data);
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index 3e33360c0fc..bf24f3d7fe0 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -105,24 +105,11 @@ BitmapTableScanSetup(BitmapHeapScanState *node)
*/
if (!node->ss.ss_currentScanDesc)
{
- bool need_tuples = false;
-
- /*
- * We can potentially skip fetching heap pages if we do not need any
- * columns of the table, either for checking non-indexable quals or
- * for returning data. This test is a bit simplistic, as it checks
- * the stronger condition that there's no qual or return tlist at all.
- * But in most cases it's probably not worth working harder than that.
- */
- need_tuples = (node->ss.ps.plan->qual != NIL ||
- node->ss.ps.plan->targetlist != NIL);
-
node->ss.ss_currentScanDesc =
table_beginscan_bm(node->ss.ss_currentRelation,
node->ss.ps.state->es_snapshot,
0,
- NULL,
- need_tuples);
+ NULL);
}
node->ss.ss_currentScanDesc->st.rs_tbmiterator = tbmiterator;
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 1640d9c32f7..e48fe434cd3 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -103,17 +103,7 @@ typedef struct BitmapHeapScanDescData
{
HeapScanDescData rs_heap_base;
- /*
- * These fields are only used for bitmap scans for the "skip fetch"
- * optimization. Bitmap scans needing no fields from the heap may skip
- * fetching an all visible block, instead using the number of tuples per
- * block reported by the bitmap to determine how many NULL-filled tuples
- * to return. They are common to parallel and serial BitmapHeapScans
- */
-
- /* page of VM containing info for current block */
- Buffer rs_vmbuffer;
- int rs_empty_tuples_pending;
+ /* Holds no data */
} BitmapHeapScanDescData;
typedef struct BitmapHeapScanDescData *BitmapHeapScanDesc;
diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h
index b8cb1e744ad..8713e12cbfb 100644
--- a/src/include/access/tableam.h
+++ b/src/include/access/tableam.h
@@ -62,13 +62,6 @@ typedef enum ScanOptions
/* unregister snapshot at scan end? */
SO_TEMP_SNAPSHOT = 1 << 9,
-
- /*
- * At the discretion of the table AM, bitmap table scans may be able to
- * skip fetching a block from the table if none of the table data is
- * needed. If table data may be needed, set SO_NEED_TUPLES.
- */
- SO_NEED_TUPLES = 1 << 10,
} ScanOptions;
/*
@@ -920,13 +913,10 @@ table_beginscan_strat(Relation rel, Snapshot snapshot,
*/
static inline TableScanDesc
table_beginscan_bm(Relation rel, Snapshot snapshot,
- int nkeys, struct ScanKeyData *key, bool need_tuple)
+ int nkeys, struct ScanKeyData *key)
{
uint32 flags = SO_TYPE_BITMAPSCAN | SO_ALLOW_PAGEMODE;
- if (need_tuple)
- flags |= SO_NEED_TUPLES;
-
return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key,
NULL, flags);
}