Diffstat (limited to 'src/backend/executor/nodeBitmapHeapscan.c')
-rw-r--r--  src/backend/executor/nodeBitmapHeapscan.c  165
1 file changed, 126 insertions, 39 deletions
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index 6035b4dfd4c..b885f2a3a65 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -39,6 +39,7 @@
#include "access/relscan.h"
#include "access/transam.h"
+#include "access/visibilitymap.h"
#include "executor/execdebug.h"
#include "executor/nodeBitmapHeapscan.h"
#include "miscadmin.h"
@@ -225,9 +226,31 @@ BitmapHeapNext(BitmapHeapScanState *node)
}
/*
- * Fetch the current heap page and identify candidate tuples.
+ * We can skip fetching the heap page if we don't need any fields
+ * from the heap, and the bitmap entries don't need rechecking,
+ * and all tuples on the page are visible to our transaction.
*/
- bitgetpage(scan, tbmres);
+ node->skip_fetch = (node->can_skip_fetch &&
+ !tbmres->recheck &&
+ VM_ALL_VISIBLE(node->ss.ss_currentRelation,
+ tbmres->blockno,
+ &node->vmbuffer));
+
+ if (node->skip_fetch)
+ {
+ /*
+ * The number of tuples on this page is put into
+ * scan->rs_ntuples; note we don't fill scan->rs_vistuples.
+ */
+ scan->rs_ntuples = tbmres->ntuples;
+ }
+ else
+ {
+ /*
+ * Fetch the current heap page and identify candidate tuples.
+ */
+ bitgetpage(scan, tbmres);
+ }
if (tbmres->ntuples >= 0)
node->exact_pages++;
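
The skip test in the hunk above bundles three independent conditions. A minimal sketch of the same test pulled out into a hypothetical helper (illustrative only, not part of the patch):

    /*
     * Sketch only: true when the heap page need not be read at all.
     * Requires (a) the plan references no heap columns (can_skip_fetch),
     * (b) the bitmap entry is exact, so no qual recheck is needed, and
     * (c) the visibility map says every tuple on the page is visible to
     * all transactions.
     */
    static bool
    page_fetch_is_skippable(BitmapHeapScanState *node, TBMIterateResult *tbmres)
    {
        return node->can_skip_fetch &&
               !tbmres->recheck &&
               VM_ALL_VISIBLE(node->ss.ss_currentRelation,
                              tbmres->blockno,
                              &node->vmbuffer);
    }
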
@@ -289,45 +312,55 @@ BitmapHeapNext(BitmapHeapScanState *node)
*/
BitmapPrefetch(node, scan);
- /*
- * Okay to fetch the tuple
- */
- targoffset = scan->rs_vistuples[scan->rs_cindex];
- dp = (Page) BufferGetPage(scan->rs_cbuf);
- lp = PageGetItemId(dp, targoffset);
- Assert(ItemIdIsNormal(lp));
+ if (node->skip_fetch)
+ {
+ /*
+ * If we don't have to fetch the tuple, just return nulls.
+ */
+ ExecStoreAllNullTuple(slot);
+ }
+ else
+ {
+ /*
+ * Okay to fetch the tuple.
+ */
+ targoffset = scan->rs_vistuples[scan->rs_cindex];
+ dp = (Page) BufferGetPage(scan->rs_cbuf);
+ lp = PageGetItemId(dp, targoffset);
+ Assert(ItemIdIsNormal(lp));
- scan->rs_ctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
- scan->rs_ctup.t_len = ItemIdGetLength(lp);
- scan->rs_ctup.t_tableOid = scan->rs_rd->rd_id;
- ItemPointerSet(&scan->rs_ctup.t_self, tbmres->blockno, targoffset);
+ scan->rs_ctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
+ scan->rs_ctup.t_len = ItemIdGetLength(lp);
+ scan->rs_ctup.t_tableOid = scan->rs_rd->rd_id;
+ ItemPointerSet(&scan->rs_ctup.t_self, tbmres->blockno, targoffset);
- pgstat_count_heap_fetch(scan->rs_rd);
+ pgstat_count_heap_fetch(scan->rs_rd);
- /*
- * Set up the result slot to point to this tuple. Note that the slot
- * acquires a pin on the buffer.
- */
- ExecStoreTuple(&scan->rs_ctup,
- slot,
- scan->rs_cbuf,
- false);
-
- /*
- * If we are using lossy info, we have to recheck the qual conditions
- * at every tuple.
- */
- if (tbmres->recheck)
- {
- econtext->ecxt_scantuple = slot;
- ResetExprContext(econtext);
+ /*
+ * Set up the result slot to point to this tuple. Note that the
+ * slot acquires a pin on the buffer.
+ */
+ ExecStoreTuple(&scan->rs_ctup,
+ slot,
+ scan->rs_cbuf,
+ false);
- if (!ExecQual(node->bitmapqualorig, econtext))
+ /*
+ * If we are using lossy info, we have to recheck the qual
+ * conditions at every tuple.
+ */
+ if (tbmres->recheck)
{
- /* Fails recheck, so drop it and loop back for another */
- InstrCountFiltered2(node, 1);
- ExecClearTuple(slot);
- continue;
+ econtext->ecxt_scantuple = slot;
+ ResetExprContext(econtext);
+
+ if (!ExecQual(node->bitmapqualorig, econtext))
+ {
+ /* Fails recheck, so drop it and loop back for another */
+ InstrCountFiltered2(node, 1);
+ ExecClearTuple(slot);
+ continue;
+ }
}
}
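
The all-nulls slot in the skip path works because can_skip_fetch already guarantees that no column of the returned tuple is ever examined (no qual, no targetlist), so only the number of rows matters. Restated compactly, the per-tuple branch above reduces to the following sketch (same calls as the patch, illustrative only):

    if (node->skip_fetch)
        ExecStoreAllNullTuple(slot);            /* row exists; contents never inspected */
    else
        ExecStoreTuple(&scan->rs_ctup, slot,    /* real tuple; slot pins the buffer */
                       scan->rs_cbuf, false);
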
@@ -582,6 +615,7 @@ BitmapPrefetch(BitmapHeapScanState *node, HeapScanDesc scan)
while (node->prefetch_pages < node->prefetch_target)
{
TBMIterateResult *tbmpre = tbm_iterate(prefetch_iterator);
+ bool skip_fetch;
if (tbmpre == NULL)
{
@@ -591,7 +625,26 @@ BitmapPrefetch(BitmapHeapScanState *node, HeapScanDesc scan)
break;
}
node->prefetch_pages++;
- PrefetchBuffer(scan->rs_rd, MAIN_FORKNUM, tbmpre->blockno);
+
+ /*
+ * If we expect not to have to actually read this heap page,
+ * skip this prefetch call, but continue to run the prefetch
+ * logic normally. (Would it be better not to increment
+ * prefetch_pages?)
+ *
+ * This depends on the assumption that the index AM will
+ * report the same recheck flag for this future heap page as
+ * it did for the current heap page; which is not a certainty
+ * but is true in many cases.
+ */
+ skip_fetch = (node->can_skip_fetch &&
+ (node->tbmres ? !node->tbmres->recheck : false) &&
+ VM_ALL_VISIBLE(node->ss.ss_currentRelation,
+ tbmpre->blockno,
+ &node->pvmbuffer));
+
+ if (!skip_fetch)
+ PrefetchBuffer(scan->rs_rd, MAIN_FORKNUM, tbmpre->blockno);
}
}
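
The prefetch-side test mirrors the fetch-side one, except that the recheck flag of the page being prefetched is not yet known, so the current page's flag stands in for it, as the comment above explains. A sketch of that heuristic as a hypothetical helper (not part of the patch):

    /*
     * Sketch only: decide whether prefetching 'blockno' can be skipped.
     * The recheck flag for that future page is unknown here, so the
     * current page's flag (node->tbmres) is used as a guess; guessing
     * wrong only costs a useless prefetch or a missed one, never a
     * wrong query result.
     */
    static bool
    prefetch_is_skippable(BitmapHeapScanState *node, BlockNumber blockno)
    {
        return node->can_skip_fetch &&
               node->tbmres != NULL &&
               !node->tbmres->recheck &&
               VM_ALL_VISIBLE(node->ss.ss_currentRelation,
                              blockno,
                              &node->pvmbuffer);
    }
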
@@ -608,6 +661,7 @@ BitmapPrefetch(BitmapHeapScanState *node, HeapScanDesc scan)
{
TBMIterateResult *tbmpre;
bool do_prefetch = false;
+ bool skip_fetch;
/*
* Recheck under the mutex. If some other process has already
@@ -633,7 +687,15 @@ BitmapPrefetch(BitmapHeapScanState *node, HeapScanDesc scan)
break;
}
- PrefetchBuffer(scan->rs_rd, MAIN_FORKNUM, tbmpre->blockno);
+ /* As above, skip prefetch if we expect not to need page */
+ skip_fetch = (node->can_skip_fetch &&
+ (node->tbmres ? !node->tbmres->recheck : false) &&
+ VM_ALL_VISIBLE(node->ss.ss_currentRelation,
+ tbmpre->blockno,
+ &node->pvmbuffer));
+
+ if (!skip_fetch)
+ PrefetchBuffer(scan->rs_rd, MAIN_FORKNUM, tbmpre->blockno);
}
}
}
@@ -687,6 +749,7 @@ ExecReScanBitmapHeapScan(BitmapHeapScanState *node)
/* rescan to release any page pin */
heap_rescan(node->ss.ss_currentScanDesc, NULL);
+ /* release bitmaps and buffers if any */
if (node->tbmiterator)
tbm_end_iterate(node->tbmiterator);
if (node->prefetch_iterator)
@@ -697,6 +760,10 @@ ExecReScanBitmapHeapScan(BitmapHeapScanState *node)
tbm_end_shared_iterate(node->shared_prefetch_iterator);
if (node->tbm)
tbm_free(node->tbm);
+ if (node->vmbuffer != InvalidBuffer)
+ ReleaseBuffer(node->vmbuffer);
+ if (node->pvmbuffer != InvalidBuffer)
+ ReleaseBuffer(node->pvmbuffer);
node->tbm = NULL;
node->tbmiterator = NULL;
node->tbmres = NULL;
@@ -704,6 +771,8 @@ ExecReScanBitmapHeapScan(BitmapHeapScanState *node)
node->initialized = false;
node->shared_tbmiterator = NULL;
node->shared_prefetch_iterator = NULL;
+ node->vmbuffer = InvalidBuffer;
+ node->pvmbuffer = InvalidBuffer;
ExecScanReScan(&node->ss);
@@ -748,7 +817,7 @@ ExecEndBitmapHeapScan(BitmapHeapScanState *node)
ExecEndNode(outerPlanState(node));
/*
- * release bitmap if any
+ * release bitmaps and buffers if any
*/
if (node->tbmiterator)
tbm_end_iterate(node->tbmiterator);
@@ -760,6 +829,10 @@ ExecEndBitmapHeapScan(BitmapHeapScanState *node)
tbm_end_shared_iterate(node->shared_tbmiterator);
if (node->shared_prefetch_iterator)
tbm_end_shared_iterate(node->shared_prefetch_iterator);
+ if (node->vmbuffer != InvalidBuffer)
+ ReleaseBuffer(node->vmbuffer);
+ if (node->pvmbuffer != InvalidBuffer)
+ ReleaseBuffer(node->pvmbuffer);
/*
* close heap scan
@@ -805,6 +878,9 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
scanstate->tbm = NULL;
scanstate->tbmiterator = NULL;
scanstate->tbmres = NULL;
+ scanstate->skip_fetch = false;
+ scanstate->vmbuffer = InvalidBuffer;
+ scanstate->pvmbuffer = InvalidBuffer;
scanstate->exact_pages = 0;
scanstate->lossy_pages = 0;
scanstate->prefetch_iterator = NULL;
@@ -815,9 +891,20 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
scanstate->pscan_len = 0;
scanstate->initialized = false;
scanstate->shared_tbmiterator = NULL;
+ scanstate->shared_prefetch_iterator = NULL;
scanstate->pstate = NULL;
/*
+ * We can potentially skip fetching heap pages if we do not need any
+ * columns of the table, either for checking non-indexable quals or for
+ * returning data. This test is a bit simplistic, as it checks the
+ * stronger condition that there's no qual or return tlist at all. But in
+ * most cases it's probably not worth working harder than that.
+ */
+ scanstate->can_skip_fetch = (node->scan.plan.qual == NIL &&
+ node->scan.plan.targetlist == NIL);
+
+ /*
* Miscellaneous initialization
*
* create expression context for node
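
For context on the can_skip_fetch test in the last hunk: plan.qual holds only the non-indexable filter conditions (the original index quals live in bitmapqualorig and are applied through the recheck path), and the targetlist is the scan node's own output list. Both are typically empty for queries of the shape SELECT count(*) FROM tab WHERE indexed_col ..., which is the main case this optimization targets; on all-visible pages such plans can then be answered from the bitmap and the visibility map without touching the heap.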