aboutsummaryrefslogtreecommitdiff
path: root/src/backend/access/index
diff options
context:
space:
mode:
Diffstat (limited to 'src/backend/access/index')
-rw-r--r--src/backend/access/index/genam.c110
-rw-r--r--src/backend/access/index/indexam.c164
2 files changed, 126 insertions, 148 deletions
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index e0a5ea42d52..5222966e510 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -22,6 +22,7 @@
#include "access/genam.h"
#include "access/heapam.h"
#include "access/relscan.h"
+#include "access/tableam.h"
#include "access/transam.h"
#include "catalog/index.h"
#include "lib/stringinfo.h"
@@ -83,6 +84,7 @@ RelationGetIndexScan(Relation indexRelation, int nkeys, int norderbys)
scan = (IndexScanDesc) palloc(sizeof(IndexScanDescData));
scan->heapRelation = NULL; /* may be set later */
+ scan->xs_heapfetch = NULL;
scan->indexRelation = indexRelation;
scan->xs_snapshot = InvalidSnapshot; /* caller must initialize this */
scan->numberOfKeys = nkeys;
@@ -123,11 +125,6 @@ RelationGetIndexScan(Relation indexRelation, int nkeys, int norderbys)
scan->xs_hitup = NULL;
scan->xs_hitupdesc = NULL;
- ItemPointerSetInvalid(&scan->xs_ctup.t_self);
- scan->xs_ctup.t_data = NULL;
- scan->xs_cbuf = InvalidBuffer;
- scan->xs_continue_hot = false;
-
return scan;
}
@@ -335,6 +332,7 @@ systable_beginscan(Relation heapRelation,
sysscan->heap_rel = heapRelation;
sysscan->irel = irel;
+ sysscan->slot = table_slot_create(heapRelation, NULL);
if (snapshot == NULL)
{
@@ -384,9 +382,9 @@ systable_beginscan(Relation heapRelation,
* disadvantage; and there are no compensating advantages, because
* it's unlikely that such scans will occur in parallel.
*/
- sysscan->scan = heap_beginscan_strat(heapRelation, snapshot,
- nkeys, key,
- true, false);
+ sysscan->scan = table_beginscan_strat(heapRelation, snapshot,
+ nkeys, key,
+ true, false);
sysscan->iscan = NULL;
}
@@ -401,28 +399,46 @@ systable_beginscan(Relation heapRelation,
* Note that returned tuple is a reference to data in a disk buffer;
* it must not be modified, and should be presumed inaccessible after
* next getnext() or endscan() call.
+ *
+ * XXX: It'd probably make sense to offer a slot based interface, at least
+ * optionally.
*/
HeapTuple
systable_getnext(SysScanDesc sysscan)
{
- HeapTuple htup;
+ HeapTuple htup = NULL;
if (sysscan->irel)
{
- htup = index_getnext(sysscan->iscan, ForwardScanDirection);
+ if (index_getnext_slot(sysscan->iscan, ForwardScanDirection, sysscan->slot))
+ {
+ bool shouldFree;
- /*
- * We currently don't need to support lossy index operators for any
- * system catalog scan. It could be done here, using the scan keys to
- * drive the operator calls, if we arranged to save the heap attnums
- * during systable_beginscan(); this is practical because we still
- * wouldn't need to support indexes on expressions.
- */
- if (htup && sysscan->iscan->xs_recheck)
- elog(ERROR, "system catalog scans with lossy index conditions are not implemented");
+ htup = ExecFetchSlotHeapTuple(sysscan->slot, false, &shouldFree);
+ Assert(!shouldFree);
+
+ /*
+ * We currently don't need to support lossy index operators for
+ * any system catalog scan. It could be done here, using the scan
+ * keys to drive the operator calls, if we arranged to save the
+ * heap attnums during systable_beginscan(); this is practical
+ * because we still wouldn't need to support indexes on
+ * expressions.
+ */
+ if (sysscan->iscan->xs_recheck)
+ elog(ERROR, "system catalog scans with lossy index conditions are not implemented");
+ }
}
else
- htup = heap_getnext(sysscan->scan, ForwardScanDirection);
+ {
+ if (table_scan_getnextslot(sysscan->scan, ForwardScanDirection, sysscan->slot))
+ {
+ bool shouldFree;
+
+ htup = ExecFetchSlotHeapTuple(sysscan->slot, false, &shouldFree);
+ Assert(!shouldFree);
+ }
+ }
return htup;
}
@@ -446,37 +462,20 @@ systable_recheck_tuple(SysScanDesc sysscan, HeapTuple tup)
Snapshot freshsnap;
bool result;
+ Assert(tup == ExecFetchSlotHeapTuple(sysscan->slot, false, NULL));
+
/*
- * Trust that LockBuffer() and HeapTupleSatisfiesMVCC() do not themselves
+ * Trust that table_tuple_satisfies_snapshot() and its subsidiaries
+ * (commonly LockBuffer() and HeapTupleSatisfiesMVCC()) do not themselves
* acquire snapshots, so we need not register the snapshot. Those
* facilities are too low-level to have any business scanning tables.
*/
freshsnap = GetCatalogSnapshot(RelationGetRelid(sysscan->heap_rel));
- if (sysscan->irel)
- {
- IndexScanDesc scan = sysscan->iscan;
-
- Assert(IsMVCCSnapshot(scan->xs_snapshot));
- Assert(tup == &scan->xs_ctup);
- Assert(BufferIsValid(scan->xs_cbuf));
- /* must hold a buffer lock to call HeapTupleSatisfiesVisibility */
- LockBuffer(scan->xs_cbuf, BUFFER_LOCK_SHARE);
- result = HeapTupleSatisfiesVisibility(tup, freshsnap, scan->xs_cbuf);
- LockBuffer(scan->xs_cbuf, BUFFER_LOCK_UNLOCK);
- }
- else
- {
- HeapScanDesc scan = sysscan->scan;
-
- Assert(IsMVCCSnapshot(scan->rs_snapshot));
- Assert(tup == &scan->rs_ctup);
- Assert(BufferIsValid(scan->rs_cbuf));
- /* must hold a buffer lock to call HeapTupleSatisfiesVisibility */
- LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
- result = HeapTupleSatisfiesVisibility(tup, freshsnap, scan->rs_cbuf);
- LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
- }
+ result = table_tuple_satisfies_snapshot(sysscan->heap_rel,
+ sysscan->slot,
+ freshsnap);
+
return result;
}
@@ -488,13 +487,19 @@ systable_recheck_tuple(SysScanDesc sysscan, HeapTuple tup)
void
systable_endscan(SysScanDesc sysscan)
{
+ if (sysscan->slot)
+ {
+ ExecDropSingleTupleTableSlot(sysscan->slot);
+ sysscan->slot = NULL;
+ }
+
if (sysscan->irel)
{
index_endscan(sysscan->iscan);
index_close(sysscan->irel, AccessShareLock);
}
else
- heap_endscan(sysscan->scan);
+ table_endscan(sysscan->scan);
if (sysscan->snapshot)
UnregisterSnapshot(sysscan->snapshot);
@@ -541,6 +546,7 @@ systable_beginscan_ordered(Relation heapRelation,
sysscan->heap_rel = heapRelation;
sysscan->irel = indexRelation;
+ sysscan->slot = table_slot_create(heapRelation, NULL);
if (snapshot == NULL)
{
@@ -586,10 +592,12 @@ systable_beginscan_ordered(Relation heapRelation,
HeapTuple
systable_getnext_ordered(SysScanDesc sysscan, ScanDirection direction)
{
- HeapTuple htup;
+ HeapTuple htup = NULL;
Assert(sysscan->irel);
- htup = index_getnext(sysscan->iscan, direction);
+ if (index_getnext_slot(sysscan->iscan, direction, sysscan->slot))
+ htup = ExecFetchSlotHeapTuple(sysscan->slot, false, NULL);
+
/* See notes in systable_getnext */
if (htup && sysscan->iscan->xs_recheck)
elog(ERROR, "system catalog scans with lossy index conditions are not implemented");
@@ -603,6 +611,12 @@ systable_getnext_ordered(SysScanDesc sysscan, ScanDirection direction)
void
systable_endscan_ordered(SysScanDesc sysscan)
{
+ if (sysscan->slot)
+ {
+ ExecDropSingleTupleTableSlot(sysscan->slot);
+ sysscan->slot = NULL;
+ }
+
Assert(sysscan->irel);
index_endscan(sysscan->iscan);
if (sysscan->snapshot)
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 4ad30186d97..ae1c87ebadd 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -72,6 +72,7 @@
#include "access/amapi.h"
#include "access/heapam.h"
#include "access/relscan.h"
+#include "access/tableam.h"
#include "access/transam.h"
#include "access/xlog.h"
#include "catalog/index.h"
@@ -235,6 +236,9 @@ index_beginscan(Relation heapRelation,
scan->heapRelation = heapRelation;
scan->xs_snapshot = snapshot;
+ /* prepare to fetch index matches from table */
+ scan->xs_heapfetch = table_index_fetch_begin(heapRelation);
+
return scan;
}
@@ -318,16 +322,12 @@ index_rescan(IndexScanDesc scan,
Assert(nkeys == scan->numberOfKeys);
Assert(norderbys == scan->numberOfOrderBys);
- /* Release any held pin on a heap page */
- if (BufferIsValid(scan->xs_cbuf))
- {
- ReleaseBuffer(scan->xs_cbuf);
- scan->xs_cbuf = InvalidBuffer;
- }
-
- scan->xs_continue_hot = false;
+ /* Release resources (like buffer pins) from table accesses */
+ if (scan->xs_heapfetch)
+ table_index_fetch_reset(scan->xs_heapfetch);
scan->kill_prior_tuple = false; /* for safety */
+ scan->xs_heap_continue = false;
scan->indexRelation->rd_indam->amrescan(scan, keys, nkeys,
orderbys, norderbys);
@@ -343,11 +343,11 @@ index_endscan(IndexScanDesc scan)
SCAN_CHECKS;
CHECK_SCAN_PROCEDURE(amendscan);
- /* Release any held pin on a heap page */
- if (BufferIsValid(scan->xs_cbuf))
+ /* Release resources (like buffer pins) from table accesses */
+ if (scan->xs_heapfetch)
{
- ReleaseBuffer(scan->xs_cbuf);
- scan->xs_cbuf = InvalidBuffer;
+ table_index_fetch_end(scan->xs_heapfetch);
+ scan->xs_heapfetch = NULL;
}
/* End the AM's scan */
@@ -379,17 +379,16 @@ index_markpos(IndexScanDesc scan)
/* ----------------
* index_restrpos - restore a scan position
*
- * NOTE: this only restores the internal scan state of the index AM.
- * The current result tuple (scan->xs_ctup) doesn't change. See comments
- * for ExecRestrPos().
- *
- * NOTE: in the presence of HOT chains, mark/restore only works correctly
- * if the scan's snapshot is MVCC-safe; that ensures that there's at most one
- * returnable tuple in each HOT chain, and so restoring the prior state at the
- * granularity of the index AM is sufficient. Since the only current user
- * of mark/restore functionality is nodeMergejoin.c, this effectively means
- * that merge-join plans only work for MVCC snapshots. This could be fixed
- * if necessary, but for now it seems unimportant.
+ * NOTE: this only restores the internal scan state of the index AM. See
+ * comments for ExecRestrPos().
+ *
+ * NOTE: For heap, in the presence of HOT chains, mark/restore only works
+ * correctly if the scan's snapshot is MVCC-safe; that ensures that there's at
+ * most one returnable tuple in each HOT chain, and so restoring the prior
+ * state at the granularity of the index AM is sufficient. Since the only
+ * current user of mark/restore functionality is nodeMergejoin.c, this
+ * effectively means that merge-join plans only work for MVCC snapshots. This
+ * could be fixed if necessary, but for now it seems unimportant.
* ----------------
*/
void
@@ -400,9 +399,12 @@ index_restrpos(IndexScanDesc scan)
SCAN_CHECKS;
CHECK_SCAN_PROCEDURE(amrestrpos);
- scan->xs_continue_hot = false;
+ /* release resources (like buffer pins) from table accesses */
+ if (scan->xs_heapfetch)
+ table_index_fetch_reset(scan->xs_heapfetch);
scan->kill_prior_tuple = false; /* for safety */
+ scan->xs_heap_continue = false;
scan->indexRelation->rd_indam->amrestrpos(scan);
}
@@ -483,6 +485,9 @@ index_parallelrescan(IndexScanDesc scan)
{
SCAN_CHECKS;
+ if (scan->xs_heapfetch)
+ table_index_fetch_reset(scan->xs_heapfetch);
+
/* amparallelrescan is optional; assume no-op if not provided by AM */
if (scan->indexRelation->rd_indam->amparallelrescan != NULL)
scan->indexRelation->rd_indam->amparallelrescan(scan);
@@ -513,6 +518,9 @@ index_beginscan_parallel(Relation heaprel, Relation indexrel, int nkeys,
scan->heapRelation = heaprel;
scan->xs_snapshot = snapshot;
+ /* prepare to fetch index matches from table */
+ scan->xs_heapfetch = table_index_fetch_begin(heaprel);
+
return scan;
}
@@ -535,7 +543,7 @@ index_getnext_tid(IndexScanDesc scan, ScanDirection direction)
/*
* The AM's amgettuple proc finds the next index entry matching the scan
- * keys, and puts the TID into scan->xs_ctup.t_self. It should also set
+ * keys, and puts the TID into scan->xs_heaptid. It should also set
* scan->xs_recheck and possibly scan->xs_itup/scan->xs_hitup, though we
* pay no attention to those fields here.
*/
@@ -543,23 +551,23 @@ index_getnext_tid(IndexScanDesc scan, ScanDirection direction)
/* Reset kill flag immediately for safety */
scan->kill_prior_tuple = false;
+ scan->xs_heap_continue = false;
/* If we're out of index entries, we're done */
if (!found)
{
- /* ... but first, release any held pin on a heap page */
- if (BufferIsValid(scan->xs_cbuf))
- {
- ReleaseBuffer(scan->xs_cbuf);
- scan->xs_cbuf = InvalidBuffer;
- }
+ /* release resources (like buffer pins) from table accesses */
+ if (scan->xs_heapfetch)
+ table_index_fetch_reset(scan->xs_heapfetch);
+
return NULL;
}
+ Assert(ItemPointerIsValid(&scan->xs_heaptid));
pgstat_count_index_tuples(scan->indexRelation, 1);
/* Return the TID of the tuple we found. */
- return &scan->xs_ctup.t_self;
+ return &scan->xs_heaptid;
}
/* ----------------
@@ -580,53 +588,18 @@ index_getnext_tid(IndexScanDesc scan, ScanDirection direction)
* enough information to do it efficiently in the general case.
* ----------------
*/
-HeapTuple
-index_fetch_heap(IndexScanDesc scan)
+bool
+index_fetch_heap(IndexScanDesc scan, TupleTableSlot *slot)
{
- ItemPointer tid = &scan->xs_ctup.t_self;
bool all_dead = false;
- bool got_heap_tuple;
-
- /* We can skip the buffer-switching logic if we're in mid-HOT chain. */
- if (!scan->xs_continue_hot)
- {
- /* Switch to correct buffer if we don't have it already */
- Buffer prev_buf = scan->xs_cbuf;
-
- scan->xs_cbuf = ReleaseAndReadBuffer(scan->xs_cbuf,
- scan->heapRelation,
- ItemPointerGetBlockNumber(tid));
+ bool found;
- /*
- * Prune page, but only if we weren't already on this page
- */
- if (prev_buf != scan->xs_cbuf)
- heap_page_prune_opt(scan->heapRelation, scan->xs_cbuf);
- }
+ found = table_index_fetch_tuple(scan->xs_heapfetch, &scan->xs_heaptid,
+ scan->xs_snapshot, slot,
+ &scan->xs_heap_continue, &all_dead);
- /* Obtain share-lock on the buffer so we can examine visibility */
- LockBuffer(scan->xs_cbuf, BUFFER_LOCK_SHARE);
- got_heap_tuple = heap_hot_search_buffer(tid, scan->heapRelation,
- scan->xs_cbuf,
- scan->xs_snapshot,
- &scan->xs_ctup,
- &all_dead,
- !scan->xs_continue_hot);
- LockBuffer(scan->xs_cbuf, BUFFER_LOCK_UNLOCK);
-
- if (got_heap_tuple)
- {
- /*
- * Only in a non-MVCC snapshot can more than one member of the HOT
- * chain be visible.
- */
- scan->xs_continue_hot = !IsMVCCSnapshot(scan->xs_snapshot);
+ if (found)
pgstat_count_heap_fetch(scan->indexRelation);
- return &scan->xs_ctup;
- }
-
- /* We've reached the end of the HOT chain. */
- scan->xs_continue_hot = false;
/*
* If we scanned a whole HOT chain and found only dead tuples, tell index
@@ -638,17 +611,17 @@ index_fetch_heap(IndexScanDesc scan)
if (!scan->xactStartedInRecovery)
scan->kill_prior_tuple = all_dead;
- return NULL;
+ return found;
}
/* ----------------
- * index_getnext - get the next heap tuple from a scan
+ * index_getnext_slot - get the next tuple from a scan
*
- * The result is the next heap tuple satisfying the scan keys and the
- * snapshot, or NULL if no more matching tuples exist.
+ * The result is true if a tuple satisfying the scan keys and the snapshot was
+ * found, false otherwise. The tuple is stored in the specified slot.
*
- * On success, the buffer containing the heap tup is pinned (the pin will be
- * dropped in a future index_getnext_tid, index_fetch_heap or index_endscan
+ * On success, resources (like buffer pins) are likely to be held, and will be
+ * dropped by a future index_getnext_tid, index_fetch_heap or index_endscan
+ * call.
*
* Note: caller must check scan->xs_recheck, and perform rechecking of the
@@ -656,32 +629,23 @@ index_fetch_heap(IndexScanDesc scan)
* enough information to do it efficiently in the general case.
* ----------------
*/
-HeapTuple
-index_getnext(IndexScanDesc scan, ScanDirection direction)
+bool
+index_getnext_slot(IndexScanDesc scan, ScanDirection direction, TupleTableSlot *slot)
{
- HeapTuple heapTuple;
- ItemPointer tid;
-
for (;;)
{
- if (scan->xs_continue_hot)
- {
- /*
- * We are resuming scan of a HOT chain after having returned an
- * earlier member. Must still hold pin on current heap page.
- */
- Assert(BufferIsValid(scan->xs_cbuf));
- Assert(ItemPointerGetBlockNumber(&scan->xs_ctup.t_self) ==
- BufferGetBlockNumber(scan->xs_cbuf));
- }
- else
+ if (!scan->xs_heap_continue)
{
+ ItemPointer tid;
+
/* Time to fetch the next TID from the index */
tid = index_getnext_tid(scan, direction);
/* If we're out of index entries, we're done */
if (tid == NULL)
break;
+
+ Assert(ItemPointerEquals(tid, &scan->xs_heaptid));
}
/*
@@ -689,12 +653,12 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
* If we don't find anything, loop around and grab the next TID from
* the index.
*/
- heapTuple = index_fetch_heap(scan);
- if (heapTuple != NULL)
- return heapTuple;
+ Assert(ItemPointerIsValid(&scan->xs_heaptid));
+ if (index_fetch_heap(scan, slot))
+ return true;
}
- return NULL; /* failure exit */
+ return false;
}
/* ----------------