path: root/src/backend/executor/nodeSeqscan.c
author     Andres Freund <andres@anarazel.de>  2019-03-11 12:46:41 -0700
committer  Andres Freund <andres@anarazel.de>  2019-03-11 12:46:41 -0700
commit     c2fe139c201c48f1133e9fbea2dd99b8efe2fadd (patch)
tree       ab0a6261b412b8284b6c91af158f72af97e02a35 /src/backend/executor/nodeSeqscan.c
parent     a47841528107921f02c280e0c5f91c5a1d86adb0 (diff)
tableam: Add and use scan APIs.
To allow table accesses to be not directly dependent on heap, several new abstractions are needed. Specifically:

1) Heap scans need to be generalized into table scans. Do this by introducing TableScanDesc, which will be the "base class" for individual AMs. This contains the AM-independent fields from HeapScanDesc.

The previous heap_{beginscan,rescan,endscan} et al. have been replaced with table_ versions.

There's no direct replacement for heap_getnext(), as that returned a HeapTuple, which is undesirable for other AMs. Instead there's table_scan_getnextslot(). But note that heap_getnext() lives on; it's still used widely to access catalog tables.

This is achieved by new scan_begin, scan_end, scan_rescan, scan_getnextslot callbacks.

2) The portion of parallel scans that's shared between backends needs to be set up without the user doing per-AM work. To achieve that, new parallelscan_{estimate, initialize, reinitialize} callbacks are introduced, which operate on a new ParallelTableScanDesc, which again can be subclassed by AMs.

As it is likely that several AMs are going to be block oriented, block-oriented callbacks that can be shared between such AMs are provided and used by heap: table_block_parallelscan_{estimate, initialize, reinitialize} as callbacks, and table_block_parallelscan_{nextpage, init} for use in AMs. These operate on a ParallelBlockTableScanDesc.

3) Index scans need to be able to access tables to return a tuple, and there needs to be state across individual accesses to the heap to store state like buffers. That's now handled by introducing a sort-of-scan, IndexFetchTable, which again is intended to be subclassed by individual AMs (for heap, IndexFetchHeap).

The relevant callbacks for an AM are index_fetch_{begin, reset, end} to manage the necessary state, and index_fetch_tuple to retrieve an indexed tuple. Note that index_fetch_tuple implementations need to be smarter than blindly fetching tuples for AMs that have optimizations similar to heap's HOT: the currently live tuple in the update chain needs to be fetched if appropriate.

Similar to table_scan_getnextslot(), it's undesirable to continue to return HeapTuples. Thus index_fetch_heap() (we might want to rename that later) now accepts a slot as an argument. Core code doesn't have a lot of call sites performing index scans without going through the systable_* API (in contrast to the loads of heap_getnext() calls working directly with HeapTuples).

Index scans now store the result of a search in IndexScanDesc->xs_heaptid, rather than xs_ctup->t_self. As the target is not generally a HeapTuple anymore, that seems cleaner.

To be able to sensibly adapt code to use the above, two further callbacks have been introduced:

a) slot_callbacks returns a TupleTableSlotOps* suitable for creating slots capable of holding a tuple of the AM's type. table_slot_callbacks() and table_slot_create() are based upon that, but have additional logic to deal with views, foreign tables, etc.

While this change could have been done separately, nearly all the call sites that needed to be adapted for the rest of this commit would also have needed to be adapted for table_slot_callbacks(), making separation not worthwhile.

b) tuple_satisfies_snapshot checks whether the tuple in a slot is currently visible according to a snapshot. That's required as a few places now don't have a buffer + HeapTuple around, but a slot (which in heap's case internally has that information).
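For illustration, a minimal sketch of the resulting serial scan loop (points 1 and a above), modeled on the nodeSeqscan.c hunks below; the helper name scan_all_tuples is hypothetical, and rel/snapshot are assumed to be an opened Relation and a valid Snapshot obtained elsewhere:

    #include "postgres.h"
    #include "access/tableam.h"
    #include "executor/tuptable.h"

    /* Hypothetical helper: visit every tuple of rel via the new table AM API. */
    static void
    scan_all_tuples(Relation rel, Snapshot snapshot)
    {
        /* slot whose TupleTableSlotOps match the relation's AM (point a) */
        TupleTableSlot *slot = table_slot_create(rel, NULL);
        /* table_beginscan() replaces heap_beginscan(); no scan keys here */
        TableScanDesc scan = table_beginscan(rel, snapshot, 0, NULL);

        /* table_scan_getnextslot() stores the next tuple in the slot and
         * returns false once the scan is exhausted */
        while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
        {
            /* ... process the tuple in the slot ... */
        }

        table_endscan(scan);
        ExecDropSingleTupleTableSlot(slot);
    }

Note that no HeapTuple appears anywhere: the slot abstracts the AM's tuple representation, which is what lets SeqNext() below shrink to a single table_scan_getnextslot() call.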
Additionally a few infrastructure changes were needed:

I) SysScanDesc, as used by systable_{beginscan, getnext} et al., now internally uses a slot to keep track of tuples. While systable_getnext() still returns HeapTuples, and will for the foreseeable future, the index API (see 3) above) now only deals with slots.

The remainder, and largest part, of this commit is then adjusting all scans in postgres to use the new APIs.

Author: Andres Freund, Haribabu Kommi, Alvaro Herrera
Discussion: https://postgr.es/m/20180703070645.wchpu5muyto5n647@alap3.anarazel.de
            https://postgr.es/m/20160812231527.GA690404@alvherre.pgsql
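The parallel-scan hunks below follow the pattern from point 2: the leader estimates and initializes the shared, AM-specific scan state in dynamic shared memory, and leader and workers alike then attach to it with table_beginscan_parallel(). A condensed sketch of the leader side, assuming toc is the ParallelContext's shm_toc; the helper name begin_shared_scan is hypothetical, and the shm_toc_estimate_chunk()/shm_toc_insert() bookkeeping visible in the diff is elided:

    #include "postgres.h"
    #include "access/tableam.h"
    #include "storage/shm_toc.h"

    /* Hypothetical helper: leader-side setup of a shared table scan. */
    static TableScanDesc
    begin_shared_scan(Relation rel, Snapshot snapshot, shm_toc *toc)
    {
        /* ask the AM how much shared memory its parallel scan state needs */
        Size        len = table_parallelscan_estimate(rel, snapshot);
        ParallelTableScanDesc pscan = shm_toc_allocate(toc, len);

        /* let the AM fill in its shared state */
        table_parallelscan_initialize(rel, pscan, snapshot);

        /* workers attach identically, after finding pscan via shm_toc_lookup() */
        return table_beginscan_parallel(rel, pscan);
    }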
Diffstat (limited to 'src/backend/executor/nodeSeqscan.c')
-rw-r--r--  src/backend/executor/nodeSeqscan.c  67
1 file changed, 26 insertions(+), 41 deletions(-)
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index e5482859efc..8bd7430a918 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -27,8 +27,8 @@
*/
#include "postgres.h"
-#include "access/heapam.h"
#include "access/relscan.h"
+#include "access/tableam.h"
#include "executor/execdebug.h"
#include "executor/nodeSeqscan.h"
#include "utils/rel.h"
@@ -49,8 +49,7 @@ static TupleTableSlot *SeqNext(SeqScanState *node);
static TupleTableSlot *
SeqNext(SeqScanState *node)
{
- HeapTuple tuple;
- HeapScanDesc scandesc;
+ TableScanDesc scandesc;
EState *estate;
ScanDirection direction;
TupleTableSlot *slot;
@@ -69,34 +68,18 @@ SeqNext(SeqScanState *node)
* We reach here if the scan is not parallel, or if we're serially
* executing a scan that was planned to be parallel.
*/
- scandesc = heap_beginscan(node->ss.ss_currentRelation,
- estate->es_snapshot,
- 0, NULL);
+ scandesc = table_beginscan(node->ss.ss_currentRelation,
+ estate->es_snapshot,
+ 0, NULL);
node->ss.ss_currentScanDesc = scandesc;
}
/*
* get the next tuple from the table
*/
- tuple = heap_getnext(scandesc, direction);
-
- /*
- * save the tuple and the buffer returned to us by the access methods in
- * our scan tuple slot and return the slot. Note: we pass 'false' because
- * tuples returned by heap_getnext() are pointers onto disk pages and were
- * not created with palloc() and so should not be pfree()'d. Note also
- * that ExecStoreHeapTuple will increment the refcount of the buffer; the
- * refcount will not be dropped until the tuple table slot is cleared.
- */
- if (tuple)
- ExecStoreBufferHeapTuple(tuple, /* tuple to store */
- slot, /* slot to store in */
- scandesc->rs_cbuf); /* buffer associated
- * with this tuple */
- else
- ExecClearTuple(slot);
-
- return slot;
+ if (table_scan_getnextslot(scandesc, direction, slot))
+ return slot;
+ return NULL;
}
/*
@@ -174,7 +157,7 @@ ExecInitSeqScan(SeqScan *node, EState *estate, int eflags)
/* and create slot with the appropriate rowtype */
ExecInitScanTupleSlot(estate, &scanstate->ss,
RelationGetDescr(scanstate->ss.ss_currentRelation),
- &TTSOpsBufferHeapTuple);
+ table_slot_callbacks(scanstate->ss.ss_currentRelation));
/*
* Initialize result type and projection.
@@ -200,7 +183,7 @@ ExecInitSeqScan(SeqScan *node, EState *estate, int eflags)
void
ExecEndSeqScan(SeqScanState *node)
{
- HeapScanDesc scanDesc;
+ TableScanDesc scanDesc;
/*
* get information from node
@@ -223,7 +206,7 @@ ExecEndSeqScan(SeqScanState *node)
* close heap scan
*/
if (scanDesc != NULL)
- heap_endscan(scanDesc);
+ table_endscan(scanDesc);
}
/* ----------------------------------------------------------------
@@ -240,13 +223,13 @@ ExecEndSeqScan(SeqScanState *node)
void
ExecReScanSeqScan(SeqScanState *node)
{
- HeapScanDesc scan;
+ TableScanDesc scan;
scan = node->ss.ss_currentScanDesc;
if (scan != NULL)
- heap_rescan(scan, /* scan desc */
- NULL); /* new scan keys */
+ table_rescan(scan, /* scan desc */
+ NULL); /* new scan keys */
ExecScanReScan((ScanState *) node);
}
@@ -269,7 +252,8 @@ ExecSeqScanEstimate(SeqScanState *node,
{
EState *estate = node->ss.ps.state;
- node->pscan_len = heap_parallelscan_estimate(estate->es_snapshot);
+ node->pscan_len = table_parallelscan_estimate(node->ss.ss_currentRelation,
+ estate->es_snapshot);
shm_toc_estimate_chunk(&pcxt->estimator, node->pscan_len);
shm_toc_estimate_keys(&pcxt->estimator, 1);
}
@@ -285,15 +269,15 @@ ExecSeqScanInitializeDSM(SeqScanState *node,
ParallelContext *pcxt)
{
EState *estate = node->ss.ps.state;
- ParallelHeapScanDesc pscan;
+ ParallelTableScanDesc pscan;
pscan = shm_toc_allocate(pcxt->toc, node->pscan_len);
- heap_parallelscan_initialize(pscan,
- node->ss.ss_currentRelation,
- estate->es_snapshot);
+ table_parallelscan_initialize(node->ss.ss_currentRelation,
+ pscan,
+ estate->es_snapshot);
shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, pscan);
node->ss.ss_currentScanDesc =
- heap_beginscan_parallel(node->ss.ss_currentRelation, pscan);
+ table_beginscan_parallel(node->ss.ss_currentRelation, pscan);
}
/* ----------------------------------------------------------------
@@ -306,9 +290,10 @@ void
ExecSeqScanReInitializeDSM(SeqScanState *node,
ParallelContext *pcxt)
{
- HeapScanDesc scan = node->ss.ss_currentScanDesc;
+ ParallelTableScanDesc pscan;
- heap_parallelscan_reinitialize(scan->rs_parallel);
+ pscan = node->ss.ss_currentScanDesc->rs_parallel;
+ table_parallelscan_reinitialize(node->ss.ss_currentRelation, pscan);
}
/* ----------------------------------------------------------------
@@ -321,9 +306,9 @@ void
ExecSeqScanInitializeWorker(SeqScanState *node,
ParallelWorkerContext *pwcxt)
{
- ParallelHeapScanDesc pscan;
+ ParallelTableScanDesc pscan;
pscan = shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, false);
node->ss.ss_currentScanDesc =
- heap_beginscan_parallel(node->ss.ss_currentRelation, pscan);
+ table_beginscan_parallel(node->ss.ss_currentRelation, pscan);
}