aboutsummaryrefslogtreecommitdiff
path: root/src/backend/executor/nodeBitmapHeapscan.c
diff options
context:
space:
mode:
author: Tom Lane <tgl@sss.pgh.pa.us> 2007-09-20 17:56:33 +0000
committer: Tom Lane <tgl@sss.pgh.pa.us> 2007-09-20 17:56:33 +0000
commit 282d2a03dd30804b01f8042f640d638c2ee76604 (patch)
tree 004f08ce31f1bfb03ab55571ad7867babe5b3d7f /src/backend/executor/nodeBitmapHeapscan.c
parent bbf4fdc2538097bb3103806e1419ceef1f289203 (diff)
downloadpostgresql-282d2a03dd30804b01f8042f640d638c2ee76604.tar.gz
postgresql-282d2a03dd30804b01f8042f640d638c2ee76604.zip
HOT updates. When we update a tuple without changing any of its indexed
columns, and the new version can be stored on the same heap page, we no longer generate extra index entries for the new version. Instead, index searches follow the HOT-chain links to ensure they find the correct tuple version. In addition, this patch introduces the ability to "prune" dead tuples on a per-page basis, without having to do a complete VACUUM pass to recover space. VACUUM is still needed to clean up dead index entries, however. Pavan Deolasee, with help from a bunch of other people.
Diffstat (limited to 'src/backend/executor/nodeBitmapHeapscan.c')
-rw-r--r-- src/backend/executor/nodeBitmapHeapscan.c 100
1 file changed, 41 insertions(+), 59 deletions(-)
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index f1e30aeb8f0..87e0063a03a 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -21,7 +21,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.19 2007/09/12 22:10:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.20 2007/09/20 17:56:31 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -240,12 +240,7 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
BlockNumber page = tbmres->blockno;
Buffer buffer;
Snapshot snapshot;
- Page dp;
int ntup;
- int curslot;
- int minslot;
- int maxslot;
- int maxoff;
/*
* Acquire pin on the target heap page, trading in any pin we held before.
@@ -258,6 +253,13 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
buffer = scan->rs_cbuf;
snapshot = scan->rs_snapshot;
+ ntup = 0;
+
+ /*
+ * Prune and repair fragmentation for the whole page, if possible.
+ */
+ heap_page_prune_opt(scan->rs_rd, buffer, RecentGlobalXmin);
+
/*
* We must hold share lock on the buffer content while examining tuple
* visibility. Afterwards, however, the tuples we have found to be
@@ -265,71 +267,51 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
*/
LockBuffer(buffer, BUFFER_LOCK_SHARE);
- dp = (Page) BufferGetPage(buffer);
- maxoff = PageGetMaxOffsetNumber(dp);
-
/*
- * Determine how many entries we need to look at on this page. If the
- * bitmap is lossy then we need to look at each physical item pointer;
- * otherwise we just look through the offsets listed in tbmres.
+ * We need two separate strategies for lossy and non-lossy cases.
*/
if (tbmres->ntuples >= 0)
{
- /* non-lossy case */
- minslot = 0;
- maxslot = tbmres->ntuples - 1;
- }
- else
- {
- /* lossy case */
- minslot = FirstOffsetNumber;
- maxslot = maxoff;
- }
-
- ntup = 0;
- for (curslot = minslot; curslot <= maxslot; curslot++)
- {
- OffsetNumber targoffset;
- ItemId lp;
- HeapTupleData loctup;
- bool valid;
-
- if (tbmres->ntuples >= 0)
- {
- /* non-lossy case */
- targoffset = tbmres->offsets[curslot];
- }
- else
- {
- /* lossy case */
- targoffset = (OffsetNumber) curslot;
- }
-
/*
- * We'd better check for out-of-range offnum in case of VACUUM since
- * the TID was obtained.
+ * Bitmap is non-lossy, so we just look through the offsets listed in
+ * tbmres; but we have to follow any HOT chain starting at each such
+ * offset.
*/
- if (targoffset < FirstOffsetNumber || targoffset > maxoff)
- continue;
+ int curslot;
- lp = PageGetItemId(dp, targoffset);
+ for (curslot = 0; curslot < tbmres->ntuples; curslot++)
+ {
+ OffsetNumber offnum = tbmres->offsets[curslot];
+ ItemPointerData tid;
+ ItemPointerSet(&tid, page, offnum);
+ if (heap_hot_search_buffer(&tid, buffer, snapshot, NULL))
+ scan->rs_vistuples[ntup++] = ItemPointerGetOffsetNumber(&tid);
+ }
+ }
+ else
+ {
/*
- * Must check for deleted tuple.
+ * Bitmap is lossy, so we must examine each item pointer on the page.
+ * But we can ignore HOT chains, since we'll check each tuple anyway.
*/
- if (!ItemIdIsNormal(lp))
- continue;
+ Page dp = (Page) BufferGetPage(buffer);
+ OffsetNumber maxoff = PageGetMaxOffsetNumber(dp);
+ OffsetNumber offnum;
- /*
- * check time qualification of tuple, remember it if valid
- */
- loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
- loctup.t_len = ItemIdGetLength(lp);
- ItemPointerSet(&(loctup.t_self), page, targoffset);
+ for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum++)
+ {
+ ItemId lp;
+ HeapTupleData loctup;
- valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
- if (valid)
- scan->rs_vistuples[ntup++] = targoffset;
+ lp = PageGetItemId(dp, offnum);
+ if (!ItemIdIsNormal(lp))
+ continue;
+ loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
+ loctup.t_len = ItemIdGetLength(lp);
+ if (HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer))
+ scan->rs_vistuples[ntup++] = offnum;
+ }
}
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);