Diffstat (limited to 'src/backend/executor')
-rw-r--r--  src/backend/executor/execMain.c            |   6
-rw-r--r--  src/backend/executor/execUtils.c           |  10
-rw-r--r--  src/backend/executor/nodeBitmapHeapscan.c  | 100
-rw-r--r--  src/backend/executor/spi.c                 |   4
4 files changed, 57 insertions, 63 deletions
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 7e5873b89df..485f6ddc1ee 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -26,7 +26,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.297 2007/09/07 20:59:26 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.298 2007/09/20 17:56:31 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -1813,8 +1813,10 @@ lreplace:;
      *
      * Note: heap_update returns the tid (location) of the new tuple in the
      * t_self field.
+     *
+     * If it's a HOT update, we mustn't insert new index entries.
      */
-    if (resultRelInfo->ri_NumIndices > 0)
+    if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple))
         ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
 
     /* AFTER ROW UPDATE Triggers */
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 1d478062998..790a9dccc10 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.150 2007/08/15 21:39:50 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.151 2007/09/20 17:56:31 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -981,6 +981,10 @@ ExecCloseIndices(ResultRelInfo *resultRelInfo)
  *        stuff as it only exists here because the genam stuff
  *        doesn't provide the functionality needed by the
  *        executor.. -cim 9/27/89
+ *
+ *        CAUTION: this must not be called for a HOT update.
+ *        We can't defend against that here for lack of info.
+ *        Should we change the API to make it safer?
  * ----------------------------------------------------------------
  */
 void
@@ -1029,6 +1033,10 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
 
         indexInfo = indexInfoArray[i];
 
+        /* If the index is marked as read-only, ignore it */
+        if (!indexInfo->ii_ReadyForInserts)
+            continue;
+
         /* Check for partial index */
         if (indexInfo->ii_Predicate != NIL)
         {
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index f1e30aeb8f0..87e0063a03a 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -21,7 +21,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.19 2007/09/12 22:10:26 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.20 2007/09/20 17:56:31 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -240,12 +240,7 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
     BlockNumber page = tbmres->blockno;
     Buffer      buffer;
     Snapshot    snapshot;
-    Page        dp;
     int         ntup;
-    int         curslot;
-    int         minslot;
-    int         maxslot;
-    int         maxoff;
 
     /*
      * Acquire pin on the target heap page, trading in any pin we held before.
@@ -258,6 +253,13 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
     buffer = scan->rs_cbuf;
     snapshot = scan->rs_snapshot;
 
+    ntup = 0;
+
+    /*
+     * Prune and repair fragmentation for the whole page, if possible.
+     */
+    heap_page_prune_opt(scan->rs_rd, buffer, RecentGlobalXmin);
+
     /*
      * We must hold share lock on the buffer content while examining tuple
      * visibility.  Afterwards, however, the tuples we have found to be
@@ -265,71 +267,51 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
      */
     LockBuffer(buffer, BUFFER_LOCK_SHARE);
 
-    dp = (Page) BufferGetPage(buffer);
-    maxoff = PageGetMaxOffsetNumber(dp);
-
     /*
-     * Determine how many entries we need to look at on this page. If the
-     * bitmap is lossy then we need to look at each physical item pointer;
-     * otherwise we just look through the offsets listed in tbmres.
+     * We need two separate strategies for lossy and non-lossy cases.
      */
     if (tbmres->ntuples >= 0)
     {
-        /* non-lossy case */
-        minslot = 0;
-        maxslot = tbmres->ntuples - 1;
-    }
-    else
-    {
-        /* lossy case */
-        minslot = FirstOffsetNumber;
-        maxslot = maxoff;
-    }
-
-    ntup = 0;
-    for (curslot = minslot; curslot <= maxslot; curslot++)
-    {
-        OffsetNumber targoffset;
-        ItemId      lp;
-        HeapTupleData loctup;
-        bool        valid;
-
-        if (tbmres->ntuples >= 0)
-        {
-            /* non-lossy case */
-            targoffset = tbmres->offsets[curslot];
-        }
-        else
-        {
-            /* lossy case */
-            targoffset = (OffsetNumber) curslot;
-        }
-
         /*
-         * We'd better check for out-of-range offnum in case of VACUUM since
-         * the TID was obtained.
+         * Bitmap is non-lossy, so we just look through the offsets listed in
+         * tbmres; but we have to follow any HOT chain starting at each such
+         * offset.
         */
-        if (targoffset < FirstOffsetNumber || targoffset > maxoff)
-            continue;
+        int         curslot;
 
-        lp = PageGetItemId(dp, targoffset);
+        for (curslot = 0; curslot < tbmres->ntuples; curslot++)
+        {
+            OffsetNumber offnum = tbmres->offsets[curslot];
+            ItemPointerData tid;
 
+            ItemPointerSet(&tid, page, offnum);
+            if (heap_hot_search_buffer(&tid, buffer, snapshot, NULL))
+                scan->rs_vistuples[ntup++] = ItemPointerGetOffsetNumber(&tid);
+        }
+    }
+    else
+    {
         /*
-         * Must check for deleted tuple.
+         * Bitmap is lossy, so we must examine each item pointer on the page.
+         * But we can ignore HOT chains, since we'll check each tuple anyway.
         */
-        if (!ItemIdIsNormal(lp))
-            continue;
+        Page        dp = (Page) BufferGetPage(buffer);
+        OffsetNumber maxoff = PageGetMaxOffsetNumber(dp);
+        OffsetNumber offnum;
 
-        /*
-         * check time qualification of tuple, remember it if valid
-         */
-        loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
-        loctup.t_len = ItemIdGetLength(lp);
-        ItemPointerSet(&(loctup.t_self), page, targoffset);
+        for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum++)
+        {
+            ItemId      lp;
+            HeapTupleData loctup;
 
-        valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
-        if (valid)
-            scan->rs_vistuples[ntup++] = targoffset;
+            lp = PageGetItemId(dp, offnum);
+            if (!ItemIdIsNormal(lp))
+                continue;
+            loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
+            loctup.t_len = ItemIdGetLength(lp);
+            if (HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer))
+                scan->rs_vistuples[ntup++] = offnum;
+        }
     }
 
     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index af94ad1a3b8..875e4da2914 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.180 2007/08/15 19:15:46 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.181 2007/09/20 17:56:31 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -1407,6 +1407,7 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan)
         plansource->num_params = nargs;
         plansource->fully_planned = true;
         plansource->fixed_result = false;
+        /* no need to set search_path, generation or saved_xmin */
         plansource->resultDesc = PlanCacheComputeResultDesc(stmt_list);
         plansource->plan = cplan;
 
@@ -1973,6 +1974,7 @@ _SPI_copy_plan(SPIPlanPtr plan, MemoryContext parentcxt)
         newsource->num_params = newplan->nargs;
         newsource->fully_planned = plansource->fully_planned;
         newsource->fixed_result = plansource->fixed_result;
+        /* no need to worry about search_path, generation or saved_xmin */
         if (plansource->resultDesc)
             newsource->resultDesc = CreateTupleDescCopy(plansource->resultDesc);
         newsource->plan = newcplan;
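
For reference, the non-lossy branch added to bitgetpage() above treats each offset taken from the TID bitmap as a potential HOT-chain root and lets heap_hot_search_buffer() resolve it to at most one visible chain member, updating the TID in place. The following is a minimal sketch of that pattern only, not part of the patch: the helper name collect_visible_offsets and its standalone framing are invented for illustration, the header list is approximate for the 2007-era tree, and the calls and arguments are the same ones the patch itself uses.

/*
 * Sketch of the non-lossy strategy in bitgetpage(): each bitmap offset is
 * treated as a potential HOT-chain root, and heap_hot_search_buffer()
 * reports at most one visible member of that chain, updating tid in place.
 * Assumes the caller already holds a pin and share lock on buffer.
 */
#include "postgres.h"
#include "access/heapam.h"
#include "access/relscan.h"
#include "nodes/tidbitmap.h"

static int
collect_visible_offsets(HeapScanDesc scan, TBMIterateResult *tbmres,
                        Buffer buffer, Snapshot snapshot)
{
    BlockNumber page = tbmres->blockno;
    int         ntup = 0;
    int         curslot;

    for (curslot = 0; curslot < tbmres->ntuples; curslot++)
    {
        OffsetNumber offnum = tbmres->offsets[curslot];
        ItemPointerData tid;

        ItemPointerSet(&tid, page, offnum);
        /* NULL for all_dead: no interest in dead-chain hints here */
        if (heap_hot_search_buffer(&tid, buffer, snapshot, NULL))
            scan->rs_vistuples[ntup++] = ItemPointerGetOffsetNumber(&tid);
    }
    return ntup;
}

The lossy branch needs no such chain-following: as the patch's own comment notes, it already visits every normal line pointer on the page and checks each tuple's visibility directly.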