aboutsummaryrefslogtreecommitdiff
path: root/src/backend/executor/execMain.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/backend/executor/execMain.c')
-rw-r--r--src/backend/executor/execMain.c289
1 files changed, 16 insertions, 273 deletions
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 63a34760eec..018e9912e94 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -2417,27 +2417,29 @@ ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
/*
- * Check a modified tuple to see if we want to process its updated version
- * under READ COMMITTED rules.
+ * Check the updated version of a tuple to see if we want to process it under
+ * READ COMMITTED rules.
*
* estate - outer executor state data
* epqstate - state for EvalPlanQual rechecking
* relation - table containing tuple
* rti - rangetable index of table containing tuple
- * lockmode - requested tuple lock mode
- * *tid - t_ctid from the outdated tuple (ie, next updated version)
- * priorXmax - t_xmax from the outdated tuple
+ * inputslot - tuple for processing - this can be the slot from
+ * EvalPlanQualSlot(), for increased efficiency.
*
- * *tid is also an output parameter: it's modified to hold the TID of the
- * latest version of the tuple (note this may be changed even on failure)
+ * This tests whether the tuple in inputslot still matches the relevant
+ * quals. For that result to be useful, typically the input tuple has to be
+ * the last row version (otherwise the result isn't particularly useful) and
+ * locked (otherwise the result might be out of date). That's typically
+ * achieved by using table_lock_tuple() with the
+ * TUPLE_LOCK_FLAG_FIND_LAST_VERSION flag.
*
* Returns a slot containing the new candidate update/delete tuple, or
* NULL if we determine we shouldn't process the row.
*/
TupleTableSlot *
EvalPlanQual(EState *estate, EPQState *epqstate,
- Relation relation, Index rti, LockTupleMode lockmode,
- ItemPointer tid, TransactionId priorXmax)
+ Relation relation, Index rti, TupleTableSlot *inputslot)
{
TupleTableSlot *slot;
TupleTableSlot *testslot;
@@ -2450,19 +2452,12 @@ EvalPlanQual(EState *estate, EPQState *epqstate,
EvalPlanQualBegin(epqstate, estate);
/*
- * Get and lock the updated version of the row; if fail, return NULL.
+ * Callers will often use the EvalPlanQualSlot to store the tuple to avoid
+ * an unnecessary copy.
*/
testslot = EvalPlanQualSlot(epqstate, relation, rti);
- if (!EvalPlanQualFetch(estate, relation, lockmode, LockWaitBlock,
- tid, priorXmax,
- testslot))
- return NULL;
-
- /*
- * For UPDATE/DELETE we have to return tid of actual row we're executing
- * PQ for.
- */
- *tid = testslot->tts_tid;
+ if (testslot != inputslot)
+ ExecCopySlot(testslot, inputslot);
/*
* Fetch any non-locked source rows
@@ -2495,258 +2490,6 @@ EvalPlanQual(EState *estate, EPQState *epqstate,
}
/*
- * Fetch a copy of the newest version of an outdated tuple
- *
- * estate - executor state data
- * relation - table containing tuple
- * lockmode - requested tuple lock mode
- * wait_policy - requested lock wait policy
- * *tid - t_ctid from the outdated tuple (ie, next updated version)
- * priorXmax - t_xmax from the outdated tuple
- * slot - slot to store newest tuple version
- *
- * Returns true, with slot containing the newest tuple version, or false if we
- * find that there is no newest version (ie, the row was deleted not updated).
- * We also return false if the tuple is locked and the wait policy is to skip
- * such tuples.
- *
- * If successful, we have locked the newest tuple version, so caller does not
- * need to worry about it changing anymore.
- */
-bool
-EvalPlanQualFetch(EState *estate, Relation relation, LockTupleMode lockmode,
- LockWaitPolicy wait_policy,
- ItemPointer tid, TransactionId priorXmax,
- TupleTableSlot *slot)
-{
- HeapTupleData tuple;
- SnapshotData SnapshotDirty;
-
- /*
- * fetch target tuple
- *
- * Loop here to deal with updated or busy tuples
- */
- InitDirtySnapshot(SnapshotDirty);
- tuple.t_self = *tid;
- for (;;)
- {
- Buffer buffer;
-
- if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
- {
- HTSU_Result test;
- HeapUpdateFailureData hufd;
-
- /*
- * If xmin isn't what we're expecting, the slot must have been
- * recycled and reused for an unrelated tuple. This implies that
- * the latest version of the row was deleted, so we need do
- * nothing. (Should be safe to examine xmin without getting
- * buffer's content lock. We assume reading a TransactionId to be
- * atomic, and Xmin never changes in an existing tuple, except to
- * invalid or frozen, and neither of those can match priorXmax.)
- */
- if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
- priorXmax))
- {
- ReleaseBuffer(buffer);
- return false;
- }
-
- /* otherwise xmin should not be dirty... */
- if (TransactionIdIsValid(SnapshotDirty.xmin))
- elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
-
- /*
- * If tuple is being updated by other transaction then we have to
- * wait for its commit/abort, or die trying.
- */
- if (TransactionIdIsValid(SnapshotDirty.xmax))
- {
- ReleaseBuffer(buffer);
- switch (wait_policy)
- {
- case LockWaitBlock:
- XactLockTableWait(SnapshotDirty.xmax,
- relation, &tuple.t_self,
- XLTW_FetchUpdated);
- break;
- case LockWaitSkip:
- if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
- return false; /* skip instead of waiting */
- break;
- case LockWaitError:
- if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
- ereport(ERROR,
- (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
- errmsg("could not obtain lock on row in relation \"%s\"",
- RelationGetRelationName(relation))));
- break;
- }
- continue; /* loop back to repeat heap_fetch */
- }
-
- /*
- * If tuple was inserted by our own transaction, we have to check
- * cmin against es_output_cid: cmin >= current CID means our
- * command cannot see the tuple, so we should ignore it. Otherwise
- * heap_lock_tuple() will throw an error, and so would any later
- * attempt to update or delete the tuple. (We need not check cmax
- * because HeapTupleSatisfiesDirty will consider a tuple deleted
- * by our transaction dead, regardless of cmax.) We just checked
- * that priorXmax == xmin, so we can test that variable instead of
- * doing HeapTupleHeaderGetXmin again.
- */
- if (TransactionIdIsCurrentTransactionId(priorXmax) &&
- HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
- {
- ReleaseBuffer(buffer);
- return false;
- }
-
- /*
- * This is a live tuple, so now try to lock it.
- */
- test = heap_lock_tuple(relation, &tuple,
- estate->es_output_cid,
- lockmode, wait_policy,
- false, &buffer, &hufd);
- /* We now have two pins on the buffer, get rid of one */
- ReleaseBuffer(buffer);
-
- switch (test)
- {
- case HeapTupleSelfUpdated:
-
- /*
- * The target tuple was already updated or deleted by the
- * current command, or by a later command in the current
- * transaction. We *must* ignore the tuple in the former
- * case, so as to avoid the "Halloween problem" of
- * repeated update attempts. In the latter case it might
- * be sensible to fetch the updated tuple instead, but
- * doing so would require changing heap_update and
- * heap_delete to not complain about updating "invisible"
- * tuples, which seems pretty scary (heap_lock_tuple will
- * not complain, but few callers expect
- * HeapTupleInvisible, and we're not one of them). So for
- * now, treat the tuple as deleted and do not process.
- */
- ReleaseBuffer(buffer);
- return false;
-
- case HeapTupleMayBeUpdated:
- /* successfully locked */
- break;
-
- case HeapTupleUpdated:
- ReleaseBuffer(buffer);
- if (IsolationUsesXactSnapshot())
- ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("could not serialize access due to concurrent update")));
- if (ItemPointerIndicatesMovedPartitions(&hufd.ctid))
- ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
-
- /* Should not encounter speculative tuple on recheck */
- Assert(!HeapTupleHeaderIsSpeculative(tuple.t_data));
- if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
- {
- /* it was updated, so look at the updated version */
- tuple.t_self = hufd.ctid;
- /* updated row should have xmin matching this xmax */
- priorXmax = hufd.xmax;
- continue;
- }
- /* tuple was deleted, so give up */
- return false;
-
- case HeapTupleWouldBlock:
- ReleaseBuffer(buffer);
- return false;
-
- case HeapTupleInvisible:
- elog(ERROR, "attempted to lock invisible tuple");
- break;
-
- default:
- ReleaseBuffer(buffer);
- elog(ERROR, "unrecognized heap_lock_tuple status: %u",
- test);
- return false; /* keep compiler quiet */
- }
-
- /*
- * We got tuple - store it for use by the recheck query.
- */
- ExecStorePinnedBufferHeapTuple(&tuple, slot, buffer);
- ExecMaterializeSlot(slot);
- break;
- }
-
- /*
- * If the referenced slot was actually empty, the latest version of
- * the row must have been deleted, so we need do nothing.
- */
- if (tuple.t_data == NULL)
- {
- ReleaseBuffer(buffer);
- return false;
- }
-
- /*
- * As above, if xmin isn't what we're expecting, do nothing.
- */
- if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
- priorXmax))
- {
- ReleaseBuffer(buffer);
- return false;
- }
-
- /*
- * If we get here, the tuple was found but failed SnapshotDirty.
- * Assuming the xmin is either a committed xact or our own xact (as it
- * certainly should be if we're trying to modify the tuple), this must
- * mean that the row was updated or deleted by either a committed xact
- * or our own xact. If it was deleted, we can ignore it; if it was
- * updated then chain up to the next version and repeat the whole
- * process.
- *
- * As above, it should be safe to examine xmax and t_ctid without the
- * buffer content lock, because they can't be changing.
- */
-
- /* check whether next version would be in a different partition */
- if (HeapTupleHeaderIndicatesMovedPartitions(tuple.t_data))
- ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
-
- /* check whether tuple has been deleted */
- if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
- {
- /* deleted, so forget about it */
- ReleaseBuffer(buffer);
- return false;
- }
-
- /* updated, so look at the updated row */
- tuple.t_self = tuple.t_data->t_ctid;
- /* updated row should have xmin matching this xmax */
- priorXmax = HeapTupleHeaderGetUpdateXid(tuple.t_data);
- ReleaseBuffer(buffer);
- /* loop back to fetch next in chain */
- }
-
- /* signal success */
- return true;
-}
-
-/*
* EvalPlanQualInit -- initialize during creation of a plan state node
* that might need to invoke EPQ processing.
*
@@ -2911,7 +2654,7 @@ EvalPlanQualFetchRowMarks(EPQState *epqstate)
tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
- false, NULL))
+ NULL))
elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
/* successful, store tuple */