Diffstat (limited to 'src/backend/executor')
-rw-r--r--  src/backend/executor/execIndexing.c       4
-rw-r--r--  src/backend/executor/execMain.c          289
-rw-r--r--  src/backend/executor/execReplication.c   137
-rw-r--r--  src/backend/executor/nodeLockRows.c      142
-rw-r--r--  src/backend/executor/nodeModifyTable.c   435
-rw-r--r--  src/backend/executor/nodeTidscan.c         2
6 files changed, 374 insertions, 635 deletions
diff --git a/src/backend/executor/execIndexing.c b/src/backend/executor/execIndexing.c
index e67dd6750c6..3b602bb8baf 100644
--- a/src/backend/executor/execIndexing.c
+++ b/src/backend/executor/execIndexing.c
@@ -271,12 +271,12 @@ ExecCloseIndices(ResultRelInfo *resultRelInfo)
*/
List *
ExecInsertIndexTuples(TupleTableSlot *slot,
- ItemPointer tupleid,
EState *estate,
bool noDupErr,
bool *specConflict,
List *arbiterIndexes)
{
+ ItemPointer tupleid = &slot->tts_tid;
List *result = NIL;
ResultRelInfo *resultRelInfo;
int i;
@@ -288,6 +288,8 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
Datum values[INDEX_MAX_KEYS];
bool isnull[INDEX_MAX_KEYS];
+ Assert(ItemPointerIsValid(tupleid));
+
/*
* Get information from the result relation info structure.
*/
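
Under the new contract the TID travels inside the slot itself, so a caller inserts first and only then builds index entries. A minimal sketch with illustrative variables (rel, slot, estate, resultRelInfo); table_insert fills slot->tts_tid, as shown later in this patch:

    table_insert(rel, slot, estate->es_output_cid, 0, NULL);   /* sets slot->tts_tid */
    if (resultRelInfo->ri_NumIndices > 0)
        recheckIndexes = ExecInsertIndexTuples(slot, estate,
                                               false, NULL, NIL);
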
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 63a34760eec..018e9912e94 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -2417,27 +2417,29 @@ ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
/*
- * Check a modified tuple to see if we want to process its updated version
- * under READ COMMITTED rules.
+ * Check the updated version of a tuple to see if we want to process it under
+ * READ COMMITTED rules.
*
* estate - outer executor state data
* epqstate - state for EvalPlanQual rechecking
* relation - table containing tuple
* rti - rangetable index of table containing tuple
- * lockmode - requested tuple lock mode
- * *tid - t_ctid from the outdated tuple (ie, next updated version)
- * priorXmax - t_xmax from the outdated tuple
+ * inputslot - tuple for processing - this can be the slot from
+ * EvalPlanQualSlot(), for increased efficiency.
*
- * *tid is also an output parameter: it's modified to hold the TID of the
- * latest version of the tuple (note this may be changed even on failure)
+ * This tests whether the tuple in inputslot still matches the relevant
+ * quals. For that result to be useful, typically the input tuple has to be
+ * the last row version (otherwise the result isn't particularly useful) and
+ * locked (otherwise the result might be out of date). That's typically
+ * achieved by using table_lock_tuple() with the
+ * TUPLE_LOCK_FLAG_FIND_LAST_VERSION flag.
*
* Returns a slot containing the new candidate update/delete tuple, or
* NULL if we determine we shouldn't process the row.
*/
TupleTableSlot *
EvalPlanQual(EState *estate, EPQState *epqstate,
- Relation relation, Index rti, LockTupleMode lockmode,
- ItemPointer tid, TransactionId priorXmax)
+ Relation relation, Index rti, TupleTableSlot *inputslot)
{
TupleTableSlot *slot;
TupleTableSlot *testslot;
@@ -2450,19 +2452,12 @@ EvalPlanQual(EState *estate, EPQState *epqstate,
EvalPlanQualBegin(epqstate, estate);
/*
- * Get and lock the updated version of the row; if fail, return NULL.
+ * Callers will often use the slot from EvalPlanQualSlot() to store the
+ * tuple, to avoid an unnecessary copy.
*/
testslot = EvalPlanQualSlot(epqstate, relation, rti);
- if (!EvalPlanQualFetch(estate, relation, lockmode, LockWaitBlock,
- tid, priorXmax,
- testslot))
- return NULL;
-
- /*
- * For UPDATE/DELETE we have to return tid of actual row we're executing
- * PQ for.
- */
- *tid = testslot->tts_tid;
+ if (testslot != inputslot)
+ ExecCopySlot(testslot, inputslot);
/*
* Fetch any non-locked source rows
@@ -2495,258 +2490,6 @@ EvalPlanQual(EState *estate, EPQState *epqstate,
}
/*
- * Fetch a copy of the newest version of an outdated tuple
- *
- * estate - executor state data
- * relation - table containing tuple
- * lockmode - requested tuple lock mode
- * wait_policy - requested lock wait policy
- * *tid - t_ctid from the outdated tuple (ie, next updated version)
- * priorXmax - t_xmax from the outdated tuple
- * slot - slot to store newest tuple version
- *
- * Returns true, with slot containing the newest tuple version, or false if we
- * find that there is no newest version (ie, the row was deleted not updated).
- * We also return false if the tuple is locked and the wait policy is to skip
- * such tuples.
- *
- * If successful, we have locked the newest tuple version, so caller does not
- * need to worry about it changing anymore.
- */
-bool
-EvalPlanQualFetch(EState *estate, Relation relation, LockTupleMode lockmode,
- LockWaitPolicy wait_policy,
- ItemPointer tid, TransactionId priorXmax,
- TupleTableSlot *slot)
-{
- HeapTupleData tuple;
- SnapshotData SnapshotDirty;
-
- /*
- * fetch target tuple
- *
- * Loop here to deal with updated or busy tuples
- */
- InitDirtySnapshot(SnapshotDirty);
- tuple.t_self = *tid;
- for (;;)
- {
- Buffer buffer;
-
- if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
- {
- HTSU_Result test;
- HeapUpdateFailureData hufd;
-
- /*
- * If xmin isn't what we're expecting, the slot must have been
- * recycled and reused for an unrelated tuple. This implies that
- * the latest version of the row was deleted, so we need do
- * nothing. (Should be safe to examine xmin without getting
- * buffer's content lock. We assume reading a TransactionId to be
- * atomic, and Xmin never changes in an existing tuple, except to
- * invalid or frozen, and neither of those can match priorXmax.)
- */
- if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
- priorXmax))
- {
- ReleaseBuffer(buffer);
- return false;
- }
-
- /* otherwise xmin should not be dirty... */
- if (TransactionIdIsValid(SnapshotDirty.xmin))
- elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
-
- /*
- * If tuple is being updated by other transaction then we have to
- * wait for its commit/abort, or die trying.
- */
- if (TransactionIdIsValid(SnapshotDirty.xmax))
- {
- ReleaseBuffer(buffer);
- switch (wait_policy)
- {
- case LockWaitBlock:
- XactLockTableWait(SnapshotDirty.xmax,
- relation, &tuple.t_self,
- XLTW_FetchUpdated);
- break;
- case LockWaitSkip:
- if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
- return false; /* skip instead of waiting */
- break;
- case LockWaitError:
- if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
- ereport(ERROR,
- (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
- errmsg("could not obtain lock on row in relation \"%s\"",
- RelationGetRelationName(relation))));
- break;
- }
- continue; /* loop back to repeat heap_fetch */
- }
-
- /*
- * If tuple was inserted by our own transaction, we have to check
- * cmin against es_output_cid: cmin >= current CID means our
- * command cannot see the tuple, so we should ignore it. Otherwise
- * heap_lock_tuple() will throw an error, and so would any later
- * attempt to update or delete the tuple. (We need not check cmax
- * because HeapTupleSatisfiesDirty will consider a tuple deleted
- * by our transaction dead, regardless of cmax.) We just checked
- * that priorXmax == xmin, so we can test that variable instead of
- * doing HeapTupleHeaderGetXmin again.
- */
- if (TransactionIdIsCurrentTransactionId(priorXmax) &&
- HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
- {
- ReleaseBuffer(buffer);
- return false;
- }
-
- /*
- * This is a live tuple, so now try to lock it.
- */
- test = heap_lock_tuple(relation, &tuple,
- estate->es_output_cid,
- lockmode, wait_policy,
- false, &buffer, &hufd);
- /* We now have two pins on the buffer, get rid of one */
- ReleaseBuffer(buffer);
-
- switch (test)
- {
- case HeapTupleSelfUpdated:
-
- /*
- * The target tuple was already updated or deleted by the
- * current command, or by a later command in the current
- * transaction. We *must* ignore the tuple in the former
- * case, so as to avoid the "Halloween problem" of
- * repeated update attempts. In the latter case it might
- * be sensible to fetch the updated tuple instead, but
- * doing so would require changing heap_update and
- * heap_delete to not complain about updating "invisible"
- * tuples, which seems pretty scary (heap_lock_tuple will
- * not complain, but few callers expect
- * HeapTupleInvisible, and we're not one of them). So for
- * now, treat the tuple as deleted and do not process.
- */
- ReleaseBuffer(buffer);
- return false;
-
- case HeapTupleMayBeUpdated:
- /* successfully locked */
- break;
-
- case HeapTupleUpdated:
- ReleaseBuffer(buffer);
- if (IsolationUsesXactSnapshot())
- ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("could not serialize access due to concurrent update")));
- if (ItemPointerIndicatesMovedPartitions(&hufd.ctid))
- ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
-
- /* Should not encounter speculative tuple on recheck */
- Assert(!HeapTupleHeaderIsSpeculative(tuple.t_data));
- if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
- {
- /* it was updated, so look at the updated version */
- tuple.t_self = hufd.ctid;
- /* updated row should have xmin matching this xmax */
- priorXmax = hufd.xmax;
- continue;
- }
- /* tuple was deleted, so give up */
- return false;
-
- case HeapTupleWouldBlock:
- ReleaseBuffer(buffer);
- return false;
-
- case HeapTupleInvisible:
- elog(ERROR, "attempted to lock invisible tuple");
- break;
-
- default:
- ReleaseBuffer(buffer);
- elog(ERROR, "unrecognized heap_lock_tuple status: %u",
- test);
- return false; /* keep compiler quiet */
- }
-
- /*
- * We got tuple - store it for use by the recheck query.
- */
- ExecStorePinnedBufferHeapTuple(&tuple, slot, buffer);
- ExecMaterializeSlot(slot);
- break;
- }
-
- /*
- * If the referenced slot was actually empty, the latest version of
- * the row must have been deleted, so we need do nothing.
- */
- if (tuple.t_data == NULL)
- {
- ReleaseBuffer(buffer);
- return false;
- }
-
- /*
- * As above, if xmin isn't what we're expecting, do nothing.
- */
- if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
- priorXmax))
- {
- ReleaseBuffer(buffer);
- return false;
- }
-
- /*
- * If we get here, the tuple was found but failed SnapshotDirty.
- * Assuming the xmin is either a committed xact or our own xact (as it
- * certainly should be if we're trying to modify the tuple), this must
- * mean that the row was updated or deleted by either a committed xact
- * or our own xact. If it was deleted, we can ignore it; if it was
- * updated then chain up to the next version and repeat the whole
- * process.
- *
- * As above, it should be safe to examine xmax and t_ctid without the
- * buffer content lock, because they can't be changing.
- */
-
- /* check whether next version would be in a different partition */
- if (HeapTupleHeaderIndicatesMovedPartitions(tuple.t_data))
- ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
-
- /* check whether tuple has been deleted */
- if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
- {
- /* deleted, so forget about it */
- ReleaseBuffer(buffer);
- return false;
- }
-
- /* updated, so look at the updated row */
- tuple.t_self = tuple.t_data->t_ctid;
- /* updated row should have xmin matching this xmax */
- priorXmax = HeapTupleHeaderGetUpdateXid(tuple.t_data);
- ReleaseBuffer(buffer);
- /* loop back to fetch next in chain */
- }
-
- /* signal success */
- return true;
-}
-
-/*
* EvalPlanQualInit -- initialize during creation of a plan state node
* that might need to invoke EPQ processing.
*
@@ -2911,7 +2654,7 @@ EvalPlanQualFetchRowMarks(EPQState *epqstate)
tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
- false, NULL))
+ NULL))
elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
/* successful, store tuple */
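
Condensed, the replacement for the removed EvalPlanQualFetch() is the table_lock_tuple()/EvalPlanQual() pairing used by the nodeModifyTable.c hunks below; a sketch with illustrative variable names:

    EvalPlanQualBegin(epqstate, estate);
    inputslot = EvalPlanQualSlot(epqstate, relation, rti);

    result = table_lock_tuple(relation, tupleid, estate->es_snapshot,
                              inputslot, estate->es_output_cid,
                              lockmode, LockWaitBlock,
                              TUPLE_LOCK_FLAG_FIND_LAST_VERSION, &tmfd);
    if (result == TM_Ok)
    {
        /* the latest version is locked; recheck the quals against it */
        epqslot = EvalPlanQual(estate, epqstate, relation, rti, inputslot);
        if (TupIsNull(epqslot))
            return NULL;        /* row no longer passes the quals */
    }
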
diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c
index c539bb5a3f6..d8b48c667ce 100644
--- a/src/backend/executor/execReplication.c
+++ b/src/backend/executor/execReplication.c
@@ -15,7 +15,6 @@
#include "postgres.h"
#include "access/genam.h"
-#include "access/heapam.h"
#include "access/relscan.h"
#include "access/tableam.h"
#include "access/transam.h"
@@ -168,35 +167,28 @@ retry:
/* Found tuple, try to lock it in the lockmode. */
if (found)
{
- Buffer buf;
- HeapUpdateFailureData hufd;
- HTSU_Result res;
- HeapTupleData locktup;
- HeapTupleTableSlot *hslot = (HeapTupleTableSlot *)outslot;
-
- /* Only a heap tuple has item pointers. */
- Assert(TTS_IS_HEAPTUPLE(outslot) || TTS_IS_BUFFERTUPLE(outslot));
- ItemPointerCopy(&hslot->tuple->t_self, &locktup.t_self);
+ TM_FailureData tmfd;
+ TM_Result res;
PushActiveSnapshot(GetLatestSnapshot());
- res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false),
- lockmode,
- LockWaitBlock,
- false /* don't follow updates */ ,
- &buf, &hufd);
- /* the tuple slot already has the buffer pinned */
- ReleaseBuffer(buf);
+ res = table_lock_tuple(rel, &(outslot->tts_tid), GetLatestSnapshot(),
+ outslot,
+ GetCurrentCommandId(false),
+ lockmode,
+ LockWaitBlock,
+ 0 /* don't follow updates */ ,
+ &tmfd);
PopActiveSnapshot();
switch (res)
{
- case HeapTupleMayBeUpdated:
+ case TM_Ok:
break;
- case HeapTupleUpdated:
+ case TM_Updated:
/* XXX: Improve handling here */
- if (ItemPointerIndicatesMovedPartitions(&hufd.ctid))
+ if (ItemPointerIndicatesMovedPartitions(&tmfd.ctid))
ereport(LOG,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("tuple to be locked was already moved to another partition due to concurrent update, retrying")));
@@ -205,11 +197,17 @@ retry:
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("concurrent update, retrying")));
goto retry;
- case HeapTupleInvisible:
+ case TM_Deleted:
+ /* XXX: Improve handling here */
+ ereport(LOG,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("concurrent delete, retrying")));
+ goto retry;
+ case TM_Invisible:
elog(ERROR, "attempted to lock invisible tuple");
break;
default:
- elog(ERROR, "unexpected heap_lock_tuple status: %u", res);
+ elog(ERROR, "unexpected table_lock_tuple status: %u", res);
break;
}
}
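
In sketch form, the retry pattern above reduces to the following (declarations and names as in the hunk):

    PushActiveSnapshot(GetLatestSnapshot());
    res = table_lock_tuple(rel, &(outslot->tts_tid), GetLatestSnapshot(),
                           outslot, GetCurrentCommandId(false),
                           lockmode, LockWaitBlock,
                           0 /* don't follow updates */, &tmfd);
    PopActiveSnapshot();

    if (res == TM_Updated || res == TM_Deleted)
        goto retry;             /* concurrent change: look the row up again */
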
@@ -333,35 +331,28 @@ retry:
/* Found tuple, try to lock it in the lockmode. */
if (found)
{
- Buffer buf;
- HeapUpdateFailureData hufd;
- HTSU_Result res;
- HeapTupleData locktup;
- HeapTupleTableSlot *hslot = (HeapTupleTableSlot *)outslot;
-
- /* Only a heap tuple has item pointers. */
- Assert(TTS_IS_HEAPTUPLE(outslot) || TTS_IS_BUFFERTUPLE(outslot));
- ItemPointerCopy(&hslot->tuple->t_self, &locktup.t_self);
+ TM_FailureData tmfd;
+ TM_Result res;
PushActiveSnapshot(GetLatestSnapshot());
- res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false),
- lockmode,
- LockWaitBlock,
- false /* don't follow updates */ ,
- &buf, &hufd);
- /* the tuple slot already has the buffer pinned */
- ReleaseBuffer(buf);
+ res = table_lock_tuple(rel, &(outslot->tts_tid), GetLatestSnapshot(),
+ outslot,
+ GetCurrentCommandId(false),
+ lockmode,
+ LockWaitBlock,
+ 0 /* don't follow updates */ ,
+ &tmfd);
PopActiveSnapshot();
switch (res)
{
- case HeapTupleMayBeUpdated:
+ case TM_Ok:
break;
- case HeapTupleUpdated:
+ case TM_Updated:
/* XXX: Improve handling here */
- if (ItemPointerIndicatesMovedPartitions(&hufd.ctid))
+ if (ItemPointerIndicatesMovedPartitions(&tmfd.ctid))
ereport(LOG,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("tuple to be locked was already moved to another partition due to concurrent update, retrying")));
@@ -370,11 +361,17 @@ retry:
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("concurrent update, retrying")));
goto retry;
- case HeapTupleInvisible:
+ case TM_Deleted:
+ /* XXX: Improve handling here */
+ ereport(LOG,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("concurrent delete, retrying")));
+ goto retry;
+ case TM_Invisible:
elog(ERROR, "attempted to lock invisible tuple");
break;
default:
- elog(ERROR, "unexpected heap_lock_tuple status: %u", res);
+ elog(ERROR, "unexpected table_lock_tuple status: %u", res);
break;
}
}
@@ -395,7 +392,6 @@ void
ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
{
bool skip_tuple = false;
- HeapTuple tuple;
ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
Relation rel = resultRelInfo->ri_RelationDesc;
@@ -422,16 +418,11 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
if (resultRelInfo->ri_PartitionCheck)
ExecPartitionCheck(resultRelInfo, slot, estate, true);
- /* Materialize slot into a tuple that we can scribble upon. */
- tuple = ExecFetchSlotHeapTuple(slot, true, NULL);
-
/* OK, store the tuple and create index entries for it */
- simple_heap_insert(rel, tuple);
- ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
+ simple_table_insert(resultRelInfo->ri_RelationDesc, slot);
if (resultRelInfo->ri_NumIndices > 0)
- recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
- estate, false, NULL,
+ recheckIndexes = ExecInsertIndexTuples(slot, estate, false, NULL,
NIL);
/* AFTER ROW INSERT Triggers */
@@ -459,13 +450,9 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
TupleTableSlot *searchslot, TupleTableSlot *slot)
{
bool skip_tuple = false;
- HeapTuple tuple;
ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
Relation rel = resultRelInfo->ri_RelationDesc;
- HeapTupleTableSlot *hsearchslot = (HeapTupleTableSlot *)searchslot;
-
- /* We expect the searchslot to contain a heap tuple. */
- Assert(TTS_IS_HEAPTUPLE(searchslot) || TTS_IS_BUFFERTUPLE(searchslot));
+ ItemPointer tid = &(searchslot->tts_tid);
/* For now we support only tables. */
Assert(rel->rd_rel->relkind == RELKIND_RELATION);
@@ -477,14 +464,14 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
resultRelInfo->ri_TrigDesc->trig_update_before_row)
{
if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
- &hsearchslot->tuple->t_self,
- NULL, slot))
+ tid, NULL, slot))
skip_tuple = true; /* "do nothing" */
}
if (!skip_tuple)
{
List *recheckIndexes = NIL;
+ bool update_indexes;
/* Check the constraints of the tuple */
if (rel->rd_att->constr)
@@ -492,23 +479,16 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
if (resultRelInfo->ri_PartitionCheck)
ExecPartitionCheck(resultRelInfo, slot, estate, true);
- /* Materialize slot into a tuple that we can scribble upon. */
- tuple = ExecFetchSlotHeapTuple(slot, true, NULL);
+ simple_table_update(rel, tid, slot, estate->es_snapshot,
+ &update_indexes);
- /* OK, update the tuple and index entries for it */
- simple_heap_update(rel, &hsearchslot->tuple->t_self, tuple);
- ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
-
- if (resultRelInfo->ri_NumIndices > 0 &&
- !HeapTupleIsHeapOnly(tuple))
- recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
- estate, false, NULL,
+ if (resultRelInfo->ri_NumIndices > 0 && update_indexes)
+ recheckIndexes = ExecInsertIndexTuples(slot, estate, false, NULL,
NIL);
/* AFTER ROW UPDATE Triggers */
ExecARUpdateTriggers(estate, resultRelInfo,
- &(tuple->t_self),
- NULL, slot,
+ tid, NULL, slot,
recheckIndexes, NULL);
list_free(recheckIndexes);
@@ -528,11 +508,7 @@ ExecSimpleRelationDelete(EState *estate, EPQState *epqstate,
bool skip_tuple = false;
ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
Relation rel = resultRelInfo->ri_RelationDesc;
- HeapTupleTableSlot *hsearchslot = (HeapTupleTableSlot *)searchslot;
-
- /* For now we support only tables and heap tuples. */
- Assert(rel->rd_rel->relkind == RELKIND_RELATION);
- Assert(TTS_IS_HEAPTUPLE(searchslot) || TTS_IS_BUFFERTUPLE(searchslot));
+ ItemPointer tid = &searchslot->tts_tid;
CheckCmdReplicaIdentity(rel, CMD_DELETE);
@@ -541,23 +517,18 @@ ExecSimpleRelationDelete(EState *estate, EPQState *epqstate,
resultRelInfo->ri_TrigDesc->trig_delete_before_row)
{
skip_tuple = !ExecBRDeleteTriggers(estate, epqstate, resultRelInfo,
- &hsearchslot->tuple->t_self,
- NULL, NULL);
+ tid, NULL, NULL);
}
if (!skip_tuple)
{
- List *recheckIndexes = NIL;
-
/* OK, delete the tuple */
- simple_heap_delete(rel, &hsearchslot->tuple->t_self);
+ simple_table_delete(rel, tid, estate->es_snapshot);
/* AFTER ROW DELETE Triggers */
ExecARDeleteTriggers(estate, resultRelInfo,
- &hsearchslot->tuple->t_self, NULL, NULL);
-
- list_free(recheckIndexes);
+ tid, NULL, NULL);
}
}
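
Taken together, the three ExecSimpleRelation* helpers now go through the slot/TID-based table AM wrappers; a condensed sketch (tid pointing at searchslot->tts_tid, as in the hunks above):

    simple_table_insert(rel, slot);                     /* fills slot->tts_tid */
    simple_table_update(rel, tid, slot,
                        estate->es_snapshot, &update_indexes);
    simple_table_delete(rel, tid, estate->es_snapshot);
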
diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c
index 76f0f9d66e5..7674ac893c2 100644
--- a/src/backend/executor/nodeLockRows.c
+++ b/src/backend/executor/nodeLockRows.c
@@ -21,14 +21,12 @@
#include "postgres.h"
-#include "access/heapam.h"
-#include "access/htup_details.h"
+#include "access/tableam.h"
#include "access/xact.h"
#include "executor/executor.h"
#include "executor/nodeLockRows.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
-#include "storage/bufmgr.h"
#include "utils/rel.h"
@@ -82,11 +80,11 @@ lnext:
ExecRowMark *erm = aerm->rowmark;
Datum datum;
bool isNull;
- HeapTupleData tuple;
- Buffer buffer;
- HeapUpdateFailureData hufd;
+ ItemPointerData tid;
+ TM_FailureData tmfd;
LockTupleMode lockmode;
- HTSU_Result test;
+ int lockflags = 0;
+ TM_Result test;
TupleTableSlot *markSlot;
/* clear any leftover test tuple for this rel */
@@ -112,6 +110,7 @@ lnext:
/* this child is inactive right now */
erm->ermActive = false;
ItemPointerSetInvalid(&(erm->curCtid));
+ ExecClearTuple(markSlot);
continue;
}
}
@@ -160,8 +159,8 @@ lnext:
continue;
}
- /* okay, try to lock the tuple */
- tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
+ /* okay, try to lock (and fetch) the tuple */
+ tid = *((ItemPointer) DatumGetPointer(datum));
switch (erm->markType)
{
case ROW_MARK_EXCLUSIVE:
@@ -182,18 +181,23 @@ lnext:
break;
}
- test = heap_lock_tuple(erm->relation, &tuple,
- estate->es_output_cid,
- lockmode, erm->waitPolicy, true,
- &buffer, &hufd);
- ReleaseBuffer(buffer);
+ lockflags = TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS;
+ if (!IsolationUsesXactSnapshot())
+ lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
+
+ test = table_lock_tuple(erm->relation, &tid, estate->es_snapshot,
+ markSlot, estate->es_output_cid,
+ lockmode, erm->waitPolicy,
+ lockflags,
+ &tmfd);
+
switch (test)
{
- case HeapTupleWouldBlock:
+ case TM_WouldBlock:
/* couldn't lock tuple in SKIP LOCKED mode */
goto lnext;
- case HeapTupleSelfUpdated:
+ case TM_SelfModified:
/*
* The target tuple was already updated or deleted by the
@@ -204,65 +208,50 @@ lnext:
* to fetch the updated tuple instead, but doing so would
* require changing heap_update and heap_delete to not
* complain about updating "invisible" tuples, which seems
- * pretty scary (heap_lock_tuple will not complain, but few
- * callers expect HeapTupleInvisible, and we're not one of
- * them). So for now, treat the tuple as deleted and do not
- * process.
+ * pretty scary (table_lock_tuple will not complain, but few
+ * callers expect TM_Invisible, and we're not one of them). So
+ * for now, treat the tuple as deleted and do not process.
*/
goto lnext;
- case HeapTupleMayBeUpdated:
- /* got the lock successfully */
+ case TM_Ok:
+
+ /*
+ * Got the lock successfully; the locked tuple is saved in
+ * markSlot for EvalPlanQual testing below, if needed.
+ */
+ if (tmfd.traversed)
+ epq_needed = true;
break;
- case HeapTupleUpdated:
+ case TM_Updated:
if (IsolationUsesXactSnapshot())
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to concurrent update")));
- if (ItemPointerIndicatesMovedPartitions(&hufd.ctid))
+ elog(ERROR, "unexpected table_lock_tuple status: %u",
+ test);
+ break;
+
+ case TM_Deleted:
+ if (IsolationUsesXactSnapshot())
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
-
- if (ItemPointerEquals(&hufd.ctid, &tuple.t_self))
- {
- /* Tuple was deleted, so don't return it */
- goto lnext;
- }
-
- /* updated, so fetch and lock the updated version */
- if (!EvalPlanQualFetch(estate, erm->relation,
- lockmode, erm->waitPolicy,
- &hufd.ctid, hufd.xmax,
- markSlot))
- {
- /*
- * Tuple was deleted; or it's locked and we're under SKIP
- * LOCKED policy, so don't return it
- */
- goto lnext;
- }
- /* remember the actually locked tuple's TID */
- tuple.t_self = markSlot->tts_tid;
-
- /* Remember we need to do EPQ testing */
- epq_needed = true;
-
- /* Continue loop until we have all target tuples */
- break;
+ errmsg("could not serialize access due to concurrent update")));
+ /* tuple was deleted so don't return it */
+ goto lnext;
- case HeapTupleInvisible:
+ case TM_Invisible:
elog(ERROR, "attempted to lock invisible tuple");
break;
default:
- elog(ERROR, "unrecognized heap_lock_tuple status: %u",
+ elog(ERROR, "unrecognized table_lock_tuple status: %u",
test);
}
/* Remember locked tuple's TID for EPQ testing and WHERE CURRENT OF */
- erm->curCtid = tuple.t_self;
+ erm->curCtid = tid;
}
/*
@@ -271,49 +260,6 @@ lnext:
if (epq_needed)
{
/*
- * Fetch a copy of any rows that were successfully locked without any
- * update having occurred. (We do this in a separate pass so as to
- * avoid overhead in the common case where there are no concurrent
- * updates.) Make sure any inactive child rels have NULL test tuples
- * in EPQ.
- */
- foreach(lc, node->lr_arowMarks)
- {
- ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(lc);
- ExecRowMark *erm = aerm->rowmark;
- TupleTableSlot *markSlot;
- HeapTupleData tuple;
- Buffer buffer;
-
- markSlot = EvalPlanQualSlot(&node->lr_epqstate, erm->relation, erm->rti);
-
- /* skip non-active child tables, but clear their test tuples */
- if (!erm->ermActive)
- {
- Assert(erm->rti != erm->prti); /* check it's child table */
- ExecClearTuple(markSlot);
- continue;
- }
-
- /* was tuple updated and fetched above? */
- if (!TupIsNull(markSlot))
- continue;
-
- /* foreign tables should have been fetched above */
- Assert(erm->relation->rd_rel->relkind != RELKIND_FOREIGN_TABLE);
- Assert(ItemPointerIsValid(&(erm->curCtid)));
-
- /* okay, fetch the tuple */
- tuple.t_self = erm->curCtid;
- if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
- false, NULL))
- elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
- ExecStorePinnedBufferHeapTuple(&tuple, markSlot, buffer);
- ExecMaterializeSlot(markSlot);
- /* successful, use tuple in slot */
- }
-
- /*
* Now fetch any non-locked source rows --- the EPQ logic knows how to
* do that.
*/
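
The heart of the nodeLockRows.c change is that chasing the update chain moved into the AM; a sketch of the flag handling from the hunk above:

    int lockflags = TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS;

    if (!IsolationUsesXactSnapshot())
        lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;

    test = table_lock_tuple(erm->relation, &tid, estate->es_snapshot,
                            markSlot, estate->es_output_cid,
                            lockmode, erm->waitPolicy, lockflags, &tmfd);

    if (test == TM_Ok && tmfd.traversed)
        epq_needed = true;      /* a newer version was locked; EPQ must recheck */
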
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index fa92db130bb..1374b751767 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -181,7 +181,7 @@ ExecProcessReturning(ResultRelInfo *resultRelInfo,
}
/*
- * ExecCheckHeapTupleVisible -- verify heap tuple is visible
+ * ExecCheckTupleVisible -- verify tuple is visible
*
* It would not be consistent with guarantees of the higher isolation levels to
* proceed with avoiding insertion (taking speculative insertion's alternative
@@ -189,41 +189,44 @@ ExecProcessReturning(ResultRelInfo *resultRelInfo,
* Check for the need to raise a serialization failure, and do so as necessary.
*/
static void
-ExecCheckHeapTupleVisible(EState *estate,
- HeapTuple tuple,
- Buffer buffer)
+ExecCheckTupleVisible(EState *estate,
+ Relation rel,
+ TupleTableSlot *slot)
{
if (!IsolationUsesXactSnapshot())
return;
- /*
- * We need buffer pin and lock to call HeapTupleSatisfiesVisibility.
- * Caller should be holding pin, but not lock.
- */
- LockBuffer(buffer, BUFFER_LOCK_SHARE);
- if (!HeapTupleSatisfiesVisibility(tuple, estate->es_snapshot, buffer))
+ if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
{
+ Datum xminDatum;
+ TransactionId xmin;
+ bool isnull;
+
+ xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
+ Assert(!isnull);
+ xmin = DatumGetTransactionId(xminDatum);
+
/*
* We should not raise a serialization failure if the conflict is
* against a tuple inserted by our own transaction, even if it's not
* visible to our snapshot. (This would happen, for example, if
* conflicting keys are proposed for insertion in a single command.)
*/
- if (!TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple->t_data)))
+ if (!TransactionIdIsCurrentTransactionId(xmin))
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to concurrent update")));
}
- LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
}
/*
- * ExecCheckTIDVisible -- convenience variant of ExecCheckHeapTupleVisible()
+ * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
*/
static void
ExecCheckTIDVisible(EState *estate,
ResultRelInfo *relinfo,
- ItemPointer tid)
+ ItemPointer tid,
+ TupleTableSlot *tempSlot)
{
Relation rel = relinfo->ri_RelationDesc;
Buffer buffer;
@@ -234,10 +237,11 @@ ExecCheckTIDVisible(EState *estate,
return;
tuple.t_self = *tid;
- if (!heap_fetch(rel, SnapshotAny, &tuple, &buffer, false, NULL))
+ if (!heap_fetch(rel, SnapshotAny, &tuple, &buffer, NULL))
elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
- ExecCheckHeapTupleVisible(estate, &tuple, buffer);
- ReleaseBuffer(buffer);
+ ExecStorePinnedBufferHeapTuple(&tuple, tempSlot, buffer);
+ ExecCheckTupleVisible(estate, rel, tempSlot);
+ ExecClearTuple(tempSlot);
}
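
For reference, the slot-based xmin extraction that replaces the direct t_data access looks like this in isolation (a sketch; slot must hold a tuple fetched from rel):

    Datum xminDatum;
    TransactionId xmin;
    bool isnull;

    xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
    Assert(!isnull);
    xmin = DatumGetTransactionId(xminDatum);

    if (!TransactionIdIsCurrentTransactionId(xmin))
        ereport(ERROR,
                (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                 errmsg("could not serialize access due to concurrent update")));
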
/* ----------------------------------------------------------------
@@ -319,7 +323,6 @@ ExecInsert(ModifyTableState *mtstate,
else
{
WCOKind wco_kind;
- HeapTuple inserttuple;
/*
* Constraints might reference the tableoid column, so (re-)initialize
@@ -417,16 +420,21 @@ ExecInsert(ModifyTableState *mtstate,
* In case of ON CONFLICT DO NOTHING, do nothing. However,
* verify that the tuple is visible to the executor's MVCC
* snapshot at higher isolation levels.
+ *
+ * Using ExecGetReturningSlot() to store the tuple for the
+ * recheck isn't that pretty, but we can't trivially use
+ * the input slot, because it might not be of a compatible
+ * type. As there's no conflicting usage of
+ * ExecGetReturningSlot() in the DO NOTHING case, this
+ * seems acceptable.
*/
Assert(onconflict == ONCONFLICT_NOTHING);
- ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid);
+ ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
+ ExecGetReturningSlot(estate, resultRelInfo));
InstrCountTuples2(&mtstate->ps, 1);
return NULL;
}
}
- inserttuple = ExecFetchSlotHeapTuple(slot, true, NULL);
-
/*
* Before we start insertion proper, acquire our "speculative
* insertion lock". Others can use that to wait for us to decide
@@ -434,26 +442,22 @@ ExecInsert(ModifyTableState *mtstate,
* waiting for the whole transaction to complete.
*/
specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
- HeapTupleHeaderSetSpeculativeToken(inserttuple->t_data, specToken);
/* insert the tuple, with the speculative token */
- heap_insert(resultRelationDesc, inserttuple,
- estate->es_output_cid,
- HEAP_INSERT_SPECULATIVE,
- NULL);
- slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
- ItemPointerCopy(&inserttuple->t_self, &slot->tts_tid);
+ table_insert_speculative(resultRelationDesc, slot,
+ estate->es_output_cid,
+ 0,
+ NULL,
+ specToken);
/* insert index entries for tuple */
- recheckIndexes = ExecInsertIndexTuples(slot, &(inserttuple->t_self),
- estate, true, &specConflict,
+ recheckIndexes = ExecInsertIndexTuples(slot, estate, true,
+ &specConflict,
arbiterIndexes);
/* adjust the tuple's state accordingly */
- if (!specConflict)
- heap_finish_speculative(resultRelationDesc, inserttuple);
- else
- heap_abort_speculative(resultRelationDesc, inserttuple);
+ table_complete_speculative(resultRelationDesc, slot,
+ specToken, specConflict);
/*
* Wake up anyone waiting for our decision. They will re-check
@@ -479,23 +483,14 @@ ExecInsert(ModifyTableState *mtstate,
}
else
{
- /*
- * insert the tuple normally.
- *
- * Note: heap_insert returns the tid (location) of the new tuple
- * in the t_self field.
- */
- inserttuple = ExecFetchSlotHeapTuple(slot, true, NULL);
- heap_insert(resultRelationDesc, inserttuple,
- estate->es_output_cid,
- 0, NULL);
- slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
- ItemPointerCopy(&inserttuple->t_self, &slot->tts_tid);
+ /* insert the tuple normally */
+ table_insert(resultRelationDesc, slot,
+ estate->es_output_cid,
+ 0, NULL);
/* insert index entries for tuple */
if (resultRelInfo->ri_NumIndices > 0)
- recheckIndexes = ExecInsertIndexTuples(slot, &(inserttuple->t_self),
- estate, false, NULL,
+ recheckIndexes = ExecInsertIndexTuples(slot, estate, false, NULL,
NIL);
}
}
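
The speculative-insertion dance, condensed from the hunk above (a sketch; variables as in ExecInsert):

    specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());

    /* insert the tuple, marked with the speculative token */
    table_insert_speculative(resultRelationDesc, slot,
                             estate->es_output_cid, 0, NULL, specToken);

    /* insert index entries; an arbiter conflict sets specConflict */
    recheckIndexes = ExecInsertIndexTuples(slot, estate, true,
                                           &specConflict, arbiterIndexes);

    /* confirm the tuple, or kill it if a conflict was detected */
    table_complete_speculative(resultRelationDesc, slot,
                               specToken, specConflict);
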
@@ -594,8 +589,8 @@ ExecDelete(ModifyTableState *mtstate,
{
ResultRelInfo *resultRelInfo;
Relation resultRelationDesc;
- HTSU_Result result;
- HeapUpdateFailureData hufd;
+ TM_Result result;
+ TM_FailureData tmfd;
TupleTableSlot *slot = NULL;
TransitionCaptureState *ar_delete_trig_tcs;
@@ -671,15 +666,17 @@ ExecDelete(ModifyTableState *mtstate,
* mode transactions.
*/
ldelete:;
- result = heap_delete(resultRelationDesc, tupleid,
- estate->es_output_cid,
- estate->es_crosscheck_snapshot,
- true /* wait for commit */ ,
- &hufd,
- changingPart);
+ result = table_delete(resultRelationDesc, tupleid,
+ estate->es_output_cid,
+ estate->es_snapshot,
+ estate->es_crosscheck_snapshot,
+ true /* wait for commit */ ,
+ &tmfd,
+ changingPart);
+
switch (result)
{
- case HeapTupleSelfUpdated:
+ case TM_SelfModified:
/*
* The target tuple was already updated or deleted by the
@@ -705,7 +702,7 @@ ldelete:;
* can re-execute the DELETE and then return NULL to cancel
* the outer delete.
*/
- if (hufd.cmax != estate->es_output_cid)
+ if (tmfd.cmax != estate->es_output_cid)
ereport(ERROR,
(errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
@@ -714,52 +711,98 @@ ldelete:;
/* Else, already deleted by self; nothing to do */
return NULL;
- case HeapTupleMayBeUpdated:
+ case TM_Ok:
break;
- case HeapTupleUpdated:
- if (IsolationUsesXactSnapshot())
- ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("could not serialize access due to concurrent update")));
- if (ItemPointerIndicatesMovedPartitions(&hufd.ctid))
- ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("tuple to be deleted was already moved to another partition due to concurrent update")));
-
- if (!ItemPointerEquals(tupleid, &hufd.ctid))
+ case TM_Updated:
{
- TupleTableSlot *my_epqslot;
-
- my_epqslot = EvalPlanQual(estate,
- epqstate,
- resultRelationDesc,
- resultRelInfo->ri_RangeTableIndex,
- LockTupleExclusive,
- &hufd.ctid,
- hufd.xmax);
- if (!TupIsNull(my_epqslot))
+ TupleTableSlot *inputslot;
+ TupleTableSlot *epqslot;
+
+ if (IsolationUsesXactSnapshot())
+ ereport(ERROR,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("could not serialize access due to concurrent update")));
+
+ /*
+ * Already know that we're going to need to do EPQ, so
+ * fetch tuple directly into the right slot.
+ */
+ EvalPlanQualBegin(epqstate, estate);
+ inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
+ resultRelInfo->ri_RangeTableIndex);
+
+ result = table_lock_tuple(resultRelationDesc, tupleid,
+ estate->es_snapshot,
+ inputslot, estate->es_output_cid,
+ LockTupleExclusive, LockWaitBlock,
+ TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
+ &tmfd);
+
+ switch (result)
{
- *tupleid = hufd.ctid;
+ case TM_Ok:
+ Assert(tmfd.traversed);
+ epqslot = EvalPlanQual(estate,
+ epqstate,
+ resultRelationDesc,
+ resultRelInfo->ri_RangeTableIndex,
+ inputslot);
+ if (TupIsNull(epqslot))
+ /* Tuple not passing quals anymore, exiting... */
+ return NULL;
+
+ /*
+ * If requested, skip delete and pass back the
+ * updated row.
+ */
+ if (epqreturnslot)
+ {
+ *epqreturnslot = epqslot;
+ return NULL;
+ }
+ else
+ goto ldelete;
+
+ case TM_Deleted:
+ /* tuple already deleted; nothing to do */
+ return NULL;
- /*
- * If requested, skip delete and pass back the updated
- * row.
- */
- if (epqreturnslot)
- {
- *epqreturnslot = my_epqslot;
+ default:
+
+ /*
+ * TM_Invisible should be impossible because we're
+ * waiting for updated row versions, and would
+ * already have errored out if the first version
+ * is invisible.
+ *
+ * TM_SelfModified should be impossible, as we'd
+ * otherwise have hit the TM_SelfModified case in
+ * response to table_delete above.
+ *
+ * TM_Updated should be impossible, because we're
+ * locking the latest version via
+ * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
+ */
+ elog(ERROR, "unexpected table_lock_tuple status: %u",
+ result);
return NULL;
- }
- else
- goto ldelete;
}
+
+ Assert(false);
+ break;
}
+
+ case TM_Deleted:
+ if (IsolationUsesXactSnapshot())
+ ereport(ERROR,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("could not serialize access due to concurrent delete")));
/* tuple already deleted; nothing to do */
return NULL;
default:
- elog(ERROR, "unrecognized heap_delete status: %u", result);
+ elog(ERROR, "unrecognized table_delete status: %u", result);
return NULL;
}
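
Stripped of the EPQ details, ExecDelete's new result handling reduces to this skeleton (a sketch; see the hunk above for the full TM_Updated branch):

    result = table_delete(resultRelationDesc, tupleid,
                          estate->es_output_cid,
                          estate->es_snapshot,
                          estate->es_crosscheck_snapshot,
                          true /* wait for commit */,
                          &tmfd, changingPart);
    switch (result)
    {
        case TM_Ok:
            break;              /* deleted; fall through to triggers etc. */
        case TM_SelfModified:
            return NULL;        /* or error, depending on tmfd.cmax */
        case TM_Updated:
            /* lock the newest version, run EPQ, possibly goto ldelete */
            break;
        case TM_Deleted:
            return NULL;        /* concurrently deleted; nothing to do */
        default:
            elog(ERROR, "unrecognized table_delete status: %u", result);
    }
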
@@ -832,8 +875,8 @@ ldelete:;
else
{
BufferHeapTupleTableSlot *bslot;
- HeapTuple deltuple;
- Buffer buffer;
+ HeapTuple deltuple;
+ Buffer buffer;
Assert(TTS_IS_BUFFERTUPLE(slot));
ExecClearTuple(slot);
@@ -842,7 +885,7 @@ ldelete:;
deltuple->t_self = *tupleid;
if (!heap_fetch(resultRelationDesc, SnapshotAny,
- deltuple, &buffer, false, NULL))
+ deltuple, &buffer, NULL))
elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
ExecStorePinnedBufferHeapTuple(deltuple, slot, buffer);
@@ -897,11 +940,10 @@ ExecUpdate(ModifyTableState *mtstate,
EState *estate,
bool canSetTag)
{
- HeapTuple updatetuple;
ResultRelInfo *resultRelInfo;
Relation resultRelationDesc;
- HTSU_Result result;
- HeapUpdateFailureData hufd;
+ TM_Result result;
+ TM_FailureData tmfd;
List *recheckIndexes = NIL;
TupleConversionMap *saved_tcs_map = NULL;
@@ -960,6 +1002,7 @@ ExecUpdate(ModifyTableState *mtstate,
{
LockTupleMode lockmode;
bool partition_constraint_failed;
+ bool update_indexes;
/*
* Constraints might reference the tableoid column, so (re-)initialize
@@ -973,11 +1016,14 @@ ExecUpdate(ModifyTableState *mtstate,
* If we generate a new candidate tuple after EvalPlanQual testing, we
* must loop back here and recheck any RLS policies and constraints.
* (We don't need to redo triggers, however. If there are any BEFORE
- * triggers then trigger.c will have done heap_lock_tuple to lock the
+ * triggers then trigger.c will have done table_lock_tuple to lock the
* correct tuple, so there's no need to do them again.)
*/
lreplace:;
+ /* ensure slot is independent, consider e.g. EPQ */
+ ExecMaterializeSlot(slot);
+
/*
* If partition constraint fails, this row might get moved to another
* partition, in which case we should check the RLS CHECK policy just
@@ -1145,18 +1191,16 @@ lreplace:;
* needed for referential integrity updates in transaction-snapshot
* mode transactions.
*/
- updatetuple = ExecFetchSlotHeapTuple(slot, true, NULL);
- result = heap_update(resultRelationDesc, tupleid,
- updatetuple,
- estate->es_output_cid,
- estate->es_crosscheck_snapshot,
- true /* wait for commit */ ,
- &hufd, &lockmode);
- ItemPointerCopy(&updatetuple->t_self, &slot->tts_tid);
+ result = table_update(resultRelationDesc, tupleid, slot,
+ estate->es_output_cid,
+ estate->es_snapshot,
+ estate->es_crosscheck_snapshot,
+ true /* wait for commit */ ,
+ &tmfd, &lockmode, &update_indexes);
switch (result)
{
- case HeapTupleSelfUpdated:
+ case TM_SelfModified:
/*
* The target tuple was already updated or deleted by the
@@ -1181,7 +1225,7 @@ lreplace:;
* can re-execute the UPDATE (assuming it can figure out how)
* and then return NULL to cancel the outer update.
*/
- if (hufd.cmax != estate->es_output_cid)
+ if (tmfd.cmax != estate->es_output_cid)
ereport(ERROR,
(errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
@@ -1190,64 +1234,81 @@ lreplace:;
/* Else, already updated by self; nothing to do */
return NULL;
- case HeapTupleMayBeUpdated:
+ case TM_Ok:
break;
- case HeapTupleUpdated:
- if (IsolationUsesXactSnapshot())
- ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("could not serialize access due to concurrent update")));
- if (ItemPointerIndicatesMovedPartitions(&hufd.ctid))
- ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("tuple to be updated was already moved to another partition due to concurrent update")));
-
- if (!ItemPointerEquals(tupleid, &hufd.ctid))
+ case TM_Updated:
{
+ TupleTableSlot *inputslot;
TupleTableSlot *epqslot;
- epqslot = EvalPlanQual(estate,
- epqstate,
- resultRelationDesc,
- resultRelInfo->ri_RangeTableIndex,
- lockmode,
- &hufd.ctid,
- hufd.xmax);
- if (!TupIsNull(epqslot))
+ if (IsolationUsesXactSnapshot())
+ ereport(ERROR,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("could not serialize access due to concurrent update")));
+
+ /*
+ * Already know that we're going to need to do EPQ, so
+ * fetch tuple directly into the right slot.
+ */
+ EvalPlanQualBegin(epqstate, estate);
+ inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
+ resultRelInfo->ri_RangeTableIndex);
+
+ result = table_lock_tuple(resultRelationDesc, tupleid,
+ estate->es_snapshot,
+ inputslot, estate->es_output_cid,
+ lockmode, LockWaitBlock,
+ TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
+ &tmfd);
+
+ switch (result)
{
- *tupleid = hufd.ctid;
- slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot);
- goto lreplace;
+ case TM_Ok:
+ Assert(tmfd.traversed);
+
+ epqslot = EvalPlanQual(estate,
+ epqstate,
+ resultRelationDesc,
+ resultRelInfo->ri_RangeTableIndex,
+ inputslot);
+ if (TupIsNull(epqslot))
+ /* Tuple not passing quals anymore, exiting... */
+ return NULL;
+
+ slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot);
+ goto lreplace;
+
+ case TM_Deleted:
+ /* tuple already deleted; nothing to do */
+ return NULL;
+
+ default:
+ /* see table_lock_tuple call in ExecDelete() */
+ elog(ERROR, "unexpected table_lock_tuple status: %u",
+ result);
+ return NULL;
}
}
+
+ break;
+
+ case TM_Deleted:
+ if (IsolationUsesXactSnapshot())
+ ereport(ERROR,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("could not serialize access due to concurrent delete")));
/* tuple already deleted; nothing to do */
return NULL;
default:
- elog(ERROR, "unrecognized heap_update status: %u", result);
+ elog(ERROR, "unrecognized table_update status: %u", result);
return NULL;
}
- /*
- * Note: instead of having to update the old index tuples associated
- * with the heap tuple, all we do is form and insert new index tuples.
- * This is because UPDATEs are actually DELETEs and INSERTs, and index
- * tuple deletion is done later by VACUUM (see notes in ExecDelete).
- * All we do here is insert new index tuples. -cim 9/27/89
- */
-
- /*
- * insert index entries for tuple
- *
- * Note: heap_update returns the tid (location) of the new tuple in
- * the t_self field.
- *
- * If it's a HOT update, we mustn't insert new index entries.
- */
- if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(updatetuple))
- recheckIndexes = ExecInsertIndexTuples(slot, &(updatetuple->t_self),
- estate, false, NULL, NIL);
+ /* insert index entries for tuple if necessary */
+ if (resultRelInfo->ri_NumIndices > 0 && update_indexes)
+ recheckIndexes = ExecInsertIndexTuples(slot, estate, false, NULL, NIL);
}
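
The update path mirrors this; in sketch form, the call and the index maintenance now look like (names as in ExecUpdate above):

    result = table_update(resultRelationDesc, tupleid, slot,
                          estate->es_output_cid,
                          estate->es_snapshot,
                          estate->es_crosscheck_snapshot,
                          true /* wait for commit */,
                          &tmfd, &lockmode, &update_indexes);

    if (result == TM_Ok &&
        resultRelInfo->ri_NumIndices > 0 && update_indexes)
        recheckIndexes = ExecInsertIndexTuples(slot, estate,
                                               false, NULL, NIL);
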
if (canSetTag)
@@ -1306,11 +1367,12 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
Relation relation = resultRelInfo->ri_RelationDesc;
ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
- HeapTupleData tuple;
- HeapUpdateFailureData hufd;
+ TM_FailureData tmfd;
LockTupleMode lockmode;
- HTSU_Result test;
- Buffer buffer;
+ TM_Result test;
+ Datum xminDatum;
+ TransactionId xmin;
+ bool isnull;
/* Determine lock mode to use */
lockmode = ExecUpdateLockMode(estate, resultRelInfo);
@@ -1321,35 +1383,42 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
* previous conclusion that the tuple is conclusively committed is not
* true anymore.
*/
- tuple.t_self = *conflictTid;
- test = heap_lock_tuple(relation, &tuple, estate->es_output_cid,
- lockmode, LockWaitBlock, false, &buffer,
- &hufd);
+ test = table_lock_tuple(relation, conflictTid,
+ estate->es_snapshot,
+ existing, estate->es_output_cid,
+ lockmode, LockWaitBlock, 0,
+ &tmfd);
switch (test)
{
- case HeapTupleMayBeUpdated:
+ case TM_Ok:
/* success! */
break;
- case HeapTupleInvisible:
+ case TM_Invisible:
/*
* This can occur when a just inserted tuple is updated again in
* the same command. E.g. because multiple rows with the same
* conflicting key values are inserted.
*
- * This is somewhat similar to the ExecUpdate()
- * HeapTupleSelfUpdated case. We do not want to proceed because
- * it would lead to the same row being updated a second time in
- * some unspecified order, and in contrast to plain UPDATEs
- * there's no historical behavior to break.
+ * This is somewhat similar to the ExecUpdate() TM_SelfModified
+ * case. We do not want to proceed because it would lead to the
+ * same row being updated a second time in some unspecified order,
+ * and in contrast to plain UPDATEs there's no historical behavior
+ * to break.
*
* It is the user's responsibility to prevent this situation from
* occurring. These problems are why SQL-2003 similarly specifies
* that for SQL MERGE, an exception must be raised in the event of
* an attempt to update the same row twice.
*/
- if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple.t_data)))
+ xminDatum = slot_getsysattr(existing,
+ MinTransactionIdAttributeNumber,
+ &isnull);
+ Assert(!isnull);
+ xmin = DatumGetTransactionId(xminDatum);
+
+ if (TransactionIdIsCurrentTransactionId(xmin))
ereport(ERROR,
(errcode(ERRCODE_CARDINALITY_VIOLATION),
errmsg("ON CONFLICT DO UPDATE command cannot affect row a second time"),
@@ -1359,7 +1428,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
elog(ERROR, "attempted to lock invisible tuple");
break;
- case HeapTupleSelfUpdated:
+ case TM_SelfModified:
/*
* This state should never be reached. As a dirty snapshot is used
@@ -1369,7 +1438,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
elog(ERROR, "unexpected self-updated tuple");
break;
- case HeapTupleUpdated:
+ case TM_Updated:
if (IsolationUsesXactSnapshot())
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
@@ -1381,7 +1450,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
* be locked is moved to another partition due to concurrent update
* of the partition key.
*/
- Assert(!ItemPointerIndicatesMovedPartitions(&hufd.ctid));
+ Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
/*
* Tell caller to try again from the very start.
@@ -1390,11 +1459,22 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
* loop here, as the new version of the row might not conflict
* anymore, or the conflicting tuple has actually been deleted.
*/
- ReleaseBuffer(buffer);
+ ExecClearTuple(existing);
+ return false;
+
+ case TM_Deleted:
+ if (IsolationUsesXactSnapshot())
+ ereport(ERROR,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("could not serialize access due to concurrent delete")));
+
+ /* see TM_Updated case */
+ Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
+ ExecClearTuple(existing);
return false;
default:
- elog(ERROR, "unrecognized heap_lock_tuple status: %u", test);
+ elog(ERROR, "unrecognized table_lock_tuple status: %u", test);
}
/* Success, the tuple is locked. */
@@ -1412,10 +1492,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
* snapshot. This is in line with the way UPDATE deals with newer tuple
* versions.
*/
- ExecCheckHeapTupleVisible(estate, &tuple, buffer);
-
- /* Store target's existing tuple in the state's dedicated slot */
- ExecStorePinnedBufferHeapTuple(&tuple, existing, buffer);
+ ExecCheckTupleVisible(estate, relation, existing);
/*
* Make tuple and any needed join variables available to ExecQual and
@@ -1462,7 +1539,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
/*
* Note that it is possible that the target tuple has been modified in
- * this session, after the above heap_lock_tuple. We choose to not error
+ * this session, after the above table_lock_tuple. We choose to not error
* out in that case, in line with ExecUpdate's treatment of similar cases.
* This can happen if an UPDATE is triggered from within ExecQual(),
* ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
@@ -1470,7 +1547,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
*/
/* Execute UPDATE with projection */
- *returning = ExecUpdate(mtstate, &tuple.t_self, NULL,
+ *returning = ExecUpdate(mtstate, conflictTid, NULL,
resultRelInfo->ri_onConflict->oc_ProjSlot,
planSlot,
&mtstate->mt_epqstate, mtstate->ps.state,
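
ExecOnConflictUpdate() thus locks the conflicting row directly into the dedicated 'existing' slot; a condensed sketch of the new flow (variables as in the hunks above):

    test = table_lock_tuple(relation, conflictTid, estate->es_snapshot,
                            existing, estate->es_output_cid,
                            lockmode, LockWaitBlock, 0, &tmfd);
    if (test == TM_Ok)
    {
        /* row is locked and stored in 'existing'; recheck visibility */
        ExecCheckTupleVisible(estate, relation, existing);
        /* ... evaluate the WHERE clause and ExecUpdate() via conflictTid ... */
    }
    else
    {
        /* e.g. TM_Updated/TM_Deleted: drop the slot and retry from scratch */
        ExecClearTuple(existing);
    }
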
diff --git a/src/backend/executor/nodeTidscan.c b/src/backend/executor/nodeTidscan.c
index 08872ef9b4f..0e6a0748c8c 100644
--- a/src/backend/executor/nodeTidscan.c
+++ b/src/backend/executor/nodeTidscan.c
@@ -376,7 +376,7 @@ TidNext(TidScanState *node)
if (node->tss_isCurrentOf)
heap_get_latest_tid(heapRelation, snapshot, &tuple->t_self);
- if (heap_fetch(heapRelation, snapshot, tuple, &buffer, false, NULL))
+ if (heap_fetch(heapRelation, snapshot, tuple, &buffer, NULL))
{
/*
* Store the scanned tuple in the scan tuple slot of the scan