diff options
author | Andres Freund <andres@anarazel.de> | 2019-03-23 19:55:57 -0700 |
---|---|---|
committer | Andres Freund <andres@anarazel.de> | 2019-03-23 19:55:57 -0700 |
commit | 5db6df0c0117ff2a4e0cd87594d2db408cd5022f (patch) | |
tree | 7b06b96b6f8c1b7e4cdfb602af357f81e21f23b1 /src/backend/commands/trigger.c | |
parent | f778e537a0d02d5e05016da3e6f4068914101dee (diff) | |
download | postgresql-5db6df0c0117ff2a4e0cd87594d2db408cd5022f.tar.gz postgresql-5db6df0c0117ff2a4e0cd87594d2db408cd5022f.zip |
tableam: Add tuple_{insert, delete, update, lock} and use.
This adds new, required, table AM callbacks for insert/delete/update
and lock_tuple. To be able to reasonably use those, the EvalPlanQual
mechanism had to be adapted, moving more logic into the AM.
Previously both delete/update/lock call-sites and the EPQ mechanism had
to have awareness of the specific tuple format to be able to fetch the
latest version of a tuple. Obviously that needs to be abstracted
away. To do so, move the logic that finds the latest row version into
the AM. lock_tuple has a new flag argument,
TUPLE_LOCK_FLAG_FIND_LAST_VERSION, that forces it to lock the last
version, rather than the current one. It'd have been possible to do
so via a separate callback as well, but finding the last version
usually also necessitates locking the newest version, making it
sensible to combine the two. This replaces the previous use of
EvalPlanQualFetch(). Additionally HeapTupleUpdated, which previously
signaled either a concurrent update or delete, is now split into two,
to avoid callers needing AM specific knowledge to differentiate.
The move of finding the latest row version into tuple_lock means that
encountering a row concurrently moved into another partition will now
raise an error about "tuple to be locked" rather than "tuple to be
updated/deleted" - which is accurate, as that always happens when
locking rows. While possibly slightly less helpful for users, it seems
like an acceptable trade-off.
As part of this commit HTSU_Result has been renamed to TM_Result, and
its members have been expanded to differentiate between updating and
deleting. HeapUpdateFailureData has been renamed to TM_FailureData.
The interface to speculative insertion is changed so nodeModifyTable.c
does not have to set the speculative token itself anymore. Instead
there's a version of tuple_insert, tuple_insert_speculative, that
performs the speculative insertion (without requiring a flag to signal
that fact), and the speculative insertion is either made permanent
with table_complete_speculative(succeeded = true) or aborted with
table_complete_speculative(succeeded = false).
Note that multi_insert is not yet routed through tableam, nor is
COPY. Changing multi_insert requires changes to copy.c that are large
enough to better be done separately.
Similarly, although simpler, CREATE TABLE AS and CREATE MATERIALIZED
VIEW are also only going to be adjusted in a later commit.
Author: Andres Freund and Haribabu Kommi
Discussion:
https://postgr.es/m/20180703070645.wchpu5muyto5n647@alap3.anarazel.de
https://postgr.es/m/20190313003903.nwvrxi7rw3ywhdel@alap3.anarazel.de
https://postgr.es/m/20160812231527.GA690404@alvherre.pgsql
Diffstat (limited to 'src/backend/commands/trigger.c')
-rw-r--r-- | src/backend/commands/trigger.c | 112 |
1 files changed, 51 insertions, 61 deletions
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index 71098896947..bf12b848105 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -15,6 +15,7 @@ #include "access/genam.h" #include "access/heapam.h" +#include "access/tableam.h" #include "access/sysattr.h" #include "access/htup_details.h" #include "access/xact.h" @@ -3285,19 +3286,12 @@ GetTupleForTrigger(EState *estate, TupleTableSlot **newSlot) { Relation relation = relinfo->ri_RelationDesc; - HeapTuple tuple; - Buffer buffer; - BufferHeapTupleTableSlot *boldslot; - - Assert(TTS_IS_BUFFERTUPLE(oldslot)); - ExecClearTuple(oldslot); - boldslot = (BufferHeapTupleTableSlot *) oldslot; - tuple = &boldslot->base.tupdata; if (newSlot != NULL) { - HTSU_Result test; - HeapUpdateFailureData hufd; + TM_Result test; + TM_FailureData tmfd; + int lockflags = 0; *newSlot = NULL; @@ -3307,15 +3301,17 @@ GetTupleForTrigger(EState *estate, /* * lock tuple for update */ -ltrmark:; - tuple->t_self = *tid; - test = heap_lock_tuple(relation, tuple, - estate->es_output_cid, - lockmode, LockWaitBlock, - false, &buffer, &hufd); + if (!IsolationUsesXactSnapshot()) + lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION; + test = table_lock_tuple(relation, tid, estate->es_snapshot, oldslot, + estate->es_output_cid, + lockmode, LockWaitBlock, + lockflags, + &tmfd); + switch (test) { - case HeapTupleSelfUpdated: + case TM_SelfModified: /* * The target tuple was already updated or deleted by the @@ -3325,73 +3321,59 @@ ltrmark:; * enumerated in ExecUpdate and ExecDelete in * nodeModifyTable.c. 
*/ - if (hufd.cmax != estate->es_output_cid) + if (tmfd.cmax != estate->es_output_cid) ereport(ERROR, (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), errmsg("tuple to be updated was already modified by an operation triggered by the current command"), errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); /* treat it as deleted; do not process */ - ReleaseBuffer(buffer); return false; - case HeapTupleMayBeUpdated: - ExecStorePinnedBufferHeapTuple(tuple, oldslot, buffer); - - break; - - case HeapTupleUpdated: - ReleaseBuffer(buffer); - if (IsolationUsesXactSnapshot()) - ereport(ERROR, - (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), - errmsg("could not serialize access due to concurrent update"))); - if (ItemPointerIndicatesMovedPartitions(&hufd.ctid)) - ereport(ERROR, - (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), - errmsg("tuple to be locked was already moved to another partition due to concurrent update"))); - - if (!ItemPointerEquals(&hufd.ctid, &tuple->t_self)) + case TM_Ok: + if (tmfd.traversed) { - /* it was updated, so look at the updated version */ TupleTableSlot *epqslot; epqslot = EvalPlanQual(estate, epqstate, relation, relinfo->ri_RangeTableIndex, - lockmode, - &hufd.ctid, - hufd.xmax); - if (!TupIsNull(epqslot)) - { - *tid = hufd.ctid; + oldslot); - *newSlot = epqslot; + /* + * If PlanQual failed for updated tuple - we must not + * process this tuple! + */ + if (TupIsNull(epqslot)) + return false; - /* - * EvalPlanQual already locked the tuple, but we - * re-call heap_lock_tuple anyway as an easy way of - * re-fetching the correct tuple. Speed is hardly a - * criterion in this path anyhow. - */ - goto ltrmark; - } + *newSlot = epqslot; } + break; - /* - * if tuple was deleted or PlanQual failed for updated tuple - - * we must not process this tuple! 
- */ + case TM_Updated: + if (IsolationUsesXactSnapshot()) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + elog(ERROR, "unexpected table_lock_tuple status: %u", test); + break; + + case TM_Deleted: + if (IsolationUsesXactSnapshot()) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent delete"))); + /* tuple was deleted */ return false; - case HeapTupleInvisible: + case TM_Invisible: elog(ERROR, "attempted to lock invisible tuple"); break; default: - ReleaseBuffer(buffer); - elog(ERROR, "unrecognized heap_lock_tuple status: %u", test); + elog(ERROR, "unrecognized table_lock_tuple status: %u", test); return false; /* keep compiler quiet */ } } @@ -3399,6 +3381,14 @@ ltrmark:; { Page page; ItemId lp; + Buffer buffer; + BufferHeapTupleTableSlot *boldslot; + HeapTuple tuple; + + Assert(TTS_IS_BUFFERTUPLE(oldslot)); + ExecClearTuple(oldslot); + boldslot = (BufferHeapTupleTableSlot *) oldslot; + tuple = &boldslot->base.tupdata; buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid)); @@ -4286,7 +4276,7 @@ AfterTriggerExecute(EState *estate, LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo); ItemPointerCopy(&(event->ate_ctid1), &(tuple1.t_self)); - if (!heap_fetch(rel, SnapshotAny, &tuple1, &buffer, false, NULL)) + if (!heap_fetch(rel, SnapshotAny, &tuple1, &buffer, NULL)) elog(ERROR, "failed to fetch tuple1 for AFTER trigger"); ExecStorePinnedBufferHeapTuple(&tuple1, LocTriggerData.tg_trigslot, @@ -4310,7 +4300,7 @@ AfterTriggerExecute(EState *estate, LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo); ItemPointerCopy(&(event->ate_ctid2), &(tuple2.t_self)); - if (!heap_fetch(rel, SnapshotAny, &tuple2, &buffer, false, NULL)) + if (!heap_fetch(rel, SnapshotAny, &tuple2, &buffer, NULL)) elog(ERROR, "failed to fetch tuple2 for AFTER trigger"); ExecStorePinnedBufferHeapTuple(&tuple2, 
LocTriggerData.tg_newslot, |