aboutsummaryrefslogtreecommitdiff
path: root/src/backend/executor/nodeLockRows.c
diff options
context:
space:
mode:
authorKevin Grittner <kgrittn@postgresql.org>2012-10-26 14:55:36 -0500
committerKevin Grittner <kgrittn@postgresql.org>2012-10-26 14:55:36 -0500
commit6868ed7491b7ea7f0af6133bb66566a2f5fe5a75 (patch)
tree9bef0955809293a5104e4fb0efef6b33a93e80dc /src/backend/executor/nodeLockRows.c
parent17804fa71b4a4e7a099f780616a7b53ea591774d (diff)
downloadpostgresql-6868ed7491b7ea7f0af6133bb66566a2f5fe5a75.tar.gz
postgresql-6868ed7491b7ea7f0af6133bb66566a2f5fe5a75.zip
Throw error if expiring tuple is again updated or deleted.
This prevents surprising behavior when a FOR EACH ROW trigger BEFORE UPDATE or BEFORE DELETE directly or indirectly updates or deletes the old row. Prior to this patch the requested action on the row could be silently ignored while all triggered actions based on the occurrence of the requested action could be committed. One example of how this could happen is if the BEFORE DELETE trigger for a "parent" row deleted "children" which had trigger functions to update summary or status data on the parent. This also prevents similar surprising problems if the query has a volatile function which updates a target row while it is already being updated. There are related issues present in FOR UPDATE cursors and READ COMMITTED queries which are not handled by this patch. These issues need further evaluation to determine what change, if any, is needed. Where the new error messages are generated, in most cases the best fix will be to move code from the BEFORE trigger to an AFTER trigger. Where this is not feasible, the trigger can avoid the error by re-issuing the triggering statement and returning NULL. Documentation changes will be submitted in a separate patch. Kevin Grittner and Tom Lane with input from Florian Pflug and Robert Haas, based on problems encountered during conversion of Wisconsin Circuit Court trigger logic to plpgsql triggers.
Diffstat (limited to 'src/backend/executor/nodeLockRows.c')
-rw-r--r--src/backend/executor/nodeLockRows.c27
1 files changed, 18 insertions, 9 deletions
diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c
index ec0825b460f..6474393d7f4 100644
--- a/src/backend/executor/nodeLockRows.c
+++ b/src/backend/executor/nodeLockRows.c
@@ -71,8 +71,7 @@ lnext:
bool isNull;
HeapTupleData tuple;
Buffer buffer;
- ItemPointerData update_ctid;
- TransactionId update_xmax;
+ HeapUpdateFailureData hufd;
LockTupleMode lockmode;
HTSU_Result test;
HeapTuple copyTuple;
@@ -117,15 +116,26 @@ lnext:
else
lockmode = LockTupleShared;
- test = heap_lock_tuple(erm->relation, &tuple, &buffer,
- &update_ctid, &update_xmax,
+ test = heap_lock_tuple(erm->relation, &tuple,
estate->es_output_cid,
- lockmode, erm->noWait);
+ lockmode, erm->noWait,
+ &buffer, &hufd);
ReleaseBuffer(buffer);
switch (test)
{
case HeapTupleSelfUpdated:
- /* treat it as deleted; do not process */
+ /*
+ * The target tuple was already updated or deleted by the
+ * current command, or by a later command in the current
+ * transaction. We *must* ignore the tuple in the former
+ * case, so as to avoid the "Halloween problem" of repeated
+ * update attempts. In the latter case it might be sensible
+ * to fetch the updated tuple instead, but doing so would
+ * require changing heap_lock_tuple as well as heap_update and
+ * heap_delete to not complain about updating "invisible"
+ * tuples, which seems pretty scary. So for now, treat the
+ * tuple as deleted and do not process.
+ */
goto lnext;
case HeapTupleMayBeUpdated:
@@ -137,8 +147,7 @@ lnext:
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to concurrent update")));
- if (ItemPointerEquals(&update_ctid,
- &tuple.t_self))
+ if (ItemPointerEquals(&hufd.ctid, &tuple.t_self))
{
/* Tuple was deleted, so don't return it */
goto lnext;
@@ -146,7 +155,7 @@ lnext:
/* updated, so fetch and lock the updated version */
copyTuple = EvalPlanQualFetch(estate, erm->relation, lockmode,
- &update_ctid, update_xmax);
+ &hufd.ctid, hufd.xmax);
if (copyTuple == NULL)
{