author    | Alvaro Herrera <alvherre@alvh.no-ip.org> | 2022-04-07 21:04:36 +0200
committer | Alvaro Herrera <alvherre@alvh.no-ip.org> | 2022-04-07 21:10:03 +0200
commit    | 99392cdd78b788295e52b9f4942fa11992fd5ba9 (patch)
tree      | 1c8929b166e87df54b3bd86e389a06be6674a154 /src/backend/executor/nodeLockRows.c
parent    | dbe29b0d2c96f34b3f3222c6fc1710fcff065f18 (diff)
download  | postgresql-99392cdd78b788295e52b9f4942fa11992fd5ba9.tar.gz, postgresql-99392cdd78b788295e52b9f4942fa11992fd5ba9.zip
Rewrite some RI code to avoid using SPI
Modify the subroutines called by RI trigger functions to check whether a
given referenced value exists in the referenced relation by scanning the
foreign key constraint's unique index directly, instead of using SPI to
execute
SELECT 1 FROM referenced_relation WHERE ref_key = $1
This saves a lot of work, especially when inserting into or updating a
referencing relation.
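
As a concrete illustration, the sketch below shows what a direct probe of the
referenced table's unique index can look like at the index-AM level, assuming
a single-column int4 key and the API as of this era of the tree. The helper
name ref_key_exists and its parameters are hypothetical, and this is not the
code the commit adds to ri_triggers.c; it only shows the kind of scan that
replaces the SPI "SELECT 1 ..." plan.

/* Illustrative sketch only -- includes are approximate */
#include "postgres.h"
#include "access/genam.h"
#include "access/skey.h"
#include "access/stratnum.h"
#include "access/tableam.h"
#include "executor/tuptable.h"
#include "utils/fmgroids.h"

/*
 * ref_key_exists -- hypothetical helper, not the commit's ri_triggers.c code.
 * Probe the referenced relation's unique index for one int4 key value and
 * report whether any visible row matches, the job the SPI plan used to do.
 */
static bool
ref_key_exists(Relation pk_rel, Relation idx_rel, int32 keyval,
               Snapshot snapshot)
{
    ScanKeyData skey;
    IndexScanDesc scan;
    TupleTableSlot *slot;
    bool        found;

    /* equality scan key on the index's first (and only) column */
    ScanKeyInit(&skey, 1, BTEqualStrategyNumber, F_INT4EQ,
                Int32GetDatum(keyval));

    slot = table_slot_create(pk_rel, NULL);
    scan = index_beginscan(pk_rel, idx_rel, snapshot, 1, 0);
    index_rescan(scan, &skey, 1, NULL, 0);

    /* one visible match is enough: the referenced row exists */
    found = index_getnext_slot(scan, ForwardScanDirection, slot);

    index_endscan(scan);
    ExecDropSingleTupleTableSlot(slot);

    return found;
}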
This rewrite makes it possible to fix a PK row visibility bug caused by a
partition descriptor hack that requires ActiveSnapshot to be set in order to
come up with the correct set of partitions for the RI query running under
REPEATABLE READ isolation. We now set that snapshot independently of the snapshot
to be used by the PK index scan, so the two no longer interfere. The
buggy output in src/test/isolation/expected/fk-snapshot.out of the
relevant test case added by commit 00cb86e75d6d has been corrected.
(The bug still exists in branch 14, but this fix is too invasive to
backpatch.)
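
The sketch below illustrates that snapshot separation under stated
assumptions: the helper name choose_ri_scan_snapshot is hypothetical and the
commit's actual ri_triggers.c logic differs in detail, but the snapshot-manager
calls shown (GetLatestSnapshot, GetTransactionSnapshot, PushActiveSnapshot)
are the standard ones.

#include "postgres.h"
#include "access/xact.h"
#include "utils/snapmgr.h"

/*
 * choose_ri_scan_snapshot -- hypothetical helper, not code from this commit.
 * Pick the snapshot for the PK index scan and set up ActiveSnapshot for the
 * partition descriptor code, independently of each other.
 */
static Snapshot
choose_ri_scan_snapshot(void)
{
    Snapshot    test_snapshot;

    /*
     * Under REPEATABLE READ / SERIALIZABLE the check must still see the
     * latest committed PK row, so take a fresh snapshot instead of reusing
     * the transaction snapshot.
     */
    if (IsolationUsesXactSnapshot())
        test_snapshot = GetLatestSnapshot();
    else
        test_snapshot = GetTransactionSnapshot();

    /*
     * The partition descriptor machinery only needs *some* ActiveSnapshot to
     * be set; push one here without tying it to test_snapshot, so the two no
     * longer interfere.  The caller pops it after the index scan.
     */
    PushActiveSnapshot(GetTransactionSnapshot());

    return test_snapshot;
}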
Author: Amit Langote <amitlangote09@gmail.com>
Reviewed-by: Kyotaro Horiguchi <horikyota.ntt@gmail.com>
Reviewed-by: Corey Huinker <corey.huinker@gmail.com>
Reviewed-by: Li Japin <japinli@hotmail.com>
Reviewed-by: Tom Lane <tgl@sss.pgh.pa.us>
Reviewed-by: Zhihong Yu <zyu@yugabyte.com>
Discussion: https://postgr.es/m/CA+HiwqGkfJfYdeq5vHPh6eqPKjSbfpDDY+j-kXYFePQedtSLeg@mail.gmail.com
Diffstat (limited to 'src/backend/executor/nodeLockRows.c')
-rw-r--r-- | src/backend/executor/nodeLockRows.c | 161 |
1 file changed, 90 insertions(+), 71 deletions(-)
diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c
index 1a9dab25dd6..bbccafb2cfd 100644
--- a/src/backend/executor/nodeLockRows.c
+++ b/src/backend/executor/nodeLockRows.c
@@ -79,10 +79,7 @@ lnext:
         Datum       datum;
         bool        isNull;
         ItemPointerData tid;
-        TM_FailureData tmfd;
         LockTupleMode lockmode;
-        int         lockflags = 0;
-        TM_Result   test;
         TupleTableSlot *markSlot;
 
         /* clear any leftover test tuple for this rel */
@@ -179,74 +176,11 @@ lnext:
                 break;
         }
 
-        lockflags = TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS;
-        if (!IsolationUsesXactSnapshot())
-            lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
-
-        test = table_tuple_lock(erm->relation, &tid, estate->es_snapshot,
-                                markSlot, estate->es_output_cid,
-                                lockmode, erm->waitPolicy,
-                                lockflags,
-                                &tmfd);
-
-        switch (test)
-        {
-            case TM_WouldBlock:
-                /* couldn't lock tuple in SKIP LOCKED mode */
-                goto lnext;
-
-            case TM_SelfModified:
-
-                /*
-                 * The target tuple was already updated or deleted by the
-                 * current command, or by a later command in the current
-                 * transaction.  We *must* ignore the tuple in the former
-                 * case, so as to avoid the "Halloween problem" of repeated
-                 * update attempts.  In the latter case it might be sensible
-                 * to fetch the updated tuple instead, but doing so would
-                 * require changing heap_update and heap_delete to not
-                 * complain about updating "invisible" tuples, which seems
-                 * pretty scary (table_tuple_lock will not complain, but few
-                 * callers expect TM_Invisible, and we're not one of them).  So
-                 * for now, treat the tuple as deleted and do not process.
-                 */
-                goto lnext;
-
-            case TM_Ok:
-
-                /*
-                 * Got the lock successfully, the locked tuple saved in
-                 * markSlot for, if needed, EvalPlanQual testing below.
-                 */
-                if (tmfd.traversed)
-                    epq_needed = true;
-                break;
-
-            case TM_Updated:
-                if (IsolationUsesXactSnapshot())
-                    ereport(ERROR,
-                            (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
-                             errmsg("could not serialize access due to concurrent update")));
-                elog(ERROR, "unexpected table_tuple_lock status: %u",
-                     test);
-                break;
-
-            case TM_Deleted:
-                if (IsolationUsesXactSnapshot())
-                    ereport(ERROR,
-                            (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
-                             errmsg("could not serialize access due to concurrent update")));
-                /* tuple was deleted so don't return it */
-                goto lnext;
-
-            case TM_Invisible:
-                elog(ERROR, "attempted to lock invisible tuple");
-                break;
-
-            default:
-                elog(ERROR, "unrecognized table_tuple_lock status: %u",
-                     test);
-        }
+        /* skip tuple if it couldn't be locked */
+        if (!ExecLockTableTuple(erm->relation, &tid, markSlot,
+                                estate->es_snapshot, estate->es_output_cid,
+                                lockmode, erm->waitPolicy, &epq_needed))
+            goto lnext;
 
         /* Remember locked tuple's TID for EPQ testing and WHERE CURRENT OF */
         erm->curCtid = tid;
@@ -281,6 +215,91 @@ lnext:
     return slot;
 }
 
+/*
+ * ExecLockTableTuple
+ *        Locks tuple with the specified TID in lockmode following given wait
+ *        policy
+ *
+ * Returns true if the tuple was successfully locked.  Locked tuple is loaded
+ * into provided slot.
+ */
+bool
+ExecLockTableTuple(Relation relation, ItemPointer tid, TupleTableSlot *slot,
+                   Snapshot snapshot, CommandId cid,
+                   LockTupleMode lockmode, LockWaitPolicy waitPolicy,
+                   bool *epq_needed)
+{
+    TM_FailureData tmfd;
+    int         lockflags = TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS;
+    TM_Result   test;
+
+    if (!IsolationUsesXactSnapshot())
+        lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
+
+    test = table_tuple_lock(relation, tid, snapshot, slot, cid, lockmode,
+                            waitPolicy, lockflags, &tmfd);
+
+    switch (test)
+    {
+        case TM_WouldBlock:
+            /* couldn't lock tuple in SKIP LOCKED mode */
+            return false;
+
+        case TM_SelfModified:
+
+            /*
+             * The target tuple was already updated or deleted by the current
+             * command, or by a later command in the current transaction.  We
+             * *must* ignore the tuple in the former case, so as to avoid the
+             * "Halloween problem" of repeated update attempts.  In the latter
+             * case it might be sensible to fetch the updated tuple instead,
+             * but doing so would require changing heap_update and heap_delete
+             * to not complain about updating "invisible" tuples, which seems
+             * pretty scary (table_tuple_lock will not complain, but few
+             * callers expect TM_Invisible, and we're not one of them).  So for
+             * now, treat the tuple as deleted and do not process.
+             */
+            return false;
+
+        case TM_Ok:
+
+            /*
+             * Got the lock successfully, the locked tuple saved in slot for
+             * EvalPlanQual, if asked by the caller.
+             */
+            if (tmfd.traversed && epq_needed)
+                *epq_needed = true;
+            break;
+
+        case TM_Updated:
+            if (IsolationUsesXactSnapshot())
+                ereport(ERROR,
+                        (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+                         errmsg("could not serialize access due to concurrent update")));
+            elog(ERROR, "unexpected table_tuple_lock status: %u", test);
+            break;
+
+        case TM_Deleted:
+            if (IsolationUsesXactSnapshot())
+                ereport(ERROR,
+                        (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+                         errmsg("could not serialize access due to concurrent update")));
+            /* tuple was deleted so don't return it */
+            return false;
+
+        case TM_Invisible:
+            elog(ERROR, "attempted to lock invisible tuple");
+            return false;
+
+        default:
+            elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
+            return false;
+    }
+
+    return true;
+}
+
 /* ----------------------------------------------------------------
  *        ExecInitLockRows
  *
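
For context, here is a hedged sketch of how code outside nodeLockRows.c might
call the new ExecLockTableTuple helper once it has fetched a PK tuple into a
slot. The wrapper lock_found_pk_tuple, its parameters, and the choice of a KEY
SHARE lock with a blocking wait policy are illustrative assumptions, not code
from this commit; includes are approximate.

#include "postgres.h"
#include "access/xact.h"
#include "executor/executor.h"    /* assuming ExecLockTableTuple is exported here */
#include "nodes/lockoptions.h"
#include "utils/snapshot.h"

/*
 * lock_found_pk_tuple -- hypothetical illustration, not part of the commit.
 * Lock the PK tuple already fetched into 'slot' (KEY SHARE, blocking wait),
 * roughly the way an RI check could use the helper above.
 */
static bool
lock_found_pk_tuple(Relation pk_rel, TupleTableSlot *slot, Snapshot snapshot)
{
    bool        epq_needed = false;

    if (!ExecLockTableTuple(pk_rel, &slot->tts_tid, slot, snapshot,
                            GetCurrentCommandId(false),
                            LockTupleKeyShare, LockWaitBlock,
                            &epq_needed))
        return false;           /* concurrently deleted; treat as not found */

    /*
     * If the lock had to chase an updated row version, epq_needed is set and
     * the caller would re-verify that the row still satisfies the key check.
     */
    return true;
}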