Diffstat (limited to 'src/backend/access/heap/heapam.c')
-rw-r--r--  src/backend/access/heap/heapam.c | 205
 1 file changed, 155 insertions(+), 50 deletions(-)
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index cc67dd813d2..9a8c8e33487 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -2496,10 +2496,11 @@ xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
}
/*
- * heap_delete - delete a tuple
+ * heap_delete - delete a tuple, optionally fetching it into a slot
*
* See table_tuple_delete() for an explanation of the parameters, except that
- * this routine directly takes a tuple rather than a slot.
+ * this routine directly takes a tuple rather than a slot. Also, this
+ * routine does not itself lock the tuple; it just fetches the existing version.
*
* In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
* t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
@@ -2508,8 +2509,9 @@ xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
*/
TM_Result
heap_delete(Relation relation, ItemPointer tid,
- CommandId cid, Snapshot crosscheck, bool wait,
- TM_FailureData *tmfd, bool changingPart)
+ CommandId cid, Snapshot crosscheck, int options,
+ TM_FailureData *tmfd, bool changingPart,
+ TupleTableSlot *oldSlot)
{
TM_Result result;
TransactionId xid = GetCurrentTransactionId();
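For illustration, a caller that wants the deleted row back could now invoke heap_delete() roughly as below. This is a sketch only, not code from the patch: table_slot_create() is the stock way to obtain a BufferHeapTupleTableSlot for a heap relation, and the TABLE_MODIFY_* flags are the ones this patch series introduces.
	TupleTableSlot *oldSlot = table_slot_create(relation, NULL);
	TM_FailureData tmfd;
	TM_Result	result;
	result = heap_delete(relation, tid,
						 GetCurrentCommandId(true), InvalidSnapshot,
						 TABLE_MODIFY_WAIT | TABLE_MODIFY_FETCH_OLD_TUPLE,
						 &tmfd, false /* changingPart */ , oldSlot);
	/* on TM_Ok, oldSlot holds the just-deleted tuple version */
Callers that set no fetch/lock flag can still pass NULL for oldSlot, as the simple_heap_delete() hunk further down shows.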
@@ -2587,7 +2589,7 @@ l1:
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("attempted to delete invisible tuple")));
}
- else if (result == TM_BeingModified && wait)
+ else if (result == TM_BeingModified && (options & TABLE_MODIFY_WAIT))
{
TransactionId xwait;
uint16 infomask;
@@ -2728,7 +2730,30 @@ l1:
tmfd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
else
tmfd->cmax = InvalidCommandId;
- UnlockReleaseBuffer(buffer);
+
+ /*
+ * If we're asked to lock the updated tuple, we just fetch the
+ * existing tuple. That lets the caller save some resources when
+ * placing the lock.
+ */
+ if (result == TM_Updated &&
+ (options & TABLE_MODIFY_LOCK_UPDATED))
+ {
+ BufferHeapTupleTableSlot *bslot;
+
+ Assert(TTS_IS_BUFFERTUPLE(oldSlot));
+ bslot = (BufferHeapTupleTableSlot *) oldSlot;
+
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+ bslot->base.tupdata = tp;
+ ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata,
+ oldSlot,
+ buffer);
+ }
+ else
+ {
+ UnlockReleaseBuffer(buffer);
+ }
if (have_tuple_lock)
UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
if (vmbuffer != InvalidBuffer)
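The contract here, as this hunk reads: with TABLE_MODIFY_LOCK_UPDATED, a concurrent update no longer returns with the buffer released; the existing tuple version is handed back pinned in oldSlot, so the table-AM layer can go on to lock the newest version without re-fetching the old one. A hedged sketch of the caller side (hypothetical fragment, not part of the patch):
	/* hypothetical caller-side reaction to TM_Updated */
	if (result == TM_Updated && (options & TABLE_MODIFY_LOCK_UPDATED))
	{
		/*
		 * oldSlot already holds the existing tuple version, still pinned;
		 * chase tmfd.ctid to reach and lock the newest version instead of
		 * re-reading the old one.
		 */
	}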
@@ -2902,8 +2927,24 @@ l1:
*/
CacheInvalidateHeapTuple(relation, &tp, NULL);
- /* Now we can release the buffer */
- ReleaseBuffer(buffer);
+ /* Fetch the old tuple version if the caller asked for it. */
+ if (options & TABLE_MODIFY_FETCH_OLD_TUPLE)
+ {
+ BufferHeapTupleTableSlot *bslot;
+
+ Assert(TTS_IS_BUFFERTUPLE(oldSlot));
+ bslot = (BufferHeapTupleTableSlot *) oldSlot;
+
+ bslot->base.tupdata = tp;
+ ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata,
+ oldSlot,
+ buffer);
+ }
+ else
+ {
+ /* Now we can release the buffer */
+ ReleaseBuffer(buffer);
+ }
/*
* Release the lmgr tuple lock, if we had it.
@@ -2935,8 +2976,8 @@ simple_heap_delete(Relation relation, ItemPointer tid)
result = heap_delete(relation, tid,
GetCurrentCommandId(true), InvalidSnapshot,
- true /* wait for commit */ ,
- &tmfd, false /* changingPart */ );
+ TABLE_MODIFY_WAIT /* wait for commit */ ,
+ &tmfd, false /* changingPart */ , NULL);
switch (result)
{
case TM_SelfModified:
@@ -2963,10 +3004,11 @@ simple_heap_delete(Relation relation, ItemPointer tid)
}
/*
- * heap_update - replace a tuple
+ * heap_update - replace a tuple, optionally fetching it into a slot
*
* See table_tuple_update() for an explanation of the parameters, except that
- * this routine directly takes a tuple rather than a slot.
+ * this routine directly takes a tuple rather than a slot. Also, this
+ * routine does not itself lock the tuple; it just fetches the existing version.
*
* In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
* t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
@@ -2975,9 +3017,9 @@ simple_heap_delete(Relation relation, ItemPointer tid)
*/
TM_Result
heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
- CommandId cid, Snapshot crosscheck, bool wait,
+ CommandId cid, Snapshot crosscheck, int options,
TM_FailureData *tmfd, LockTupleMode *lockmode,
- TU_UpdateIndexes *update_indexes)
+ TU_UpdateIndexes *update_indexes, TupleTableSlot *oldSlot)
{
TM_Result result;
TransactionId xid = GetCurrentTransactionId();
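heap_update() follows the same pattern as heap_delete() above; a sketch of the new call shape (slot creation and the result/tmfd locals assumed as before):
	LockTupleMode lockmode;
	TU_UpdateIndexes update_indexes;
	TupleTableSlot *oldSlot = table_slot_create(relation, NULL);
	result = heap_update(relation, otid, newtup,
						 GetCurrentCommandId(true), InvalidSnapshot,
						 TABLE_MODIFY_WAIT | TABLE_MODIFY_FETCH_OLD_TUPLE,
						 &tmfd, &lockmode, &update_indexes, oldSlot);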
@@ -3154,7 +3196,7 @@ l2:
result = HeapTupleSatisfiesUpdate(&oldtup, cid, buffer);
/* see below about the "no wait" case */
- Assert(result != TM_BeingModified || wait);
+ Assert(result != TM_BeingModified || (options & TABLE_MODIFY_WAIT));
if (result == TM_Invisible)
{
@@ -3163,7 +3205,7 @@ l2:
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("attempted to update invisible tuple")));
}
- else if (result == TM_BeingModified && wait)
+ else if (result == TM_BeingModified && (options & TABLE_MODIFY_WAIT))
{
TransactionId xwait;
uint16 infomask;
@@ -3367,7 +3409,30 @@ l2:
tmfd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
else
tmfd->cmax = InvalidCommandId;
- UnlockReleaseBuffer(buffer);
+
+ /*
+ * If we're asked to lock the updated tuple, we just fetch the
+ * existing tuple. That lets the caller save some resources when
+ * placing the lock.
+ */
+ if (result == TM_Updated &&
+ (options & TABLE_MODIFY_LOCK_UPDATED))
+ {
+ BufferHeapTupleTableSlot *bslot;
+
+ Assert(TTS_IS_BUFFERTUPLE(oldSlot));
+ bslot = (BufferHeapTupleTableSlot *) oldSlot;
+
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+ bslot->base.tupdata = oldtup;
+ ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata,
+ oldSlot,
+ buffer);
+ }
+ else
+ {
+ UnlockReleaseBuffer(buffer);
+ }
if (have_tuple_lock)
UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
if (vmbuffer != InvalidBuffer)
@@ -3846,7 +3911,26 @@ l2:
/* Now we can release the buffer(s) */
if (newbuf != buffer)
ReleaseBuffer(newbuf);
- ReleaseBuffer(buffer);
+
+ /* Fetch the old tuple version if the caller asked for it. */
+ if (options & TABLE_MODIFY_FETCH_OLD_TUPLE)
+ {
+ BufferHeapTupleTableSlot *bslot;
+
+ Assert(TTS_IS_BUFFERTUPLE(oldSlot));
+ bslot = (BufferHeapTupleTableSlot *) oldSlot;
+
+ bslot->base.tupdata = oldtup;
+ ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata,
+ oldSlot,
+ buffer);
+ }
+ else
+ {
+ /* Now we can release the buffer */
+ ReleaseBuffer(buffer);
+ }
+
if (BufferIsValid(vmbuffer_new))
ReleaseBuffer(vmbuffer_new);
if (BufferIsValid(vmbuffer))
@@ -4054,8 +4138,8 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup,
result = heap_update(relation, otid, tup,
GetCurrentCommandId(true), InvalidSnapshot,
- true /* wait for commit */ ,
- &tmfd, &lockmode, update_indexes);
+ TABLE_MODIFY_WAIT /* wait for commit */ ,
+ &tmfd, &lockmode, update_indexes, NULL);
switch (result)
{
case TM_SelfModified:
@@ -4118,12 +4202,14 @@ get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
* tuples.
*
* Output parameters:
- * *tuple: all fields filled in
- * *buffer: set to buffer holding tuple (pinned but not locked at exit)
+ * *slot: BufferHeapTupleTableSlot filled with tuple
* *tmfd: filled in failure cases (see below)
*
* Function results are the same as the ones for table_tuple_lock().
*
+ * If *slot already contains the target tuple, this routine takes advantage
+ * of that by skipping the ReadBuffer() call.
+ *
* In the failure cases other than TM_Invisible, the routine fills
* *tmfd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
* if necessary), and t_cmax (the last only for TM_SelfModified,
@@ -4134,15 +4220,14 @@ get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
* See README.tuplock for a thorough explanation of this mechanism.
*/
TM_Result
-heap_lock_tuple(Relation relation, HeapTuple tuple,
+heap_lock_tuple(Relation relation, ItemPointer tid, TupleTableSlot *slot,
CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
- bool follow_updates,
- Buffer *buffer, TM_FailureData *tmfd)
+ bool follow_updates, TM_FailureData *tmfd)
{
TM_Result result;
- ItemPointer tid = &(tuple->t_self);
ItemId lp;
Page page;
+ Buffer buffer;
Buffer vmbuffer = InvalidBuffer;
BlockNumber block;
TransactionId xid,
@@ -4154,8 +4239,24 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
bool skip_tuple_lock = false;
bool have_tuple_lock = false;
bool cleared_all_frozen = false;
+ BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
+ HeapTuple tuple = &bslot->base.tupdata;
+
+ Assert(TTS_IS_BUFFERTUPLE(slot));
- *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
+ /* Reuse the existing pin if the slot already contains the relevant tuple */
+ if (!TTS_EMPTY(slot) &&
+ slot->tts_tableOid == relation->rd_id &&
+ ItemPointerCompare(&slot->tts_tid, tid) == 0 &&
+ BufferIsValid(bslot->buffer))
+ {
+ buffer = bslot->buffer;
+ IncrBufferRefCount(buffer);
+ }
+ else
+ {
+ buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
+ }
block = ItemPointerGetBlockNumber(tid);
/*
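A note on pin accounting in the fast path above (my reading of the slot machinery, not text from the patch): the slot keeps its own pin on bslot->buffer, so IncrBufferRefCount() gives heap_lock_tuple() a working pin of its own; the ExecStorePinnedBufferHeapTuple() call at the end of the function transfers that working pin back to the slot, which drops the duplicate, leaving the count balanced:
	/*
	 * assumed pin accounting for the fast path:
	 *   on entry:             slot holds one pin on the buffer
	 *   IncrBufferRefCount(): +1, heap_lock_tuple's working pin
	 *   ExecStorePinnedBufferHeapTuple(): working pin transferred to the
	 *     slot, duplicate released -> net one pin again
	 */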
@@ -4164,21 +4265,22 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
* in the middle of changing this, so we'll need to recheck after we have
* the lock.
*/
- if (PageIsAllVisible(BufferGetPage(*buffer)))
+ if (PageIsAllVisible(BufferGetPage(buffer)))
visibilitymap_pin(relation, block, &vmbuffer);
- LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
- page = BufferGetPage(*buffer);
+ page = BufferGetPage(buffer);
lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
Assert(ItemIdIsNormal(lp));
+ tuple->t_self = *tid;
tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
tuple->t_len = ItemIdGetLength(lp);
tuple->t_tableOid = RelationGetRelid(relation);
l3:
- result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
+ result = HeapTupleSatisfiesUpdate(tuple, cid, buffer);
if (result == TM_Invisible)
{
@@ -4207,7 +4309,7 @@ l3:
infomask2 = tuple->t_data->t_infomask2;
ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
- LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
/*
* If any subtransaction of the current top transaction already holds
@@ -4359,12 +4461,12 @@ l3:
{
result = res;
/* recovery code expects to have buffer lock held */
- LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
goto failed;
}
}
- LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/*
* Make sure it's still an appropriate lock, else start over.
@@ -4399,7 +4501,7 @@ l3:
if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
!HEAP_XMAX_IS_EXCL_LOCKED(infomask))
{
- LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/*
* Make sure it's still an appropriate lock, else start over.
@@ -4427,7 +4529,7 @@ l3:
* No conflict, but if the xmax changed under us in the
* meantime, start over.
*/
- LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
xwait))
@@ -4439,7 +4541,7 @@ l3:
}
else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
{
- LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/* if the xmax changed in the meantime, start over */
if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
@@ -4467,7 +4569,7 @@ l3:
TransactionIdIsCurrentTransactionId(xwait))
{
/* ... but if the xmax changed in the meantime, start over */
- LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
xwait))
@@ -4489,7 +4591,7 @@ l3:
*/
if (require_sleep && (result == TM_Updated || result == TM_Deleted))
{
- LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
goto failed;
}
else if (require_sleep)
@@ -4514,7 +4616,7 @@ l3:
*/
result = TM_WouldBlock;
/* recovery code expects to have buffer lock held */
- LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
goto failed;
}
@@ -4540,7 +4642,7 @@ l3:
{
result = TM_WouldBlock;
/* recovery code expects to have buffer lock held */
- LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
goto failed;
}
break;
@@ -4580,7 +4682,7 @@ l3:
{
result = TM_WouldBlock;
/* recovery code expects to have buffer lock held */
- LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
goto failed;
}
break;
@@ -4606,12 +4708,12 @@ l3:
{
result = res;
/* recovery code expects to have buffer lock held */
- LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
goto failed;
}
}
- LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/*
* xwait is done, but if xwait had just locked the tuple then some
@@ -4633,7 +4735,7 @@ l3:
* don't check for this in the multixact case, because some
* locker transactions might still be running.
*/
- UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
+ UpdateXmaxHintBits(tuple->t_data, buffer, xwait);
}
}
@@ -4692,9 +4794,9 @@ failed:
*/
if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
{
- LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
visibilitymap_pin(relation, block, &vmbuffer);
- LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
goto l3;
}
@@ -4757,7 +4859,7 @@ failed:
cleared_all_frozen = true;
- MarkBufferDirty(*buffer);
+ MarkBufferDirty(buffer);
/*
* XLOG stuff. You might think that we don't need an XLOG record because
@@ -4777,7 +4879,7 @@ failed:
XLogRecPtr recptr;
XLogBeginInsert();
- XLogRegisterBuffer(0, *buffer, REGBUF_STANDARD);
+ XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
xlrec.xmax = xid;
@@ -4798,7 +4900,7 @@ failed:
result = TM_Ok;
out_locked:
- LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
out_unlocked:
if (BufferIsValid(vmbuffer))
@@ -4816,6 +4918,9 @@ out_unlocked:
if (have_tuple_lock)
UnlockTupleTuplock(relation, tid, mode);
+ /* Put the target tuple into the slot */
+ ExecStorePinnedBufferHeapTuple(tuple, slot, buffer);
+
return result;
}
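Putting it together, the reworked heap_lock_tuple() is called roughly like this (a sketch; slot creation as above, failure handling elided):
	TupleTableSlot *slot = table_slot_create(relation, NULL);
	TM_FailureData tmfd;
	TM_Result	result;
	result = heap_lock_tuple(relation, tid, slot,
							 GetCurrentCommandId(true),
							 LockTupleExclusive, LockWaitBlock,
							 true /* follow_updates */ , &tmfd);
	/* the slot now holds the pinned target tuple; if it already contained
	 * that tuple on entry, the ReadBuffer() call was skipped */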