Diffstat (limited to 'src')
-rw-r--r--   src/backend/access/heap/heapam.c        49
-rw-r--r--   src/backend/access/heap/rewriteheap.c    7
-rw-r--r--   src/backend/commands/vacuumlazy.c        3
-rw-r--r--   src/include/access/heapam.h              3
4 files changed, 7 insertions, 55 deletions
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index c910863e273..3b7894e8f1a 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -3947,10 +3947,8 @@ heap_inplace_update(Relation relation, HeapTuple tuple)
* because this function is applied during WAL recovery, when we don't have
* access to any such state, and can't depend on the hint bits to be set.)
*
- * In lazy VACUUM, we call this while initially holding only a shared lock
- * on the tuple's buffer. If any change is needed, we trade that in for an
- * exclusive lock before making the change. Caller should pass the buffer ID
- * if shared lock is held, InvalidBuffer if exclusive lock is already held.
+ * If the tuple is in a shared buffer, caller must hold an exclusive lock on
+ * that buffer.
*
* Note: it might seem we could make the changes without exclusive lock, since
* TransactionId read/write is assumed atomic anyway. However there is a race
@@ -3962,8 +3960,7 @@ heap_inplace_update(Relation relation, HeapTuple tuple)
* infomask bits.
*/
bool
-heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
- Buffer buf)
+heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid)
{
bool changed = false;
TransactionId xid;
@@ -3972,13 +3969,6 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
if (TransactionIdIsNormal(xid) &&
TransactionIdPrecedes(xid, cutoff_xid))
{
- if (buf != InvalidBuffer)
- {
- /* trade in share lock for exclusive lock */
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
- buf = InvalidBuffer;
- }
HeapTupleHeaderSetXmin(tuple, FrozenTransactionId);
/*
@@ -3990,28 +3980,12 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
changed = true;
}
- /*
- * When we release shared lock, it's possible for someone else to change
- * xmax before we get the lock back, so repeat the check after acquiring
- * exclusive lock. (We don't need this pushup for xmin, because only
- * VACUUM could be interested in changing an existing tuple's xmin, and
- * there's only one VACUUM allowed on a table at a time.)
- */
-recheck_xmax:
if (!(tuple->t_infomask & HEAP_XMAX_IS_MULTI))
{
xid = HeapTupleHeaderGetXmax(tuple);
if (TransactionIdIsNormal(xid) &&
TransactionIdPrecedes(xid, cutoff_xid))
{
- if (buf != InvalidBuffer)
- {
- /* trade in share lock for exclusive lock */
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
- buf = InvalidBuffer;
- goto recheck_xmax; /* see comment above */
- }
HeapTupleHeaderSetXmax(tuple, InvalidTransactionId);
/*
@@ -4046,30 +4020,15 @@ recheck_xmax:
}
/*
- * Although xvac per se could only be set by old-style VACUUM FULL, it
- * shares physical storage space with cmax, and so could be wiped out by
- * someone setting xmax. Hence recheck after changing lock, same as for
- * xmax itself.
- *
* Old-style VACUUM FULL is gone, but we have to keep this code as long as
* we support having MOVED_OFF/MOVED_IN tuples in the database.
*/
-recheck_xvac:
if (tuple->t_infomask & HEAP_MOVED)
{
xid = HeapTupleHeaderGetXvac(tuple);
if (TransactionIdIsNormal(xid) &&
TransactionIdPrecedes(xid, cutoff_xid))
{
- if (buf != InvalidBuffer)
- {
- /* trade in share lock for exclusive lock */
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
- buf = InvalidBuffer;
- goto recheck_xvac; /* see comment above */
- }
-
/*
* If a MOVED_OFF tuple is not dead, the xvac transaction must
* have failed; whereas a non-dead MOVED_IN tuple must mean the
@@ -4711,7 +4670,7 @@ heap_xlog_freeze(XLogRecPtr lsn, XLogRecord *record)
ItemId lp = PageGetItemId(page, *offsets);
HeapTupleHeader tuple = (HeapTupleHeader) PageGetItem(page, lp);
- (void) heap_freeze_tuple(tuple, cutoff_xid, InvalidBuffer);
+ (void) heap_freeze_tuple(tuple, cutoff_xid);
offsets++;
}
}
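
The heapam.c side of the change boils down to a simpler locking contract: heap_freeze_tuple() no longer upgrades a shared buffer lock itself, so a caller whose tuple sits in a shared buffer must already hold the exclusive content lock before calling it. A minimal caller sketch under that contract (buf, tuple, and cutoff_xid are illustrative names, not taken from this commit; WAL logging is omitted):

	/* Caller must take the exclusive lock before freezing. */
	LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
	if (heap_freeze_tuple(tuple->t_data, cutoff_xid))
		MarkBufferDirty(buf);	/* the tuple header was modified */
	LockBuffer(buf, BUFFER_LOCK_UNLOCK);
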
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index 31b2b678b6c..9a8f05d9331 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -335,13 +335,8 @@ rewrite_heap_tuple(RewriteState state,
/*
* While we have our hands on the tuple, we may as well freeze any
* very-old xmin or xmax, so that future VACUUM effort can be saved.
- *
- * Note we abuse heap_freeze_tuple() a bit here, since it's expecting to
- * be given a pointer to a tuple in a disk buffer. It happens though that
- * we can get the right things to happen by passing InvalidBuffer for the
- * buffer.
*/
- heap_freeze_tuple(new_tuple->t_data, state->rs_freeze_xid, InvalidBuffer);
+ heap_freeze_tuple(new_tuple->t_data, state->rs_freeze_xid);
/*
* Invalid ctid means that ctid should point to the tuple itself. We'll
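
Since heap_freeze_tuple() now only touches the tuple header it is handed, calling it on a backend-local tuple is no longer an "abuse" but simply the case where the exclusive-lock precondition is vacuously satisfied, there being no shared buffer involved at all. A hedged sketch of that case (heap_copytuple, some_tuple, and freeze_xid are assumptions for illustration):

	/* Freezing a backend-local copy: no buffer, hence no buffer lock. */
	HeapTuple	copy = heap_copytuple(some_tuple);

	(void) heap_freeze_tuple(copy->t_data, freeze_xid);
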
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 2fc749e6302..11e8da1fc1b 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -784,8 +784,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* Each non-removable tuple must be checked to see if it needs
* freezing. Note we already have exclusive buffer lock.
*/
- if (heap_freeze_tuple(tuple.t_data, FreezeLimit,
- InvalidBuffer))
+ if (heap_freeze_tuple(tuple.t_data, FreezeLimit))
frozen[nfrozen++] = offnum;
}
} /* scan along page */
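
For context, lazy_scan_heap() already holds the exclusive content lock on each page it cleans before this loop runs, which is why the InvalidBuffer argument was dead weight here. After the page scan, the accumulated offsets are emitted in a single freeze record, roughly like the following (a sketch reconstructed from the surrounding vacuumlazy.c code of this era, not part of this hunk):

	/* After scanning the page: WAL-log all tuples frozen on it at once. */
	if (nfrozen > 0)
	{
		MarkBufferDirty(buf);
		if (RelationNeedsWAL(onerel))
		{
			XLogRecPtr	recptr;

			recptr = log_heap_freeze(onerel, buf, FreezeLimit,
									 frozen, nfrozen);
			PageSetLSN(page, recptr);
		}
	}

On replay, heap_xlog_freeze() walks those offsets and calls heap_freeze_tuple() on each tuple, which is why the function cannot rely on any live transaction state (see the heapam.c hunk above).
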
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index fa38803b249..9d5da7fb3a5 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -111,8 +111,7 @@ extern HTSU_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
TransactionId *update_xmax, CommandId cid,
LockTupleMode mode, bool nowait);
extern void heap_inplace_update(Relation relation, HeapTuple tuple);
-extern bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
- Buffer buf);
+extern bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid);
extern bool heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
Buffer buf);