author	Tom Lane <tgl@sss.pgh.pa.us>	2018-03-02 17:40:48 -0500
committer	Tom Lane <tgl@sss.pgh.pa.us>	2018-03-02 17:40:48 -0500
commit	0b1d1a038babff4aadf0862c28e7b667f1b12a30 (patch)
tree	4aa84f8e727026718c61665fdb884356637fb5a0 /src
parent	e94f2bc809a0c684185666f19d81f6496e732a3a (diff)
Fix VM buffer pin management in heap_lock_updated_tuple_rec().
Sloppy coding in this function could lead to leaking a VM buffer pin,
or to attempting to free the same pin twice.  Repair.  While at it,
reduce the code's tendency to free and reacquire the same page pin.

Back-patch to 9.6; before that, this routine did not concern itself
with VM pages.

Amit Kapila and Tom Lane

Discussion: https://postgr.es/m/CAA4eK1KJKwhc=isgTQHjM76CAdVswzNeAuZkh_cx-6QgGkSEgA@mail.gmail.com
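Both halves of the bug come down to pin bookkeeping: one exit path returned without releasing vmbuffer (a leaked pin), and another released it inside the loop and then again after out_locked (a double release). The standalone C model below shows only the patched shape of the function; pin(), unpin(), walk_chain(), and the even-page test standing in for PageIsAllVisible() are invented for illustration and are not PostgreSQL APIs.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy pin bookkeeping: every pin must be released exactly once. */
static int pins_held = 0;

#define INVALID_BUF (-1)

static int
pin(int page)
{
	pins_held++;
	return page;
}

static void
unpin(int *buf)
{
	assert(*buf != INVALID_BUF);	/* a double release trips this */
	pins_held--;
	*buf = INVALID_BUF;
}

/*
 * Walk a chain of "pages".  A side vm pin may be taken for the current
 * page; it is carried across iterations and released exactly once, at
 * the common exit.  A negative page stands in for the early-exit path
 * that previously leaked the pin.
 */
static int
walk_chain(const int *pages, int npages)
{
	int		buf = INVALID_BUF;
	int		vmbuf = INVALID_BUF;
	int		result;

	for (int i = 0; i < npages; i++)
	{
		bool	pinned_desired_page;

		buf = pin(pages[i]);

		if (pages[i] % 2 == 0)		/* stands in for PageIsAllVisible() */
		{
			if (vmbuf != INVALID_BUF)
				unpin(&vmbuf);		/* swap, never stack, the side pin */
			vmbuf = pin(pages[i]);
			pinned_desired_page = true;
		}
		else
			pinned_desired_page = false;

		(void) pinned_desired_page;	/* would gate a recheck, as in the patch */

		if (pages[i] < 0)			/* exit taken while still holding buf */
		{
			result = -1;
			goto out_locked;
		}

		unpin(&buf);				/* vmbuf stays pinned across iterations */
	}

	result = 0;
	goto out_unlocked;				/* no page buffer held here */

out_locked:
	unpin(&buf);
out_unlocked:
	if (vmbuf != INVALID_BUF)
		unpin(&vmbuf);
	return result;
}

int
main(void)
{
	int		chain[] = {2, 3, 4, -5};

	(void) walk_chain(chain, 4);
	assert(pins_held == 0);			/* balanced: no leak, no double release */
	printf("all pins balanced\n");
	return 0;
}

Running this exits cleanly; replacing the goto out_unlocked with a bare return, or re-adding an unpin(&vmbuf) inside the loop, makes the assert in main() fire -- exactly the pair of bugs the commit message describes.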
Diffstat (limited to 'src')
-rw-r--r--	src/backend/access/heap/heapam.c	24
1 file changed, 16 insertions(+), 8 deletions(-)
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index dc762f913d2..302f8c63ad6 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -5677,6 +5677,7 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
 				new_xmax;
 	TransactionId priorXmax = InvalidTransactionId;
 	bool		cleared_all_frozen = false;
+	bool		pinned_desired_page;
 	Buffer		vmbuffer = InvalidBuffer;
 	BlockNumber block;
 
@@ -5698,7 +5699,8 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
 			 * chain, and there's no further tuple to lock: return success to
 			 * caller.
 			 */
-			return HeapTupleMayBeUpdated;
+			result = HeapTupleMayBeUpdated;
+			goto out_unlocked;
 		}
 
 l4:
@@ -5711,9 +5713,12 @@ l4:
 		 * to recheck after we have the lock.
 		 */
 		if (PageIsAllVisible(BufferGetPage(buf)))
+		{
 			visibilitymap_pin(rel, block, &vmbuffer);
+			pinned_desired_page = true;
+		}
 		else
-			vmbuffer = InvalidBuffer;
+			pinned_desired_page = false;
 
 		LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
 
@@ -5722,8 +5727,13 @@ l4:
 		 * all visible while we were busy locking the buffer, we'll have to
 		 * unlock and re-lock, to avoid holding the buffer lock across I/O.
 		 * That's a bit unfortunate, but hopefully shouldn't happen often.
+		 *
+		 * Note: in some paths through this function, we will reach here
+		 * holding a pin on a vm page that may or may not be the one matching
+		 * this page.  If this page isn't all-visible, we won't use the vm
+		 * page, but we hold onto such a pin till the end of the function.
 		 */
-		if (vmbuffer == InvalidBuffer && PageIsAllVisible(BufferGetPage(buf)))
+		if (!pinned_desired_page && PageIsAllVisible(BufferGetPage(buf)))
 		{
 			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
 			visibilitymap_pin(rel, block, &vmbuffer);
@@ -5749,8 +5759,8 @@ l4:
 		 */
 		if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data)))
 		{
-			UnlockReleaseBuffer(buf);
-			return HeapTupleMayBeUpdated;
+			result = HeapTupleMayBeUpdated;
+			goto out_locked;
 		}
 
 		old_infomask = mytup.t_data->t_infomask;
@@ -5957,8 +5967,6 @@ next:
 		priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
 		ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
 		UnlockReleaseBuffer(buf);
-		if (vmbuffer != InvalidBuffer)
-			ReleaseBuffer(vmbuffer);
 	}
 
 	result = HeapTupleMayBeUpdated;
@@ -5966,11 +5974,11 @@ next:
 out_locked:
	UnlockReleaseBuffer(buf);
 
+out_unlocked:
 	if (vmbuffer != InvalidBuffer)
 		ReleaseBuffer(vmbuffer);
 
 	return result;
-
 }
 
 /*
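One further pattern worth noting from the hunk at line 5722: visibilitymap_pin() may do I/O, so the code drops the buffer lock, takes the pin, re-locks, and re-checks, rather than pinning while locked. A minimal pthreads sketch of that dance follows; ensure_vm_pin(), page_lock, and page_all_visible are hypothetical stand-ins, not PostgreSQL names.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
static bool page_all_visible = true;	/* protected by page_lock */

/* Hypothetical stand-in for visibilitymap_pin(): may block on I/O,
 * so it must never run while page_lock is held. */
static void
ensure_vm_pin(void)
{
	printf("pinning vm page (may do I/O)\n");
}

/* Returns with page_lock held, and with the vm pin taken if needed. */
static void
lock_page_for_update(bool pinned_desired_page)
{
	pthread_mutex_lock(&page_lock);

	/*
	 * The all-visible flag may have been set between our unlocked check
	 * and taking the lock.  If so, and we don't already hold the vm pin,
	 * drop the lock, pin (possibly doing I/O), and re-lock, leaving the
	 * caller to re-validate page state -- the same dance as the patch.
	 */
	if (!pinned_desired_page && page_all_visible)
	{
		pthread_mutex_unlock(&page_lock);
		ensure_vm_pin();		/* no lock held across potential I/O */
		pthread_mutex_lock(&page_lock);
	}
}

int
main(void)
{
	lock_page_for_update(false);
	pthread_mutex_unlock(&page_lock);
	return 0;
}

The cost, as the in-tree comment says, is an unlock/re-lock cycle in the rare race; the benefit is never sleeping on I/O while holding a content lock that other backends may be queued on.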