about summary refs log tree commit diff
path: root/src/backend/storage/buffer/bufmgr.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/backend/storage/buffer/bufmgr.c')
-rw-r--r--  src/backend/storage/buffer/bufmgr.c  49
1 files changed, 49 insertions, 0 deletions
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index df4c9d71094..58b0a975c0f 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -3745,6 +3745,55 @@ ConditionalLockBufferForCleanup(Buffer buffer)
return false;
}
+/*
+ * IsBufferCleanupOK - as above, but we already have the lock
+ *
+ * Check whether it's OK to perform cleanup on a buffer we've already
+ * locked.  If we observe that the pin count is 1, our exclusive lock
+ * happens to be a cleanup lock, and we can proceed with anything that
+ * would have been allowable had we sought a cleanup lock originally.
+ */
+bool
+IsBufferCleanupOK(Buffer buffer)
+{
+	BufferDesc *bufHdr;
+	uint32		buf_state;
+
+	Assert(BufferIsValid(buffer));
+
+	if (BufferIsLocal(buffer))
+	{
+		/* There should be exactly one pin, and it must be ours */
+		if (LocalRefCount[-buffer - 1] != 1)
+			return false;
+		/* Nobody else to wait for */
+		return true;
+	}
+
+	/* There should be exactly one local pin, i.e. ours */
+	if (GetPrivateRefCount(buffer) != 1)
+		return false;
+
+	bufHdr = GetBufferDescriptor(buffer - 1);
+
+	/* caller must hold exclusive lock on buffer */
+	Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
+								LW_EXCLUSIVE));
+
+	buf_state = LockBufHdr(bufHdr);	/* spinlock guards the refcount read */
+
+	Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
+	if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
+	{
+		/* pincount is OK: only our own pin remains, so cleanup is safe */
+		UnlockBufHdr(bufHdr, buf_state);
+		return true;
+	}
+
+	UnlockBufHdr(bufHdr, buf_state);
+	return false;
+}
+
/*
* Functions for buffer I/O handling