author    Tom Lane <tgl@sss.pgh.pa.us>  2007-09-20 17:56:33 +0000
committer Tom Lane <tgl@sss.pgh.pa.us>  2007-09-20 17:56:33 +0000
commit    282d2a03dd30804b01f8042f640d638c2ee76604
tree      004f08ce31f1bfb03ab55571ad7867babe5b3d7f
parent    bbf4fdc2538097bb3103806e1419ceef1f289203
HOT updates.

When we update a tuple without changing any of its indexed columns, and
the new version can be stored on the same heap page, we no longer
generate extra index entries for the new version.  Instead, index
searches follow the HOT-chain links to ensure they find the correct
tuple version.

In addition, this patch introduces the ability to "prune" dead tuples on
a per-page basis, without having to do a complete VACUUM pass to recover
space.  VACUUM is still needed to clean up dead index entries, however.

Pavan Deolasee, with help from a bunch of other people.
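The commit message above describes two conditions an UPDATE must satisfy to
qualify as HOT.  The following is a minimal, self-contained sketch of that
decision, for illustration only: UpdateFacts and update_can_be_hot() are
invented names, not the actual heap_update() logic in this patch.

	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * Sketch of the HOT decision (hypothetical types/names, not
	 * the PostgreSQL source): an update can be HOT only if no
	 * indexed column changed and the new version fits on the
	 * same heap page as the old one.
	 */
	typedef struct
	{
		bool	indexed_columns_changed;
		bool	fits_on_same_page;
	} UpdateFacts;

	static bool
	update_can_be_hot(const UpdateFacts *facts)
	{
		/* HOT requires that no indexed column value changed ... */
		if (facts->indexed_columns_changed)
			return false;
		/* ... and that the new version fits on the same heap page */
		if (!facts->fits_on_same_page)
			return false;
		/*
		 * Then no new index entries are needed: the old tuple links to
		 * the new version (the HOT chain), and index searches follow it.
		 */
		return true;
	}

	int
	main(void)
	{
		UpdateFacts f = {false, true};

		printf("HOT update possible: %s\n",
			   update_can_be_hot(&f) ? "yes" : "no");
		return 0;
	}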
Diffstat (limited to 'src/backend/storage/buffer/bufmgr.c')
-rw-r--r--  src/backend/storage/buffer/bufmgr.c  51
1 file changed, 50 insertions(+), 1 deletion(-)
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 12564a69ee4..9c0ef67f6bb 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.223 2007/06/30 19:12:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.224 2007/09/20 17:56:31 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -2067,6 +2067,55 @@ LockBufferForCleanup(Buffer buffer)
}
/*
+ * ConditionalLockBufferForCleanup - as above, but don't wait to get the lock
+ *
+ * We won't loop, but just check once to see if the pin count is OK. If
+ * not, return FALSE with no lock held.
+ */
+bool
+ConditionalLockBufferForCleanup(Buffer buffer)
+{
+ volatile BufferDesc *bufHdr;
+
+ Assert(BufferIsValid(buffer));
+
+ if (BufferIsLocal(buffer))
+ {
+ /* There should be exactly one pin */
+ Assert(LocalRefCount[-buffer - 1] > 0);
+ if (LocalRefCount[-buffer - 1] != 1)
+ return false;
+ /* Nobody else to wait for */
+ return true;
+ }
+
+ /* There should be exactly one local pin */
+ Assert(PrivateRefCount[buffer - 1] > 0);
+ if (PrivateRefCount[buffer - 1] != 1)
+ return false;
+
+ /* Try to acquire lock */
+ if (!ConditionalLockBuffer(buffer))
+ return false;
+
+ bufHdr = &BufferDescriptors[buffer - 1];
+ LockBufHdr(bufHdr);
+ Assert(bufHdr->refcount > 0);
+ if (bufHdr->refcount == 1)
+ {
+ /* Successfully acquired exclusive lock with pincount 1 */
+ UnlockBufHdr(bufHdr);
+ return true;
+ }
+
+ /* Failed, so release the lock */
+ UnlockBufHdr(bufHdr);
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+ return false;
+}
+
+
+/*
* Functions for buffer I/O handling
*
* Note: We assume that nested buffer I/O never occurs.
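The intended consumer of the new ConditionalLockBufferForCleanup() is the
opportunistic per-page pruning this patch introduces: pruning wants a
cleanup lock but must never block a foreground query, so it tries once and
skips the page on failure.  Below is a sketch of that caller shape, not the
patch's actual pruning code; prune_page_contents() is a hypothetical
stand-in, and the caller is assumed to already hold the sole pin on the
buffer, as the function's contract requires.

	#include "postgres.h"
	#include "storage/bufmgr.h"

	static void prune_page_contents(Buffer buffer);	/* hypothetical */

	/*
	 * Sketch of an opportunistic caller: try once for a cleanup lock
	 * (exclusive lock + pincount 1); if anyone else has the page
	 * pinned, skip it rather than wait.
	 */
	static void
	maybe_prune_page(Buffer buffer)
	{
		/* On failure we hold no lock, so we can simply move on */
		if (!ConditionalLockBufferForCleanup(buffer))
			return;

		prune_page_contents(buffer);	/* hypothetical pruning work */

		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
	}

This non-blocking shape is why the function checks the pin count before
even attempting the lock: with more than one pin held, cleanup can never
succeed, so it returns false immediately and the caller loses nothing.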