author     Robert Haas <rhaas@postgresql.org>   2014-09-25 10:43:24 -0400
committer  Robert Haas <rhaas@postgresql.org>   2014-09-25 10:43:24 -0400
commit     5d7962c6797c0baae9ffb3b5b9ac0aec7b598bc3 (patch)
tree       9abf4b7ad28b57c77305b5b1361d3468642bc299 /src/include/storage/buf_internals.h
parent     1dcfb8da09c47d2a7502d1dfab06c8be4b6cf323 (diff)
Change locking regimen around buffer replacement.
Previously, we used an lwlock that was held from the time we began seeking a candidate buffer until the time when we found and pinned one, which is disastrous for concurrency. Instead, use a spinlock which is held just long enough to pop the freelist or advance the clock sweep hand, and then released. If we need to advance the clock sweep further, we reacquire the spinlock once per buffer.

This represents a significant increase in atomic operations around buffer eviction, but it still wins on many workloads. On others, it may result in no gain, or even cause a regression, unless the number of buffer mapping locks is also increased. However, that seems like material for a separate commit. We may also need to consider other methods of mitigating contention on this spinlock, such as splitting it into multiple locks or jumping the clock sweep hand more than one buffer at a time, but those, too, seem like separate improvements.

Patch by me, inspired by a much larger patch from Amit Kapila. Reviewed by Andres Freund.
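To make the new regimen concrete, here is a minimal, self-contained C sketch of the pattern the message describes. This is illustrative code, not PostgreSQL source: names such as ClockBuffer, NBUFFERS, and clock_sweep_next(), and the use of a POSIX pthread spinlock in place of the backend's slock_t, are assumptions for the example. The spinlock is held only while the clock hand advances, released before the candidate buffer is examined, and reacquired once per buffer if the sweep must continue.

/*
 * Illustrative sketch only (not PostgreSQL source).  A spinlock protects the
 * clock hand; it is dropped before each candidate buffer is inspected and
 * reacquired once per buffer, mirroring the commit's description.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NBUFFERS 16

typedef struct ClockBuffer
{
	int		usage_count;	/* decremented as the clock hand passes */
	bool	pinned;			/* true if some backend holds a pin */
} ClockBuffer;

static ClockBuffer buffers[NBUFFERS];
static int	next_victim = 0;				/* the clock sweep hand */
static pthread_spinlock_t strategy_lock;	/* stand-in for buffer_strategy_lock */

/* Advance the clock hand by one buffer, holding the spinlock only briefly. */
static int
clock_sweep_next(void)
{
	int		victim;

	pthread_spin_lock(&strategy_lock);
	victim = next_victim;
	next_victim = (next_victim + 1) % NBUFFERS;
	pthread_spin_unlock(&strategy_lock);	/* released before we look at the buffer */
	return victim;
}

/* Sweep until we find an unpinned buffer whose usage count has reached zero. */
static ClockBuffer *
get_victim_buffer(void)
{
	for (;;)
	{
		ClockBuffer *buf = &buffers[clock_sweep_next()];

		/*
		 * The strategy spinlock is NOT held here; in the real system the
		 * per-buffer state is protected by that buffer's own header lock.
		 */
		if (!buf->pinned)
		{
			if (buf->usage_count == 0)
				return buf;			/* candidate found */
			buf->usage_count--;		/* give it another trip around the clock */
		}
		/* Loop around: clock_sweep_next() reacquires the spinlock per buffer. */
	}
}

int
main(void)
{
	pthread_spin_init(&strategy_lock, PTHREAD_PROCESS_PRIVATE);
	for (int i = 0; i < NBUFFERS; i++)
		buffers[i].usage_count = (i == 3) ? 0 : 2;	/* buffer 3 is the obvious victim */

	printf("victim buffer: %d\n", (int) (get_victim_buffer() - buffers));
	pthread_spin_destroy(&strategy_lock);
	return 0;
}

Compared with the pre-patch approach, where an lwlock was held across the entire search, the only serialized step in this sketch is the brief hand advance; the examination of each candidate proceeds without the strategy lock.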
Diffstat (limited to 'src/include/storage/buf_internals.h')
-rw-r--r--  src/include/storage/buf_internals.h  |  5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h
index c019013e720..0e69b633c3f 100644
--- a/src/include/storage/buf_internals.h
+++ b/src/include/storage/buf_internals.h
@@ -115,7 +115,7 @@ typedef struct buftag
  * Note: buf_hdr_lock must be held to examine or change the tag, flags,
  * usage_count, refcount, or wait_backend_pid fields. buf_id field never
  * changes after initialization, so does not need locking. freeNext is
- * protected by the BufFreelistLock not buf_hdr_lock. The LWLocks can take
+ * protected by the buffer_strategy_lock not buf_hdr_lock. The LWLocks can take
  * care of themselves. The buf_hdr_lock is *not* used to control access to
  * the data in the buffer!
  *
@@ -185,8 +185,7 @@ extern BufferDesc *LocalBufferDescriptors;
  */
 
 /* freelist.c */
-extern volatile BufferDesc *StrategyGetBuffer(BufferAccessStrategy strategy,
-                                              bool *lock_held);
+extern volatile BufferDesc *StrategyGetBuffer(BufferAccessStrategy strategy);
 extern void StrategyFreeBuffer(volatile BufferDesc *buf);
 extern bool StrategyRejectBuffer(BufferAccessStrategy strategy,
                                  volatile BufferDesc *buf);
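For callers, the upshot of the prototype change above is that the lock_held out-parameter disappears along with the caller-side release of BufFreelistLock; acquisition and release of the new buffer_strategy_lock stay entirely inside freelist.c. Roughly, and paraphrasing rather than quoting the real caller (BufferAlloc() in src/backend/storage/buffer/bufmgr.c), the call site changes like this:

	/* Before this commit (paraphrased): the lock could escape to the caller. */
	buf = StrategyGetBuffer(strategy, &lock_held);
	/* ... examine and pin the returned buffer ... */
	if (lock_held)
		LWLockRelease(BufFreelistLock);

	/* After this commit: no strategy-level lock is ever visible here. */
	buf = StrategyGetBuffer(strategy);
	/* ... examine and pin the returned buffer ... */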