aboutsummaryrefslogtreecommitdiff
path: root/src/backend/storage/buffer/buf_init.c
diff options
context:
space:
mode:
authorAndres Freund <andres@anarazel.de>2015-01-29 17:49:03 +0100
committerAndres Freund <andres@anarazel.de>2015-01-29 22:48:45 +0100
commited127002d8c592610bc8e716759a1a70657483b6 (patch)
tree73ce1d9c835b4816f66f73884aed857635b44d71 /src/backend/storage/buffer/buf_init.c
parent7142bfbbd34a1dbe34346534d7479915145352b3 (diff)
downloadpostgresql-ed127002d8c592610bc8e716759a1a70657483b6.tar.gz
postgresql-ed127002d8c592610bc8e716759a1a70657483b6.zip
Align buffer descriptors to cache line boundaries.
Benchmarks have shown that aligning the buffer descriptor array to cache lines is important for scalability; especially on bigger, multi-socket, machines. Currently the array sometimes already happens to be aligned by happenstance, depending how large previous shared memory allocations were. That can lead to wildly varying performance results after minor configuration changes. In addition to aligning the start of descriptor array, also force the size of individual descriptors to be of a common cache line size (64 bytes). That happens to already be the case on 64bit platforms, but this way we can change the struct BufferDesc more easily. As the alignment primarily matters in highly concurrent workloads which probably all are 64bit these days, and the space wastage of element alignment would be a bit more noticeable on 32bit systems, we don't force the stride to be cacheline sized on 32bit platforms for now. If somebody does actual performance testing, we can reevaluate that decision by changing the definition of BUFFERDESC_PADDED_SIZE. Discussion: 20140202151319.GD32123@awork2.anarazel.de Per discussion with Bruce Momjian, Tom Lane, Robert Haas, and Peter Geoghegan.
Diffstat (limited to 'src/backend/storage/buffer/buf_init.c')
-rw-r--r--src/backend/storage/buffer/buf_init.c21
1 file changed, 12 insertions, 9 deletions
diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c
index 4434bc36f18..0cd2530be99 100644
--- a/src/backend/storage/buffer/buf_init.c
+++ b/src/backend/storage/buffer/buf_init.c
@@ -18,7 +18,7 @@
#include "storage/buf_internals.h"
-BufferDesc *BufferDescriptors;
+BufferDescPadded *BufferDescriptors;
char *BufferBlocks;
@@ -67,9 +67,11 @@ InitBufferPool(void)
bool foundBufs,
foundDescs;
- BufferDescriptors = (BufferDesc *)
+ /* Align descriptors to a cacheline boundary. */
+ BufferDescriptors = (BufferDescPadded *) CACHELINEALIGN(
ShmemInitStruct("Buffer Descriptors",
- NBuffers * sizeof(BufferDesc), &foundDescs);
+ NBuffers * sizeof(BufferDescPadded) + PG_CACHE_LINE_SIZE,
+ &foundDescs));
BufferBlocks = (char *)
ShmemInitStruct("Buffer Blocks",
@@ -83,16 +85,15 @@ InitBufferPool(void)
}
else
{
- BufferDesc *buf;
int i;
- buf = BufferDescriptors;
-
/*
* Initialize all the buffer headers.
*/
- for (i = 0; i < NBuffers; buf++, i++)
+ for (i = 0; i < NBuffers; i++)
{
+ BufferDesc *buf = GetBufferDescriptor(i);
+
CLEAR_BUFFERTAG(buf->tag);
buf->flags = 0;
buf->usage_count = 0;
@@ -114,7 +115,7 @@ InitBufferPool(void)
}
/* Correct last entry of linked list */
- BufferDescriptors[NBuffers - 1].freeNext = FREENEXT_END_OF_LIST;
+ GetBufferDescriptor(NBuffers - 1)->freeNext = FREENEXT_END_OF_LIST;
}
/* Init other shared buffer-management stuff */
@@ -133,7 +134,9 @@ BufferShmemSize(void)
Size size = 0;
/* size of buffer descriptors */
- size = add_size(size, mul_size(NBuffers, sizeof(BufferDesc)));
+ size = add_size(size, mul_size(NBuffers, sizeof(BufferDescPadded)));
+ /* to allow aligning buffer descriptors */
+ size = add_size(size, PG_CACHE_LINE_SIZE);
/* size of data pages */
size = add_size(size, mul_size(NBuffers, BLCKSZ));