aboutsummaryrefslogtreecommitdiff
path: root/src/backend/storage/buffer/localbuf.c
diff options
context:
space:
mode:
authorAndres Freund <andres@anarazel.de>2015-01-29 17:49:03 +0100
committerAndres Freund <andres@anarazel.de>2015-01-29 22:48:45 +0100
commited127002d8c592610bc8e716759a1a70657483b6 (patch)
tree73ce1d9c835b4816f66f73884aed857635b44d71 /src/backend/storage/buffer/localbuf.c
parent7142bfbbd34a1dbe34346534d7479915145352b3 (diff)
downloadpostgresql-ed127002d8c592610bc8e716759a1a70657483b6.tar.gz
postgresql-ed127002d8c592610bc8e716759a1a70657483b6.zip
Align buffer descriptors to cache line boundaries.
Benchmarks have shown that aligning the buffer descriptor array to cache lines is important for scalability; especially on bigger, multi-socket, machines. Currently the array sometimes already happens to be aligned by happenstance, depending how large previous shared memory allocations were. That can lead to wildly varying performance results after minor configuration changes. In addition to aligning the start of descriptor array, also force the size of individual descriptors to be of a common cache line size (64 bytes). That happens to already be the case on 64bit platforms, but this way we can change the struct BufferDesc more easily. As the alignment primarily matters in highly concurrent workloads which probably all are 64bit these days, and the space wastage of element alignment would be a bit more noticeable on 32bit systems, we don't force the stride to be cacheline sized on 32bit platforms for now. If somebody does actual performance testing, we can reevaluate that decision by changing the definition of BUFFERDESC_PADDED_SIZE. Discussion: 20140202151319.GD32123@awork2.anarazel.de Per discussion with Bruce Momjian, Tom Lane, Robert Haas, and Peter Geoghegan.
Diffstat (limited to 'src/backend/storage/buffer/localbuf.c')
-rw-r--r--src/backend/storage/buffer/localbuf.c12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index 1fc0af386cb..3144afe37bc 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -122,7 +122,7 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
if (hresult)
{
b = hresult->id;
- bufHdr = &LocalBufferDescriptors[b];
+ bufHdr = GetLocalBufferDescriptor(b);
Assert(BUFFERTAGS_EQUAL(bufHdr->tag, newTag));
#ifdef LBDEBUG
fprintf(stderr, "LB ALLOC (%u,%d,%d) %d\n",
@@ -165,7 +165,7 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
if (++nextFreeLocalBuf >= NLocBuffer)
nextFreeLocalBuf = 0;
- bufHdr = &LocalBufferDescriptors[b];
+ bufHdr = GetLocalBufferDescriptor(b);
if (LocalRefCount[b] == 0)
{
@@ -278,7 +278,7 @@ MarkLocalBufferDirty(Buffer buffer)
Assert(LocalRefCount[bufid] > 0);
- bufHdr = &LocalBufferDescriptors[bufid];
+ bufHdr = GetLocalBufferDescriptor(bufid);
if (!(bufHdr->flags & BM_DIRTY))
pgBufferUsage.local_blks_dirtied++;
@@ -305,7 +305,7 @@ DropRelFileNodeLocalBuffers(RelFileNode rnode, ForkNumber forkNum,
for (i = 0; i < NLocBuffer; i++)
{
- BufferDesc *bufHdr = &LocalBufferDescriptors[i];
+ BufferDesc *bufHdr = GetLocalBufferDescriptor(i);
LocalBufferLookupEnt *hresult;
if ((bufHdr->flags & BM_TAG_VALID) &&
@@ -347,7 +347,7 @@ DropRelFileNodeAllLocalBuffers(RelFileNode rnode)
for (i = 0; i < NLocBuffer; i++)
{
- BufferDesc *bufHdr = &LocalBufferDescriptors[i];
+ BufferDesc *bufHdr = GetLocalBufferDescriptor(i);
LocalBufferLookupEnt *hresult;
if ((bufHdr->flags & BM_TAG_VALID) &&
@@ -400,7 +400,7 @@ InitLocalBuffers(void)
/* initialize fields that need to start off nonzero */
for (i = 0; i < nbufs; i++)
{
- BufferDesc *buf = &LocalBufferDescriptors[i];
+ BufferDesc *buf = GetLocalBufferDescriptor(i);
/*
* negative to indicate local buffer. This is tricky: shared buffers