author     Andres Freund <andres@anarazel.de>  2017-02-27 03:41:44 -0800
committer  Andres Freund <andres@anarazel.de>  2017-02-27 03:41:44 -0800
commit     9fab40ad32efa4038d19eaed975bb4c1713ccbc0
tree       74e11745b958b572cf6222eb6cdc8f04a324db52
parent     58b25e98106dbe062cec0f3d31d64977bffaa4af
Use the new "Slab" context for some allocations in reorderbuffer.c.
Note that this change alone does not yet fully address the performance
problems that triggered this work; a large portion of the slowdown is
caused by the tuple allocator, which is not converted to the new
allocator. Converting it would be possible, but using evenly sized
objects, as both the current implementation in reorderbuffer.c and
slab.c do, wastes a fair amount of memory. A later patch by Tomas will
introduce a better approach.

Author: Tomas Vondra
Reviewed-By: Andres Freund
Discussion: https://postgr.es/m/d15dff83-0b37-28ed-0809-95a5cc7292ad@2ndquadrant.com
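[Editor's note] For context, the standalone C sketch below illustrates the core
idea behind slab allocation for fixed-size objects: freed chunks are kept on a
free list and handed back to later allocations, so frequent allocate/release
cycles bypass the general-purpose allocator. This is only an illustration; it
is not PostgreSQL's slab.c, which additionally carves chunks out of larger
blocks, tracks per-block usage, and can release empty blocks entirely. All
names here are invented for the sketch.

#include <stdio.h>
#include <stdlib.h>

typedef struct SlabChunk
{
    struct SlabChunk *next;     /* links free chunks together */
} SlabChunk;

typedef struct Slab
{
    size_t      chunk_size;     /* fixed size of every chunk */
    SlabChunk  *freelist;       /* head of the free-chunk list */
} Slab;

static Slab *
slab_create(size_t chunk_size)
{
    Slab       *slab = malloc(sizeof(Slab));

    /* chunks must be large enough to hold the freelist link */
    if (chunk_size < sizeof(SlabChunk))
        chunk_size = sizeof(SlabChunk);
    slab->chunk_size = chunk_size;
    slab->freelist = NULL;
    return slab;
}

static void *
slab_alloc(Slab *slab)
{
    if (slab->freelist != NULL)
    {
        /* reuse a previously freed chunk */
        SlabChunk  *chunk = slab->freelist;

        slab->freelist = chunk->next;
        return chunk;
    }
    /* no free chunk available; fall back to the system allocator */
    return malloc(slab->chunk_size);
}

static void
slab_free(Slab *slab, void *ptr)
{
    /* push the chunk onto the free list for later reuse */
    SlabChunk  *chunk = ptr;

    chunk->next = slab->freelist;
    slab->freelist = chunk;
}

int
main(void)
{
    Slab       *slab = slab_create(64);
    void       *a = slab_alloc(slab);

    slab_free(slab, a);
    /* the second allocation reuses the chunk freed above */
    void       *b = slab_alloc(slab);

    printf("reused: %s\n", a == b ? "yes" : "no");
    free(b);
    free(slab);
    return 0;
}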
-rw-r--r--  src/backend/replication/logical/reorderbuffer.c | 74
-rw-r--r--  src/include/replication/reorderbuffer.h         | 14
2 files changed, 22 insertions(+), 66 deletions(-)
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 7dc97fa7967..8aac670bd45 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -156,10 +156,7 @@ static const Size max_changes_in_memory = 4096;
* major bottleneck, especially when spilling to disk while decoding batch
* workloads.
*/
-static const Size max_cached_changes = 4096 * 2;
static const Size max_cached_tuplebufs = 4096 * 2; /* ~8MB */
-static const Size max_cached_transactions = 512;
-
/* ---------------------------------------
* primary reorderbuffer support routines
@@ -241,6 +238,16 @@ ReorderBufferAllocate(void)
buffer->context = new_ctx;
+ buffer->change_context = SlabContextCreate(new_ctx,
+ "Change",
+ SLAB_DEFAULT_BLOCK_SIZE,
+ sizeof(ReorderBufferChange));
+
+ buffer->txn_context = SlabContextCreate(new_ctx,
+ "TXN",
+ SLAB_DEFAULT_BLOCK_SIZE,
+ sizeof(ReorderBufferTXN));
+
hash_ctl.keysize = sizeof(TransactionId);
hash_ctl.entrysize = sizeof(ReorderBufferTXNByIdEnt);
hash_ctl.hcxt = buffer->context;
@@ -251,8 +258,6 @@ ReorderBufferAllocate(void)
buffer->by_txn_last_xid = InvalidTransactionId;
buffer->by_txn_last_txn = NULL;
- buffer->nr_cached_transactions = 0;
- buffer->nr_cached_changes = 0;
buffer->nr_cached_tuplebufs = 0;
buffer->outbuf = NULL;
@@ -261,8 +266,6 @@ ReorderBufferAllocate(void)
buffer->current_restart_decoding_lsn = InvalidXLogRecPtr;
dlist_init(&buffer->toplevel_by_lsn);
- dlist_init(&buffer->cached_transactions);
- dlist_init(&buffer->cached_changes);
slist_init(&buffer->cached_tuplebufs);
return buffer;
@@ -291,19 +294,8 @@ ReorderBufferGetTXN(ReorderBuffer *rb)
{
ReorderBufferTXN *txn;
- /* check the slab cache */
- if (rb->nr_cached_transactions > 0)
- {
- rb->nr_cached_transactions--;
- txn = (ReorderBufferTXN *)
- dlist_container(ReorderBufferTXN, node,
- dlist_pop_head_node(&rb->cached_transactions));
- }
- else
- {
- txn = (ReorderBufferTXN *)
- MemoryContextAlloc(rb->context, sizeof(ReorderBufferTXN));
- }
+ txn = (ReorderBufferTXN *)
+ MemoryContextAlloc(rb->txn_context, sizeof(ReorderBufferTXN));
memset(txn, 0, sizeof(ReorderBufferTXN));
@@ -344,18 +336,7 @@ ReorderBufferReturnTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
txn->invalidations = NULL;
}
- /* check whether to put into the slab cache */
- if (rb->nr_cached_transactions < max_cached_transactions)
- {
- rb->nr_cached_transactions++;
- dlist_push_head(&rb->cached_transactions, &txn->node);
- VALGRIND_MAKE_MEM_UNDEFINED(txn, sizeof(ReorderBufferTXN));
- VALGRIND_MAKE_MEM_DEFINED(&txn->node, sizeof(txn->node));
- }
- else
- {
- pfree(txn);
- }
+ pfree(txn);
}
/*
@@ -366,19 +347,8 @@ ReorderBufferGetChange(ReorderBuffer *rb)
{
ReorderBufferChange *change;
- /* check the slab cache */
- if (rb->nr_cached_changes)
- {
- rb->nr_cached_changes--;
- change = (ReorderBufferChange *)
- dlist_container(ReorderBufferChange, node,
- dlist_pop_head_node(&rb->cached_changes));
- }
- else
- {
- change = (ReorderBufferChange *)
- MemoryContextAlloc(rb->context, sizeof(ReorderBufferChange));
- }
+ change = (ReorderBufferChange *)
+ MemoryContextAlloc(rb->change_context, sizeof(ReorderBufferChange));
memset(change, 0, sizeof(ReorderBufferChange));
return change;
@@ -434,21 +404,9 @@ ReorderBufferReturnChange(ReorderBuffer *rb, ReorderBufferChange *change)
break;
}
- /* check whether to put into the slab cache */
- if (rb->nr_cached_changes < max_cached_changes)
- {
- rb->nr_cached_changes++;
- dlist_push_head(&rb->cached_changes, &change->node);
- VALGRIND_MAKE_MEM_UNDEFINED(change, sizeof(ReorderBufferChange));
- VALGRIND_MAKE_MEM_DEFINED(&change->node, sizeof(change->node));
- }
- else
- {
- pfree(change);
- }
+ pfree(change);
}
-
/*
* Get an unused, possibly preallocated, ReorderBufferTupleBuf fitting at
* least a tuple of size tuple_len (excluding header overhead).
diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h
index 25b0fc8c0ae..17e47b385b7 100644
--- a/src/include/replication/reorderbuffer.h
+++ b/src/include/replication/reorderbuffer.h
@@ -331,6 +331,12 @@ struct ReorderBuffer
MemoryContext context;
/*
+ * Memory contexts for specific types of objects
+ */
+ MemoryContext change_context;
+ MemoryContext txn_context;
+
+ /*
* Data structure slab cache.
*
* We allocate/deallocate some structures very frequently, to avoid bigger
@@ -340,14 +346,6 @@ struct ReorderBuffer
* on top of reorderbuffer.c
*/
- /* cached ReorderBufferTXNs */
- dlist_head cached_transactions;
- Size nr_cached_transactions;
-
- /* cached ReorderBufferChanges */
- dlist_head cached_changes;
- Size nr_cached_changes;
-
/* cached ReorderBufferTupleBufs */
slist_head cached_tuplebufs;
Size nr_cached_tuplebufs;
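[Editor's note] Taken together, the calling pattern introduced by this commit
can be summarized by the sketch below. It assumes the PostgreSQL backend
environment (SlabContextCreate, MemoryContextAlloc, and pfree exactly as they
appear in the diff above) and is not a standalone program; the names mirror
the patch, and the parent context is the reorder buffer's own context as in
ReorderBufferAllocate.

/* one dedicated slab context per fixed-size object type */
MemoryContext change_context;
ReorderBufferChange *change;

change_context = SlabContextCreate(buffer->context,
                                   "Change",
                                   SLAB_DEFAULT_BLOCK_SIZE,
                                   sizeof(ReorderBufferChange));

/*
 * Allocate: the slab context reuses freed chunks internally, so the
 * hand-rolled freelist checks removed by this commit are unnecessary.
 */
change = (ReorderBufferChange *)
    MemoryContextAlloc(change_context, sizeof(ReorderBufferChange));

/* Release: pfree() returns the chunk to the slab's free list. */
pfree(change);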