about summary refs log tree commit diff
path: root/src/backend/executor/nodeHash.c
diff options
context:
space:
mode:
author: Tom Lane <tgl@sss.pgh.pa.us> 2018-03-16 16:03:45 -0400
committer: Tom Lane <tgl@sss.pgh.pa.us> 2018-03-16 16:03:45 -0400
commit: 9e17bdb8a525ff89c4535cd153dc0f2fa813ea59 (patch)
tree: fa5a9fe98ac0d78726835566b039c04ed7962125 /src/backend/executor/nodeHash.c
parent: 013c0baaddd9df7e1f940184a8ab8e478214b46c (diff)
download: postgresql-9e17bdb8a525ff89c4535cd153dc0f2fa813ea59.tar.gz
download: postgresql-9e17bdb8a525ff89c4535cd153dc0f2fa813ea59.zip
Fix query-lifespan memory leakage in repeatedly executed hash joins.
ExecHashTableCreate allocated some memory that wasn't freed by ExecHashTableDestroy, specifically the per-hash-key function information. That's not a huge amount of data, but if one runs a query that repeats a hash join enough times, it builds up. Fix by arranging for the data in question to be kept in the hashtable's hashCxt instead of leaving it "loose" in the query-lifespan executor context. (This ensures that we'll also clean up anything that the hash functions allocate in fn_mcxt.) Per report from Amit Khandekar. It's been like this forever, so back-patch to all supported branches. Discussion: https://postgr.es/m/CAJ3gD9cFofAWGvcxLOxDHC=B0hjtW8yGmUsF2hdGh97CM38=7g@mail.gmail.com
Diffstat (limited to 'src/backend/executor/nodeHash.c')
-rw-r--r-- src/backend/executor/nodeHash.c | 35 +-
1 file changed, 18 insertions(+), 17 deletions(-)
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 06bb44b1631..4f069d17fd8 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -472,7 +472,8 @@ ExecHashTableCreate(HashState *state, List *hashOperators, bool keepNulls)
* Initialize the hash table control block.
*
* The hashtable control block is just palloc'd from the executor's
- * per-query memory context.
+ * per-query memory context. Everything else should be kept inside the
+ * subsidiary hashCxt or batchCxt.
*/
hashtable = (HashJoinTable) palloc(sizeof(HashJoinTableData));
hashtable->nbuckets = nbuckets;
@@ -515,6 +516,22 @@ ExecHashTableCreate(HashState *state, List *hashOperators, bool keepNulls)
#endif
/*
+ * Create temporary memory contexts in which to keep the hashtable working
+ * storage. See notes in executor/hashjoin.h.
+ */
+ hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
+ "HashTableContext",
+ ALLOCSET_DEFAULT_SIZES);
+
+ hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
+ "HashBatchContext",
+ ALLOCSET_DEFAULT_SIZES);
+
+ /* Allocate data that will live for the life of the hashjoin */
+
+ oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
+
+ /*
* Get info about the hash functions to be used for each hash key. Also
* remember whether the join operators are strict.
*/
@@ -540,22 +557,6 @@ ExecHashTableCreate(HashState *state, List *hashOperators, bool keepNulls)
i++;
}
- /*
- * Create temporary memory contexts in which to keep the hashtable working
- * storage. See notes in executor/hashjoin.h.
- */
- hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
- "HashTableContext",
- ALLOCSET_DEFAULT_SIZES);
-
- hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
- "HashBatchContext",
- ALLOCSET_DEFAULT_SIZES);
-
- /* Allocate data that will live for the life of the hashjoin */
-
- oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
-
if (nbatch > 1 && hashtable->parallel_state == NULL)
{
/*