aboutsummaryrefslogtreecommitdiff
path: root/src/backend/executor/nodeHash.c
diff options
context:
space:
mode:
author: Tom Lane <tgl@sss.pgh.pa.us> 2018-01-03 12:53:49 -0500
committer: Tom Lane <tgl@sss.pgh.pa.us> 2018-01-03 12:53:49 -0500
commit6fcde24063047c1195d023dfa08309302987cdcf (patch)
tree73209251382a0ce173b6e82988d385f5cabae3ea /src/backend/executor/nodeHash.c
parent3decd150a2d5a8f8d43010dd0c207746ba946303 (diff)
downloadpostgresql-6fcde24063047c1195d023dfa08309302987cdcf.tar.gz
postgresql-6fcde24063047c1195d023dfa08309302987cdcf.zip
Fix some minor errors in new PHJ code.
Correct ExecParallelHashTuplePrealloc's estimate of whether the space_allowed limit is exceeded. Be more consistent about tuples that are exactly HASH_CHUNK_THRESHOLD in size (they're "small", not "large"). Neither of these things explains the current buildfarm unhappiness, but they're still bugs.

Thomas Munro, per gripe by me

Discussion: https://postgr.es/m/CAEepm=34PDuR69kfYVhmZPgMdy8pSA-MYbpesEN1SR+2oj3Y+w@mail.gmail.com
Diffstat (limited to 'src/backend/executor/nodeHash.c')
-rw-r--r-- src/backend/executor/nodeHash.c | 6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 52f5c0c26e0..a9149ef81ce 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -2740,7 +2740,7 @@ ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
*/
chunk = hashtable->current_chunk;
if (chunk != NULL &&
- size < HASH_CHUNK_THRESHOLD &&
+ size <= HASH_CHUNK_THRESHOLD &&
chunk->maxlen - chunk->used >= size)
{
@@ -3260,6 +3260,7 @@ ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
Assert(batchno > 0);
Assert(batchno < hashtable->nbatch);
+ Assert(size == MAXALIGN(size));
LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
@@ -3280,7 +3281,8 @@ ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
if (pstate->growth != PHJ_GROWTH_DISABLED &&
batch->at_least_one_chunk &&
- (batch->shared->estimated_size + size > pstate->space_allowed))
+ (batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
+ > pstate->space_allowed))
{
/*
* We have determined that this batch would exceed the space budget if