Diffstat (limited to 'src/backend/executor/nodeAgg.c')
-rw-r--r--	src/backend/executor/nodeAgg.c	42	+++++++++++++++++++++++-------------------
1 file changed, 23 insertions(+), 19 deletions(-)
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 914b02ceee4..39bea204d16 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -1802,15 +1802,15 @@ hash_agg_set_limits(double hashentrysize, double input_groups, int used_bits,
 {
 	int			npartitions;
 	Size		partition_mem;
-	int			hash_mem = get_hash_mem();
+	Size		hash_mem_limit = get_hash_memory_limit();
 
 	/* if not expected to spill, use all of hash_mem */
-	if (input_groups * hashentrysize < hash_mem * 1024L)
+	if (input_groups * hashentrysize <= hash_mem_limit)
 	{
 		if (num_partitions != NULL)
 			*num_partitions = 0;
-		*mem_limit = hash_mem * 1024L;
-		*ngroups_limit = *mem_limit / hashentrysize;
+		*mem_limit = hash_mem_limit;
+		*ngroups_limit = hash_mem_limit / hashentrysize;
 		return;
 	}
 
@@ -1835,10 +1835,10 @@ hash_agg_set_limits(double hashentrysize, double input_groups, int used_bits,
 	 * minimum number of partitions, so we aren't going to dramatically exceed
 	 * work mem anyway.
 	 */
-	if (hash_mem * 1024L > 4 * partition_mem)
-		*mem_limit = hash_mem * 1024L - partition_mem;
+	if (hash_mem_limit > 4 * partition_mem)
+		*mem_limit = hash_mem_limit - partition_mem;
 	else
-		*mem_limit = hash_mem * 1024L * 0.75;
+		*mem_limit = hash_mem_limit * 0.75;
 
 	if (*mem_limit > hashentrysize)
 		*ngroups_limit = *mem_limit / hashentrysize;
@@ -1992,32 +1992,36 @@ static int
 hash_choose_num_partitions(double input_groups, double hashentrysize,
 						   int used_bits, int *log2_npartitions)
 {
-	Size		mem_wanted;
-	int			partition_limit;
+	Size		hash_mem_limit = get_hash_memory_limit();
+	double		partition_limit;
+	double		mem_wanted;
+	double		dpartitions;
 	int			npartitions;
 	int			partition_bits;
-	int			hash_mem = get_hash_mem();
 
 	/*
 	 * Avoid creating so many partitions that the memory requirements of the
 	 * open partition files are greater than 1/4 of hash_mem.
 	 */
 	partition_limit =
-		(hash_mem * 1024L * 0.25 - HASHAGG_READ_BUFFER_SIZE) /
+		(hash_mem_limit * 0.25 - HASHAGG_READ_BUFFER_SIZE) /
 		HASHAGG_WRITE_BUFFER_SIZE;
 
 	mem_wanted = HASHAGG_PARTITION_FACTOR * input_groups * hashentrysize;
 
 	/* make enough partitions so that each one is likely to fit in memory */
-	npartitions = 1 + (mem_wanted / (hash_mem * 1024L));
+	dpartitions = 1 + (mem_wanted / hash_mem_limit);
+
+	if (dpartitions > partition_limit)
+		dpartitions = partition_limit;
 
-	if (npartitions > partition_limit)
-		npartitions = partition_limit;
+	if (dpartitions < HASHAGG_MIN_PARTITIONS)
+		dpartitions = HASHAGG_MIN_PARTITIONS;
+	if (dpartitions > HASHAGG_MAX_PARTITIONS)
+		dpartitions = HASHAGG_MAX_PARTITIONS;
 
-	if (npartitions < HASHAGG_MIN_PARTITIONS)
-		npartitions = HASHAGG_MIN_PARTITIONS;
-	if (npartitions > HASHAGG_MAX_PARTITIONS)
-		npartitions = HASHAGG_MAX_PARTITIONS;
+	/* HASHAGG_MAX_PARTITIONS limit makes this safe */
+	npartitions = (int) dpartitions;
 
 	/* ceil(log2(npartitions)) */
 	partition_bits = my_log2(npartitions);
@@ -2030,7 +2034,7 @@ hash_choose_num_partitions(double input_groups, double hashentrysize,
 	*log2_npartitions = partition_bits;
 
 	/* number of partitions will be a power of two */
-	npartitions = 1L << partition_bits;
+	npartitions = 1 << partition_bits;
 
 	return npartitions;
 }
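
The pattern the hunks above apply is: fetch the memory budget once, in bytes, as a Size (get_hash_memory_limit()) instead of rebuilding it from a kilobyte-valued int with "hash_mem * 1024L", carry the partition arithmetic in double so no intermediate product can overflow, and clamp to a small fixed range before casting back to int. The following is a standalone sketch of that idiom, not PostgreSQL source; the constants and names (MIN_PARTITIONS, MAX_PARTITIONS, old_limit_bytes, choose_npartitions) are illustrative stand-ins for the HASHAGG_* symbols in the diff.

#include <stdio.h>
#include <stddef.h>

#define MIN_PARTITIONS	4		/* stand-in for HASHAGG_MIN_PARTITIONS */
#define MAX_PARTITIONS	1024	/* stand-in for HASHAGG_MAX_PARTITIONS */

/*
 * Old-style scaling: a kilobyte setting held in an int, widened with
 * "* 1024L".  Where long is 32 bits (LLP64 platforms such as 64-bit
 * Windows) this overflows once hash_mem_kb exceeds 2^31/1024, i.e. a
 * budget of about 2 GB.
 */
static long
old_limit_bytes(int hash_mem_kb)
{
	return hash_mem_kb * 1024L;	/* undefined behavior past ~2 GB on LLP64 */
}

/*
 * New-style pattern: the budget arrives already in bytes as a size_t,
 * the ratio is computed in double (no intermediate can overflow), and
 * the value is clamped to a small range before the cast back to int.
 */
static int
choose_npartitions(double mem_wanted, size_t hash_mem_limit)
{
	double		dpartitions;

	dpartitions = 1.0 + mem_wanted / (double) hash_mem_limit;

	if (dpartitions < MIN_PARTITIONS)
		dpartitions = MIN_PARTITIONS;
	if (dpartitions > MAX_PARTITIONS)
		dpartitions = MAX_PARTITIONS;

	/* the MAX_PARTITIONS clamp makes this cast safe */
	return (int) dpartitions;
}

int
main(void)
{
	int			hash_mem_kb = 4 * 1024 * 1024;	/* a 4 GB budget, in kB */

	/* prints a wrapped/garbage value where long is 32 bits */
	printf("old: %ld bytes\n", old_limit_bytes(hash_mem_kb));

	/* spill 1 TB of groups against the 4 GB budget -> 233 partitions */
	printf("new: %d partitions\n",
		   choose_npartitions(1e12, (size_t) hash_mem_kb * 1024));
	return 0;
}

Clamping in double before the cast is also what lets the final shift in the diff drop its long suffix: once npartitions is bounded by the MAX_PARTITIONS-style limit, partition_bits is small and "1 << partition_bits" fits comfortably in an int.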