diff options
Diffstat (limited to 'src/backend/optimizer/path/costsize.c')
-rw-r--r-- | src/backend/optimizer/path/costsize.c | 12 |
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index 27ce4cc8069..fda4b2c6e87 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -3525,7 +3525,7 @@ initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace, * Get hash table size that executor would use for inner relation. * * XXX for the moment, always assume that skew optimization will be - * performed. As long as SKEW_WORK_MEM_PERCENT is small, it's not worth + * performed. As long as SKEW_HASH_MEM_PERCENT is small, it's not worth * trying to determine that for sure. * * XXX at some point it might be interesting to try to account for skew @@ -3534,7 +3534,7 @@ initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace, ExecChooseHashTableSize(inner_path_rows_total, inner_path->pathtarget->width, true, /* useskew */ - parallel_hash, /* try_combined_work_mem */ + parallel_hash, /* try_combined_hash_mem */ outer_path->parallel_workers, &space_allowed, &numbuckets, @@ -3597,6 +3597,7 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path, Cost run_cost = workspace->run_cost; int numbuckets = workspace->numbuckets; int numbatches = workspace->numbatches; + int hash_mem; Cost cpu_per_tuple; QualCost hash_qual_cost; QualCost qp_qual_cost; @@ -3715,16 +3716,17 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path, } /* - * If the bucket holding the inner MCV would exceed work_mem, we don't + * If the bucket holding the inner MCV would exceed hash_mem, we don't * want to hash unless there is really no other alternative, so apply * disable_cost. (The executor normally copes with excessive memory usage * by splitting batches, but obviously it cannot separate equal values - * that way, so it will be unable to drive the batch size below work_mem + * that way, so it will be unable to drive the batch size below hash_mem * when this is true.) 
*/ + hash_mem = get_hash_mem(); if (relation_byte_size(clamp_row_est(inner_path_rows * innermcvfreq), inner_path->pathtarget->width) > - (work_mem * 1024L) + (hash_mem * 1024L)) startup_cost += disable_cost; /*