author     Richard Guo <rguo@postgresql.org>  2024-07-23 10:33:26 +0900
committer  Richard Guo <rguo@postgresql.org>  2024-07-23 10:33:26 +0900
commit     581df2148737fdb0ba6f2d8fda5ceb9d1e6302e6 (patch)
tree       908998effc2d0cc771ce3bea2a2130736479a6cd /src
parent     d2cba4f2cbfe69b2c5f93f364da4e574e075cb03 (diff)
Fix rowcount estimate for gather (merge) paths
In the case of a parallel plan, when computing the number of tuples processed per worker, we divide the total number of tuples by the parallel_divisor obtained from get_parallel_divisor(), which accounts for the leader's contribution in addition to the number of workers. Accordingly, when estimating the number of tuples for gather (merge) nodes, we should multiply the number of tuples per worker by the same parallel_divisor to reverse the division. However, currently we use parallel_workers rather than parallel_divisor for the multiplication. This could result in an underestimation of the number of tuples for gather (merge) nodes, especially when there are fewer than four workers.

This patch fixes the issue by using the same parallel_divisor for the multiplication. There is one ensuing plan change in the regression tests, but it looks reasonable and does not compromise the test's original purpose of testing parallel-aware hash join.

In passing, this patch removes an unnecessary assignment to path.rows in create_gather_merge_path, and fixes an uninitialized-variable issue in generate_useful_gather_paths.

No backpatch, as this could result in plan changes.

Author: Anthonin Bonnefoy
Reviewed-by: Rafia Sabih, Richard Guo
Discussion: https://postgr.es/m/CAO6_Xqr9+51NxgO=XospEkUeAg-p=EjAWmtpdcZwjRgGKJ53iA@mail.gmail.com
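[Illustration only, not part of the commit: a minimal standalone C sketch of the arithmetic described above. The 0.3-per-worker leader contribution mirrors what get_parallel_divisor() does in costsize.c; the helper name, the constant, and the 10000-row input are restated here for the example, so treat them as assumptions rather than the planner's actual code.]

#include <stdio.h>

/*
 * Illustrative restatement of the planner's parallel_divisor arithmetic:
 * the leader is assumed to contribute progressively less as workers are
 * added, and nothing at all once there are four or more workers.
 */
static double
parallel_divisor(int parallel_workers, int leader_participates)
{
	double		divisor = parallel_workers;

	if (leader_participates)
	{
		double		leader_contribution = 1.0 - 0.3 * parallel_workers;

		if (leader_contribution > 0)
			divisor += leader_contribution;
	}
	return divisor;
}

int
main(void)
{
	double		total_rows = 10000.0;

	for (int workers = 1; workers <= 4; workers++)
	{
		double		divisor = parallel_divisor(workers, 1);
		double		per_worker = total_rows / divisor;

		/*
		 * "old" multiplies rows-per-worker by parallel_workers (the bug);
		 * "new" multiplies by the same divisor used for the division,
		 * which recovers total_rows exactly.
		 */
		printf("workers=%d  old estimate=%.0f  new estimate=%.0f\n",
			   workers, per_worker * workers, per_worker * divisor);
	}
	return 0;
}

Compiled with any C compiler, this prints the gap closing as the worker count approaches four, matching the commit message's observation that the underestimation is worst with fewer than four workers.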
Diffstat (limited to 'src')
-rw-r--r--  src/backend/optimizer/path/allpaths.c     7
-rw-r--r--  src/backend/optimizer/path/costsize.c    18
-rw-r--r--  src/backend/optimizer/plan/planner.c      7
-rw-r--r--  src/backend/optimizer/util/pathnode.c     1
-rw-r--r--  src/include/optimizer/cost.h              1
-rw-r--r--  src/test/regress/expected/join_hash.out  19
6 files changed, 33 insertions(+), 20 deletions(-)
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index aa78c0af0cd..057b4b79ebb 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -3079,8 +3079,7 @@ generate_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
* of partial_pathlist because of the way add_partial_path works.
*/
cheapest_partial_path = linitial(rel->partial_pathlist);
- rows =
- cheapest_partial_path->rows * cheapest_partial_path->parallel_workers;
+ rows = compute_gather_rows(cheapest_partial_path);
simple_gather_path = (Path *)
create_gather_path(root, rel, cheapest_partial_path, rel->reltarget,
NULL, rowsp);
@@ -3098,7 +3097,7 @@ generate_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
if (subpath->pathkeys == NIL)
continue;
- rows = subpath->rows * subpath->parallel_workers;
+ rows = compute_gather_rows(subpath);
path = create_gather_merge_path(root, rel, subpath, rel->reltarget,
subpath->pathkeys, NULL, rowsp);
add_path(rel, &path->path);
@@ -3282,7 +3281,6 @@ generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_r
subpath,
useful_pathkeys,
-1.0);
- rows = subpath->rows * subpath->parallel_workers;
}
else
subpath = (Path *) create_incremental_sort_path(root,
@@ -3291,6 +3289,7 @@ generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_r
useful_pathkeys,
presorted_keys,
-1);
+ rows = compute_gather_rows(subpath);
path = create_gather_merge_path(root, rel,
subpath,
rel->reltarget,
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 2021c481b46..79991b19807 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -6473,3 +6473,21 @@ compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel,
return pages_fetched;
}
+
+/*
+ * compute_gather_rows
+ * Estimate number of rows for gather (merge) nodes.
+ *
+ * In a parallel plan, each worker's row estimate is determined by dividing the
+ * total number of rows by parallel_divisor, which accounts for the leader's
+ * contribution in addition to the number of workers. Accordingly, when
+ * estimating the number of rows for gather (merge) nodes, we multiply the rows
+ * per worker by the same parallel_divisor to undo the division.
+ */
+double
+compute_gather_rows(Path *path)
+{
+ Assert(path->parallel_workers > 0);
+
+ return clamp_row_est(path->rows * get_parallel_divisor(path));
+}
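[Note, not part of the patch: worked numbers for the helper above, assuming parallel_leader_participation is enabled and the usual 0.3-per-worker leader contribution in get_parallel_divisor(). With two workers the divisor is 2 + (1.0 - 0.3 * 2) = 2.4, so multiplying per-worker rows by parallel_workers recovered only 2/2.4, about 83% of the true total, while multiplying by the divisor recovers it exactly. The Assert documents that the helper is only meaningful for partial paths (parallel_workers > 0). The standalone sketch under the commit message walks through the same arithmetic for one to four workers.]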
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 4711f912390..948afd90948 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -5370,8 +5370,7 @@ create_ordered_paths(PlannerInfo *root,
root->sort_pathkeys,
presorted_keys,
limit_tuples);
- total_groups = input_path->rows *
- input_path->parallel_workers;
+ total_groups = compute_gather_rows(sorted_path);
sorted_path = (Path *)
create_gather_merge_path(root, ordered_rel,
sorted_path,
@@ -7543,8 +7542,6 @@ gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
(presorted_keys == 0 || !enable_incremental_sort))
continue;
- total_groups = path->rows * path->parallel_workers;
-
/*
* We've no need to consider both a sort and incremental sort. We'll
* just do a sort if there are no presorted keys and an incremental
@@ -7561,7 +7558,7 @@ gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
groupby_pathkeys,
presorted_keys,
-1.0);
-
+ total_groups = compute_gather_rows(path);
path = (Path *)
create_gather_merge_path(root,
rel,
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index c42742d2c7b..d1c4e1a6aa7 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -1899,7 +1899,6 @@ create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
pathnode->num_workers = subpath->parallel_workers;
pathnode->path.pathkeys = pathkeys;
pathnode->path.pathtarget = target ? target : rel->reltarget;
- pathnode->path.rows += subpath->rows;
if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
{
diff --git a/src/include/optimizer/cost.h b/src/include/optimizer/cost.h
index b1c51a4e70f..57861bfb446 100644
--- a/src/include/optimizer/cost.h
+++ b/src/include/optimizer/cost.h
@@ -212,5 +212,6 @@ extern PathTarget *set_pathtarget_cost_width(PlannerInfo *root, PathTarget *targ
extern double compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel,
Path *bitmapqual, double loop_count,
Cost *cost_p, double *tuples_p);
+extern double compute_gather_rows(Path *path);
#endif /* COST_H */
diff --git a/src/test/regress/expected/join_hash.out b/src/test/regress/expected/join_hash.out
index 262fa71ed8d..4fc34a0e72a 100644
--- a/src/test/regress/expected/join_hash.out
+++ b/src/test/regress/expected/join_hash.out
@@ -508,18 +508,17 @@ set local hash_mem_multiplier = 1.0;
set local enable_parallel_hash = on;
explain (costs off)
select count(*) from simple r join extremely_skewed s using (id);
- QUERY PLAN
------------------------------------------------------------------------
- Finalize Aggregate
+ QUERY PLAN
+-----------------------------------------------------------------
+ Aggregate
-> Gather
Workers Planned: 1
- -> Partial Aggregate
- -> Parallel Hash Join
- Hash Cond: (r.id = s.id)
- -> Parallel Seq Scan on simple r
- -> Parallel Hash
- -> Parallel Seq Scan on extremely_skewed s
-(9 rows)
+ -> Parallel Hash Join
+ Hash Cond: (r.id = s.id)
+ -> Parallel Seq Scan on simple r
+ -> Parallel Hash
+ -> Parallel Seq Scan on extremely_skewed s
+(8 rows)
select count(*) from simple r join extremely_skewed s using (id);
count