aboutsummaryrefslogtreecommitdiff
path: root/src/backend/optimizer/plan/createplan.c
diff options
context:
space:
mode:
authorRichard Guo <rguo@postgresql.org>2024-10-09 17:14:42 +0900
committerRichard Guo <rguo@postgresql.org>2024-10-09 17:14:42 +0900
commit828e94c9d2fd87c06a75354361543119d9937068 (patch)
tree9f03e159ac17d60cd350b5050141f3f7533740fa /src/backend/optimizer/plan/createplan.c
parentc4528fdfa903c74cf99837a554bd3c7e115bb366 (diff)
downloadpostgresql-828e94c9d2fd87c06a75354361543119d9937068.tar.gz
postgresql-828e94c9d2fd87c06a75354361543119d9937068.zip
Consider explicit incremental sort for mergejoins
For a mergejoin, if the given outer path or inner path is not already well enough ordered, we need to do an explicit sort. Currently, we only consider explicit full sort and do not account for incremental sort.

In this patch, for the outer path of a mergejoin, we choose to use explicit incremental sort if it is enabled and there are presorted keys. For the inner path, though, we cannot use incremental sort because it does not support mark/restore at present.

The rationale is based on the assumption that incremental sort is always faster than full sort when there are presorted keys, a premise that has been applied in various parts of the code. In addition, the current cost model tends to favor incremental sort as being cheaper than full sort in the presence of presorted keys, making it reasonable not to consider full sort in such cases.

It could be argued that a mergejoin with an incremental sort as the outer path might itself be selected as the inner path of another mergejoin. However, this should not be a problem, because mergejoin itself does not support mark/restore either, and we will add a Material node on top of it anyway in this case (see final_cost_mergejoin).

There is one ensuing plan change in the regression tests, and we have to modify that test case to ensure that it continues to test what it is intended to.

No backpatch as this could result in plan changes.

Author: Richard Guo
Reviewed-by: David Rowley, Tomas Vondra
Discussion: https://postgr.es/m/CAMbWs49x425QrX7h=Ux05WEnt8GS757H-jOP3_xsX5t1FoUsZw@mail.gmail.com
Diffstat (limited to 'src/backend/optimizer/plan/createplan.c')
-rw-r--r-- src/backend/optimizer/plan/createplan.c | 89
1 files changed, 81 insertions(+), 8 deletions(-)
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index bb45ef318fb..0d195a07ffc 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -179,6 +179,8 @@ static void copy_generic_path_info(Plan *dest, Path *src);
static void copy_plan_costsize(Plan *dest, Plan *src);
static void label_sort_with_costsize(PlannerInfo *root, Sort *plan,
double limit_tuples);
+static void label_incrementalsort_with_costsize(PlannerInfo *root, IncrementalSort *plan,
+ List *pathkeys, double limit_tuples);
static SeqScan *make_seqscan(List *qptlist, List *qpqual, Index scanrelid);
static SampleScan *make_samplescan(List *qptlist, List *qpqual, Index scanrelid,
TableSampleClause *tsc);
@@ -4523,12 +4525,51 @@ create_mergejoin_plan(PlannerInfo *root,
 	if (best_path->outersortkeys)
 	{
 		Relids		outer_relids = outer_path->parent->relids;
-		Sort	   *sort = make_sort_from_pathkeys(outer_plan,
-												   best_path->outersortkeys,
-												   outer_relids);
+		Plan	   *sort_plan;
+		bool		use_incremental_sort = false;
+		int			presorted_keys;
 
+		/*
+		 * We choose to use incremental sort if it is enabled and there are
+		 * presorted keys; otherwise we use full sort.
+		 */
+		if (enable_incremental_sort)
+		{
+			bool		is_sorted PG_USED_FOR_ASSERTS_ONLY;
+
+			is_sorted = pathkeys_count_contained_in(best_path->outersortkeys,
+													outer_path->pathkeys,
+													&presorted_keys);
+			Assert(!is_sorted);
+
+			if (presorted_keys > 0)
+				use_incremental_sort = true;
+		}
+
+		if (!use_incremental_sort)
+		{
+			sort_plan = (Plan *)
+				make_sort_from_pathkeys(outer_plan,
+										best_path->outersortkeys,
+										outer_relids);
+
+			label_sort_with_costsize(root, (Sort *) sort_plan, -1.0);
+		}
+		else
+		{
+			sort_plan = (Plan *)
+				make_incrementalsort_from_pathkeys(outer_plan,
+												   best_path->outersortkeys,
+												   outer_relids,
+												   presorted_keys);
+
+			label_incrementalsort_with_costsize(root,
+												(IncrementalSort *) sort_plan,
+												best_path->outersortkeys,
+												-1.0);
+		}
 
-		label_sort_with_costsize(root, sort, -1.0);
-		outer_plan = (Plan *) sort;
+		outer_plan = sort_plan;
 		outerpathkeys = best_path->outersortkeys;
 	}
 	else
else
@@ -4536,6 +4577,11 @@ create_mergejoin_plan(PlannerInfo *root,
if (best_path->innersortkeys)
{
+ /*
+ * We do not consider incremental sort for inner path, because
+ * incremental sort does not support mark/restore.
+ */
+
Relids inner_relids = inner_path->parent->relids;
Sort *sort = make_sort_from_pathkeys(inner_plan,
best_path->innersortkeys,
@@ -5447,10 +5493,6 @@ label_sort_with_costsize(PlannerInfo *root, Sort *plan, double limit_tuples)
Plan *lefttree = plan->plan.lefttree;
Path sort_path; /* dummy for result of cost_sort */
- /*
- * This function shouldn't have to deal with IncrementalSort plans because
- * they are only created from corresponding Path nodes.
- */
Assert(IsA(plan, Sort));
cost_sort(&sort_path, root, NIL,
@@ -5470,6 +5512,37 @@ label_sort_with_costsize(PlannerInfo *root, Sort *plan, double limit_tuples)
}
/*
+ * Same as label_sort_with_costsize, but labels the IncrementalSort node
+ * instead.
+ */
+static void
+label_incrementalsort_with_costsize(PlannerInfo *root, IncrementalSort *plan,
+ List *pathkeys, double limit_tuples)
+{
+ Plan *lefttree = plan->sort.plan.lefttree;
+ Path sort_path; /* dummy for result of cost_incremental_sort */
+
+ Assert(IsA(plan, IncrementalSort));
+
+ cost_incremental_sort(&sort_path, root, pathkeys,
+ plan->nPresortedCols,
+ plan->sort.plan.disabled_nodes,
+ lefttree->startup_cost,
+ lefttree->total_cost,
+ lefttree->plan_rows,
+ lefttree->plan_width,
+ 0.0,
+ work_mem,
+ limit_tuples);
+ plan->sort.plan.startup_cost = sort_path.startup_cost;
+ plan->sort.plan.total_cost = sort_path.total_cost;
+ plan->sort.plan.plan_rows = lefttree->plan_rows;
+ plan->sort.plan.plan_width = lefttree->plan_width;
+ plan->sort.plan.parallel_aware = false;
+ plan->sort.plan.parallel_safe = lefttree->parallel_safe;
+}
+
+/*
* bitmap_subplan_mark_shared
* Set isshared flag in bitmap subplan so that it will be created in
* shared memory.