Diffstat (limited to 'src/backend/optimizer')
-rw-r--r--  src/backend/optimizer/path/allpaths.c   |   4
-rw-r--r--  src/backend/optimizer/path/costsize.c   | 178
-rw-r--r--  src/backend/optimizer/path/pathkeys.c   |  72
-rw-r--r--  src/backend/optimizer/plan/createplan.c | 120
-rw-r--r--  src/backend/optimizer/plan/planner.c    |  85
-rw-r--r--  src/backend/optimizer/plan/setrefs.c    |   1
-rw-r--r--  src/backend/optimizer/plan/subselect.c  |   1
-rw-r--r--  src/backend/optimizer/util/pathnode.c   |  51
8 files changed, 455 insertions, 57 deletions
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 905bbe77d8d..ccf46dd0aab 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -3881,6 +3881,10 @@ print_path(PlannerInfo *root, Path *path, int indent)
ptype = "Sort";
subpath = ((SortPath *) path)->subpath;
break;
+ case T_IncrementalSortPath:
+ ptype = "IncrementalSort";
+ subpath = ((SortPath *) path)->subpath;
+ break;
case T_GroupPath:
ptype = "Group";
subpath = ((GroupPath *) path)->subpath;
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 9e7e57f118f..0eef5d7707e 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -128,6 +128,7 @@ bool enable_indexonlyscan = true;
bool enable_bitmapscan = true;
bool enable_tidscan = true;
bool enable_sort = true;
+bool enable_incrementalsort = true;
bool enable_hashagg = true;
bool enable_hashagg_disk = true;
bool enable_groupingsets_hash_disk = false;
@@ -1648,9 +1649,9 @@ cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
}
/*
- * cost_sort
- * Determines and returns the cost of sorting a relation, including
- * the cost of reading the input data.
+ * cost_tuplesort
+ * Determines and returns the cost of sorting a relation using tuplesort,
+ * not including the cost of reading the input data.
*
* If the total volume of data to sort is less than sort_mem, we will do
* an in-memory sort, which requires no I/O and about t*log2(t) tuple
@@ -1677,39 +1678,23 @@ cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
* specifying nonzero comparison_cost; typically that's used for any extra
* work that has to be done to prepare the inputs to the comparison operators.
*
- * 'pathkeys' is a list of sort keys
- * 'input_cost' is the total cost for reading the input data
* 'tuples' is the number of tuples in the relation
* 'width' is the average tuple width in bytes
* 'comparison_cost' is the extra cost per comparison, if any
* 'sort_mem' is the number of kilobytes of work memory allowed for the sort
* 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
- *
- * NOTE: some callers currently pass NIL for pathkeys because they
- * can't conveniently supply the sort keys. Since this routine doesn't
- * currently do anything with pathkeys anyway, that doesn't matter...
- * but if it ever does, it should react gracefully to lack of key data.
- * (Actually, the thing we'd most likely be interested in is just the number
- * of sort keys, which all callers *could* supply.)
*/
-void
-cost_sort(Path *path, PlannerInfo *root,
- List *pathkeys, Cost input_cost, double tuples, int width,
- Cost comparison_cost, int sort_mem,
- double limit_tuples)
+static void
+cost_tuplesort(Cost *startup_cost, Cost *run_cost,
+ double tuples, int width,
+ Cost comparison_cost, int sort_mem,
+ double limit_tuples)
{
- Cost startup_cost = input_cost;
- Cost run_cost = 0;
double input_bytes = relation_byte_size(tuples, width);
double output_bytes;
double output_tuples;
long sort_mem_bytes = sort_mem * 1024L;
- if (!enable_sort)
- startup_cost += disable_cost;
-
- path->rows = tuples;
-
/*
* We want to be sure the cost of a sort is never estimated as zero, even
* if passed-in tuple count is zero. Besides, mustn't do log(0)...
@@ -1748,7 +1733,7 @@ cost_sort(Path *path, PlannerInfo *root,
*
* Assume about N log2 N comparisons
*/
- startup_cost += comparison_cost * tuples * LOG2(tuples);
+ *startup_cost = comparison_cost * tuples * LOG2(tuples);
/* Disk costs */
@@ -1759,7 +1744,7 @@ cost_sort(Path *path, PlannerInfo *root,
log_runs = 1.0;
npageaccesses = 2.0 * npages * log_runs;
/* Assume 3/4ths of accesses are sequential, 1/4th are not */
- startup_cost += npageaccesses *
+ *startup_cost += npageaccesses *
(seq_page_cost * 0.75 + random_page_cost * 0.25);
}
else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
@@ -1770,12 +1755,12 @@ cost_sort(Path *path, PlannerInfo *root,
* factor is a bit higher than for quicksort. Tweak it so that the
* cost curve is continuous at the crossover point.
*/
- startup_cost += comparison_cost * tuples * LOG2(2.0 * output_tuples);
+ *startup_cost = comparison_cost * tuples * LOG2(2.0 * output_tuples);
}
else
{
/* We'll use plain quicksort on all the input tuples */
- startup_cost += comparison_cost * tuples * LOG2(tuples);
+ *startup_cost = comparison_cost * tuples * LOG2(tuples);
}
/*
@@ -1786,8 +1771,143 @@ cost_sort(Path *path, PlannerInfo *root,
* here --- the upper LIMIT will pro-rate the run cost so we'd be double
* counting the LIMIT otherwise.
*/
- run_cost += cpu_operator_cost * tuples;
+ *run_cost = cpu_operator_cost * tuples;
+}
+
+/*
+ * cost_incremental_sort
+ * Determines and returns the cost of sorting a relation incrementally, when
+ * the input path is presorted by a prefix of the pathkeys.
+ *
+ * 'presorted_keys' is the number of leading pathkeys by which the input path
+ * is sorted.
+ *
+ * We estimate the number of groups into which the relation is divided by the
+ * leading pathkeys, and then calculate the cost of sorting a single group
+ * with tuplesort using cost_tuplesort().
+ */
+void
+cost_incremental_sort(Path *path,
+ PlannerInfo *root, List *pathkeys, int presorted_keys,
+ Cost input_startup_cost, Cost input_total_cost,
+ double input_tuples, int width, Cost comparison_cost, int sort_mem,
+ double limit_tuples)
+{
+ Cost startup_cost = 0,
+ run_cost = 0,
+ input_run_cost = input_total_cost - input_startup_cost;
+ double group_tuples,
+ input_groups;
+ Cost group_startup_cost,
+ group_run_cost,
+ group_input_run_cost;
+ List *presortedExprs = NIL;
+ ListCell *l;
+ int i = 0;
+
+ Assert(presorted_keys != 0);
+
+ /*
+ * We want to be sure the cost of a sort is never estimated as zero, even
+ * if passed-in tuple count is zero. Besides, mustn't do log(0)...
+ */
+ if (input_tuples < 2.0)
+ input_tuples = 2.0;
+
+ /* Extract presorted keys as list of expressions */
+ foreach(l, pathkeys)
+ {
+ PathKey *key = (PathKey *) lfirst(l);
+ EquivalenceMember *member = (EquivalenceMember *)
+ linitial(key->pk_eclass->ec_members);
+
+ presortedExprs = lappend(presortedExprs, member->em_expr);
+
+ i++;
+ if (i >= presorted_keys)
+ break;
+ }
+
+ /* Estimate number of groups with equal presorted keys */
+ input_groups = estimate_num_groups(root, presortedExprs, input_tuples, NULL);
+ group_tuples = input_tuples / input_groups;
+ group_input_run_cost = input_run_cost / input_groups;
+
+ /*
+ * Estimate the average cost of sorting one group of rows with equal
+ * presorted keys. Incremental sort is sensitive to the distribution of
+ * tuples across the groups, and we rely on quite rough assumptions here,
+ * so be pessimistic about incremental sort performance and inflate the
+ * assumed average group size by half.
+ */
+ cost_tuplesort(&group_startup_cost, &group_run_cost,
+ 1.5 * group_tuples, width, comparison_cost, sort_mem,
+ limit_tuples);
+
+ /*
+ * Startup cost of incremental sort is the startup cost of its first group
+ * plus the cost of its input.
+ */
+ startup_cost += group_startup_cost
+ + input_startup_cost + group_input_run_cost;
+
+ /*
+ * After we started producing tuples from the first group, the cost of
+ * producing all the tuples is given by the cost to finish processing this
+ * group, plus the total cost to process the remaining groups, plus the
+ * remaining cost of input.
+ */
+ run_cost += group_run_cost
+ + (group_run_cost + group_startup_cost) * (input_groups - 1)
+ + group_input_run_cost * (input_groups - 1);
+
+ /*
+ * Incremental sort adds some overhead by itself. Firstly, it has to
+ * detect the sort groups. This is roughly equal to one extra copy and
+ * comparison per tuple. Secondly, it has to reset the tuplesort context
+ * for every group.
+ */
+ run_cost += (cpu_tuple_cost + comparison_cost) * input_tuples;
+ run_cost += 2.0 * cpu_tuple_cost * input_groups;
+ path->rows = input_tuples;
+ path->startup_cost = startup_cost;
+ path->total_cost = startup_cost + run_cost;
+}
+
+/*
+ * cost_sort
+ * Determines and returns the cost of sorting a relation, including
+ * the cost of reading the input data.
+ *
+ * NOTE: some callers currently pass NIL for pathkeys because they
+ * can't conveniently supply the sort keys. Since this routine doesn't
+ * currently do anything with pathkeys anyway, that doesn't matter...
+ * but if it ever does, it should react gracefully to lack of key data.
+ * (Actually, the thing we'd most likely be interested in is just the number
+ * of sort keys, which all callers *could* supply.)
+ */
+void
+cost_sort(Path *path, PlannerInfo *root,
+ List *pathkeys, Cost input_cost, double tuples, int width,
+ Cost comparison_cost, int sort_mem,
+ double limit_tuples)
+{
+ Cost startup_cost;
+ Cost run_cost;
+
+ cost_tuplesort(&startup_cost, &run_cost,
+ tuples, width,
+ comparison_cost, sort_mem,
+ limit_tuples);
+
+ if (!enable_sort)
+ startup_cost += disable_cost;
+
+ startup_cost += input_cost;
+
+ path->rows = tuples;
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
}
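
For reference, the arithmetic in cost_incremental_sort() above reduces to the following cost model (the symbols below are shorthand for the function's local variables, not identifiers from the patch): with G = input_groups, N = input_tuples, and (s_g, r_g) the startup/run cost pair that cost_tuplesort() estimates for one group of 1.5 * N / G tuples,

\[ \mathit{startup} = \mathit{input\_startup} + \frac{\mathit{input\_run}}{G} + s_g \]

\[ \mathit{run} = r_g + (G - 1)\Bigl(s_g + r_g + \frac{\mathit{input\_run}}{G}\Bigr) + (\mathit{cpu\_tuple\_cost} + \mathit{comparison\_cost})\,N + 2\,\mathit{cpu\_tuple\_cost}\,G \]

and total_cost = startup + run. The last two terms of the run cost are the per-tuple group-detection overhead and the per-group tuplesort reset cost mentioned in the comments.
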
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index 71b9d42c993..21e3f5a987c 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -335,6 +335,60 @@ pathkeys_contained_in(List *keys1, List *keys2)
}
/*
+ * pathkeys_count_contained_in
+ * Same as pathkeys_contained_in, but also sets length of longest
+ * common prefix of keys1 and keys2.
+ */
+bool
+pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
+{
+ int n = 0;
+ ListCell *key1,
+ *key2;
+
+ /*
+ * See if we can avoid looping through both lists. This optimization
+ * gains us several percent in planning time in a worst-case test.
+ */
+ if (keys1 == keys2)
+ {
+ *n_common = list_length(keys1);
+ return true;
+ }
+ else if (keys1 == NIL)
+ {
+ *n_common = 0;
+ return true;
+ }
+ else if (keys2 == NIL)
+ {
+ *n_common = 0;
+ return false;
+ }
+
+ /*
+ * If both lists are non-empty, iterate through both to find out how many
+ * items are shared.
+ */
+ forboth(key1, keys1, key2, keys2)
+ {
+ PathKey *pathkey1 = (PathKey *) lfirst(key1);
+ PathKey *pathkey2 = (PathKey *) lfirst(key2);
+
+ if (pathkey1 != pathkey2)
+ {
+ *n_common = n;
+ return false;
+ }
+ n++;
+ }
+
+ /* If we ended with a null value, then we've processed the whole list. */
+ *n_common = n;
+ return (key1 == NULL);
+}
+
+/*
* get_cheapest_path_for_pathkeys
* Find the cheapest path (according to the specified criterion) that
* satisfies the given pathkeys and parameterization.
@@ -1786,26 +1840,26 @@ right_merge_direction(PlannerInfo *root, PathKey *pathkey)
* Count the number of pathkeys that are useful for meeting the
* query's requested output ordering.
*
- * Unlike merge pathkeys, this is an all-or-nothing affair: it does us
- * no good to order by just the first key(s) of the requested ordering.
- * So the result is always either 0 or list_length(root->query_pathkeys).
+ * Because we have the possibility of incremental sort, ordering by just a
+ * prefix of the requested keys is potentially useful for producing the
+ * requested ordering more cheaply. Thus we return 0 if no useful keys are
+ * found, or else the number of leading keys shared by the list and the
+ * requested ordering.
*/
static int
pathkeys_useful_for_ordering(PlannerInfo *root, List *pathkeys)
{
+ int n_common_pathkeys;
+
if (root->query_pathkeys == NIL)
return 0; /* no special ordering requested */
if (pathkeys == NIL)
return 0; /* unordered path */
- if (pathkeys_contained_in(root->query_pathkeys, pathkeys))
- {
- /* It's useful ... or at least the first N keys are */
- return list_length(root->query_pathkeys);
- }
+ (void) pathkeys_count_contained_in(root->query_pathkeys, pathkeys,
+ &n_common_pathkeys);
- return 0; /* path ordering not useful */
+ return n_common_pathkeys;
}
/*
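
To make the prefix-counting semantics of pathkeys_count_contained_in() concrete, here is a standalone toy analogue over integer arrays (illustrative only, not backend code; the real function compares canonical PathKey pointers, so plain equality works there too):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Toy analogue of pathkeys_count_contained_in(): report whether keys1 is
 * entirely contained in keys2 (i.e. is a leading prefix of it) and how long
 * the shared leading prefix is.
 */
static bool
count_contained_in(const int *keys1, size_t len1,
                   const int *keys2, size_t len2, int *n_common)
{
    size_t n = 0;

    while (n < len1 && n < len2 && keys1[n] == keys2[n])
        n++;

    *n_common = (int) n;
    return n == len1;           /* true iff all of keys1 was matched */
}

int
main(void)
{
    int query_keys[] = {1, 2, 3};   /* e.g. ORDER BY a, b, c */
    int path_keys[] = {1, 2};       /* a path sorted by a, b only */
    int n_common;
    bool contained = count_contained_in(query_keys, 3, path_keys, 2, &n_common);

    /* prints: contained=0 n_common=2 */
    printf("contained=%d n_common=%d\n", contained, n_common);
    return 0;
}

With the prefix length available, pathkeys_useful_for_ordering() can return the length of the shared prefix instead of the old all-or-nothing answer, which is what lets partially sorted paths survive long enough for incremental sort to be considered.
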
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index fc25908dc61..6d26bfbeb5f 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -98,6 +98,8 @@ static Plan *create_projection_plan(PlannerInfo *root,
int flags);
static Plan *inject_projection_plan(Plan *subplan, List *tlist, bool parallel_safe);
static Sort *create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags);
+static IncrementalSort *create_incrementalsort_plan(PlannerInfo *root,
+ IncrementalSortPath *best_path, int flags);
static Group *create_group_plan(PlannerInfo *root, GroupPath *best_path);
static Unique *create_upper_unique_plan(PlannerInfo *root, UpperUniquePath *best_path,
int flags);
@@ -244,6 +246,10 @@ static MergeJoin *make_mergejoin(List *tlist,
static Sort *make_sort(Plan *lefttree, int numCols,
AttrNumber *sortColIdx, Oid *sortOperators,
Oid *collations, bool *nullsFirst);
+static IncrementalSort *make_incrementalsort(Plan *lefttree,
+ int numCols, int nPresortedCols,
+ AttrNumber *sortColIdx, Oid *sortOperators,
+ Oid *collations, bool *nullsFirst);
static Plan *prepare_sort_from_pathkeys(Plan *lefttree, List *pathkeys,
Relids relids,
const AttrNumber *reqColIdx,
@@ -258,6 +264,8 @@ static EquivalenceMember *find_ec_member_for_tle(EquivalenceClass *ec,
Relids relids);
static Sort *make_sort_from_pathkeys(Plan *lefttree, List *pathkeys,
Relids relids);
+static IncrementalSort *make_incrementalsort_from_pathkeys(Plan *lefttree,
+ List *pathkeys, Relids relids, int nPresortedCols);
static Sort *make_sort_from_groupcols(List *groupcls,
AttrNumber *grpColIdx,
Plan *lefttree);
@@ -460,6 +468,11 @@ create_plan_recurse(PlannerInfo *root, Path *best_path, int flags)
(SortPath *) best_path,
flags);
break;
+ case T_IncrementalSort:
+ plan = (Plan *) create_incrementalsort_plan(root,
+ (IncrementalSortPath *) best_path,
+ flags);
+ break;
case T_Group:
plan = (Plan *) create_group_plan(root,
(GroupPath *) best_path);
@@ -1995,6 +2008,32 @@ create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags)
}
/*
+ * create_incrementalsort_plan
+ *
+ * Do the same as create_sort_plan, but create an IncrementalSort plan.
+ */
+static IncrementalSort *
+create_incrementalsort_plan(PlannerInfo *root, IncrementalSortPath *best_path,
+ int flags)
+{
+ IncrementalSort *plan;
+ Plan *subplan;
+
+ /* See comments in create_sort_plan() above */
+ subplan = create_plan_recurse(root, best_path->spath.subpath,
+ flags | CP_SMALL_TLIST);
+ plan = make_incrementalsort_from_pathkeys(subplan,
+ best_path->spath.path.pathkeys,
+ IS_OTHER_REL(best_path->spath.subpath->parent) ?
+ best_path->spath.path.parent->relids : NULL,
+ best_path->nPresortedCols);
+
+ copy_generic_path_info(&plan->sort.plan, (Path *) best_path);
+
+ return plan;
+}
+
+/*
* create_group_plan
*
* Create a Group plan for 'best_path' and (recursively) plans
@@ -5090,6 +5129,12 @@ label_sort_with_costsize(PlannerInfo *root, Sort *plan, double limit_tuples)
Plan *lefttree = plan->plan.lefttree;
Path sort_path; /* dummy for result of cost_sort */
+ /*
+ * This function shouldn't have to deal with IncrementalSort plans because
+ * they are only created from corresponding Path nodes.
+ */
+ Assert(IsA(plan, Sort));
+
cost_sort(&sort_path, root, NIL,
lefttree->total_cost,
lefttree->plan_rows,
@@ -5677,9 +5722,12 @@ make_sort(Plan *lefttree, int numCols,
AttrNumber *sortColIdx, Oid *sortOperators,
Oid *collations, bool *nullsFirst)
{
- Sort *node = makeNode(Sort);
- Plan *plan = &node->plan;
+ Sort *node;
+ Plan *plan;
+ node = makeNode(Sort);
+
+ plan = &node->plan;
plan->targetlist = lefttree->targetlist;
plan->qual = NIL;
plan->lefttree = lefttree;
@@ -5694,6 +5742,37 @@ make_sort(Plan *lefttree, int numCols,
}
/*
+ * make_incrementalsort --- basic routine to build an IncrementalSort plan node
+ *
+ * Caller must have built the sortColIdx, sortOperators, collations, and
+ * nullsFirst arrays already.
+ */
+static IncrementalSort *
+make_incrementalsort(Plan *lefttree, int numCols, int nPresortedCols,
+ AttrNumber *sortColIdx, Oid *sortOperators,
+ Oid *collations, bool *nullsFirst)
+{
+ IncrementalSort *node;
+ Plan *plan;
+
+ node = makeNode(IncrementalSort);
+
+ plan = &node->sort.plan;
+ plan->targetlist = lefttree->targetlist;
+ plan->qual = NIL;
+ plan->lefttree = lefttree;
+ plan->righttree = NULL;
+ node->nPresortedCols = nPresortedCols;
+ node->sort.numCols = numCols;
+ node->sort.sortColIdx = sortColIdx;
+ node->sort.sortOperators = sortOperators;
+ node->sort.collations = collations;
+ node->sort.nullsFirst = nullsFirst;
+
+ return node;
+}
+
+/*
* prepare_sort_from_pathkeys
* Prepare to sort according to given pathkeys
*
@@ -6040,6 +6119,42 @@ make_sort_from_pathkeys(Plan *lefttree, List *pathkeys, Relids relids)
}
/*
+ * make_incrementalsort_from_pathkeys
+ * Create an incremental sort plan to sort according to given pathkeys
+ *
+ * 'lefttree' is the node which yields input tuples
+ * 'pathkeys' is the list of pathkeys by which the result is to be sorted
+ * 'relids' is the set of relations required by prepare_sort_from_pathkeys()
+ * 'nPresortedCols' is the number of presorted columns in input tuples
+ */
+static IncrementalSort *
+make_incrementalsort_from_pathkeys(Plan *lefttree, List *pathkeys,
+ Relids relids, int nPresortedCols)
+{
+ int numsortkeys;
+ AttrNumber *sortColIdx;
+ Oid *sortOperators;
+ Oid *collations;
+ bool *nullsFirst;
+
+ /* Compute sort column info, and adjust lefttree as needed */
+ lefttree = prepare_sort_from_pathkeys(lefttree, pathkeys,
+ relids,
+ NULL,
+ false,
+ &numsortkeys,
+ &sortColIdx,
+ &sortOperators,
+ &collations,
+ &nullsFirst);
+
+ /* Now build the IncrementalSort node */
+ return make_incrementalsort(lefttree, numsortkeys, nPresortedCols,
+ sortColIdx, sortOperators,
+ collations, nullsFirst);
+}
+
+/*
* make_sort_from_sortclauses
* Create sort plan to sort according to given sortclauses
*
@@ -6774,6 +6889,7 @@ is_projection_capable_path(Path *path)
case T_Hash:
case T_Material:
case T_Sort:
+ case T_IncrementalSort:
case T_Unique:
case T_SetOp:
case T_LockRows:
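
The new constructor mirrors make_sort(): an IncrementalSort node is an ordinary Sort plus a count of leading sort columns that already arrive ordered. A reduced, standalone sketch of the fields assigned above (stand-in type names, not the definitions from nodes/plannodes.h):

#include <stdbool.h>
#include <stdint.h>

typedef int16_t  AttrNumberSketch;   /* stands in for AttrNumber */
typedef uint32_t OidSketch;          /* stands in for Oid */

/* Fields that make_sort() fills in for a plain Sort (Plan pointers omitted). */
typedef struct SortSketch
{
    int               numCols;        /* number of sort-key columns */
    AttrNumberSketch *sortColIdx;     /* their positions in the target list */
    OidSketch        *sortOperators;  /* ordering operator per column */
    OidSketch        *collations;     /* collation per column */
    bool             *nullsFirst;     /* NULLS FIRST/LAST per column */
} SortSketch;

/* make_incrementalsort() fills in the same fields plus one extra. */
typedef struct IncrementalSortSketch
{
    SortSketch sort;            /* everything a plain Sort carries */
    int        nPresortedCols;  /* leading sort columns already ordered in
                                 * the input; only the remainder is sorted */
} IncrementalSortSketch;
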
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index f52226ccecc..aeb83841d7a 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -4924,13 +4924,16 @@ create_distinct_paths(PlannerInfo *root,
* Build a new upperrel containing Paths for ORDER BY evaluation.
*
* All paths in the result must satisfy the ORDER BY ordering.
- * The only new path we need consider is an explicit sort on the
- * cheapest-total existing path.
+ * The only new paths we need consider are an explicit full sort
+ * and incremental sort on the cheapest-total existing path.
*
* input_rel: contains the source-data Paths
* target: the output tlist the result Paths must emit
* limit_tuples: estimated bound on the number of output tuples,
* or -1 if no LIMIT or couldn't estimate
+ *
+ * XXX This only looks at sort_pathkeys. I wonder if it needs to look at the
+ * other pathkeys (grouping, ...) like generate_useful_gather_paths.
*/
static RelOptInfo *
create_ordered_paths(PlannerInfo *root,
@@ -4964,29 +4967,77 @@ create_ordered_paths(PlannerInfo *root,
foreach(lc, input_rel->pathlist)
{
- Path *path = (Path *) lfirst(lc);
+ Path *input_path = (Path *) lfirst(lc);
+ Path *sorted_path = input_path;
bool is_sorted;
+ int presorted_keys;
+
+ is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
+ input_path->pathkeys, &presorted_keys);
- is_sorted = pathkeys_contained_in(root->sort_pathkeys,
- path->pathkeys);
- if (path == cheapest_input_path || is_sorted)
+ if (is_sorted)
{
- if (!is_sorted)
+ /* Use the input path as is, but add a projection step if needed */
+ if (sorted_path->pathtarget != target)
+ sorted_path = apply_projection_to_path(root, ordered_rel,
+ sorted_path, target);
+
+ add_path(ordered_rel, sorted_path);
+ }
+ else
+ {
+ /*
+ * Try adding an explicit sort, but only to the cheapest total path
+ * since a full sort should generally add the same cost to all
+ * paths.
+ */
+ if (input_path == cheapest_input_path)
{
- /* An explicit sort here can take advantage of LIMIT */
- path = (Path *) create_sort_path(root,
- ordered_rel,
- path,
- root->sort_pathkeys,
- limit_tuples);
+ /*
+ * Sort the cheapest input path. An explicit sort here can
+ * take advantage of LIMIT.
+ */
+ sorted_path = (Path *) create_sort_path(root,
+ ordered_rel,
+ input_path,
+ root->sort_pathkeys,
+ limit_tuples);
+ /* Add projection step if needed */
+ if (sorted_path->pathtarget != target)
+ sorted_path = apply_projection_to_path(root, ordered_rel,
+ sorted_path, target);
+
+ add_path(ordered_rel, sorted_path);
}
+ /*
+ * If incremental sort is enabled, then try it as well. Unlike with
+ * regular sorts, we can't just look at the cheapest path, because
+ * the cost of incremental sort depends on how well presorted the
+ * path is. Additionally, incremental sort may enable a cheaper
+ * startup path to win out despite higher total cost.
+ */
+ if (!enable_incrementalsort)
+ continue;
+
+ /* Likewise, if the path can't be used for incremental sort. */
+ if (!presorted_keys)
+ continue;
+
+ /* Also consider incremental sort. */
+ sorted_path = (Path *) create_incremental_sort_path(root,
+ ordered_rel,
+ input_path,
+ root->sort_pathkeys,
+ presorted_keys,
+ limit_tuples);
+
/* Add projection step if needed */
- if (path->pathtarget != target)
- path = apply_projection_to_path(root, ordered_rel,
- path, target);
+ if (sorted_path->pathtarget != target)
+ sorted_path = apply_projection_to_path(root, ordered_rel,
+ sorted_path, target);
- add_path(ordered_rel, path);
+ add_path(ordered_rel, sorted_path);
}
}
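
The restructured loop in create_ordered_paths() boils down to a simple per-path decision. A standalone restatement (stub parameters and function name are illustrative, not the planner's actual API):

#include <stdbool.h>
#include <stdio.h>

/*
 * Per-path decision made by the new create_ordered_paths() loop: a path that
 * already satisfies the ORDER BY is used as-is; otherwise the cheapest-total
 * path gets an explicit full sort, and any path with a useful presorted
 * prefix also gets an incremental sort when that is enabled.
 */
static void
choose_ordered_actions(bool is_sorted, int presorted_keys,
                       bool is_cheapest_total, bool incrementalsort_enabled,
                       bool *add_full_sort, bool *add_incremental_sort)
{
    if (is_sorted)
    {
        *add_full_sort = false;
        *add_incremental_sort = false;
        return;
    }

    /* a full sort adds roughly the same cost everywhere, so try it only once */
    *add_full_sort = is_cheapest_total;

    /*
     * incremental sort's cost depends on how long the presorted prefix is,
     * so it is tried on every partially sorted path
     */
    *add_incremental_sort = incrementalsort_enabled && presorted_keys > 0;
}

int
main(void)
{
    bool full, incr;

    /* a non-cheapest path presorted on one leading key: incremental sort only */
    choose_ordered_actions(false, 1, false, true, &full, &incr);
    printf("full=%d incremental=%d\n", full, incr);   /* full=0 incremental=1 */
    return 0;
}
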
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index 3dcded506be..2b676bf4061 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -678,6 +678,7 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
case T_Material:
case T_Sort:
+ case T_IncrementalSort:
case T_Unique:
case T_SetOp:
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index 3650e8329d5..b02fcb9bfe7 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -2688,6 +2688,7 @@ finalize_plan(PlannerInfo *root, Plan *plan,
case T_Hash:
case T_Material:
case T_Sort:
+ case T_IncrementalSort:
case T_Unique:
case T_SetOp:
case T_Group:
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 8ba8122ee2f..4538ed88e09 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -2754,6 +2754,57 @@ create_set_projection_path(PlannerInfo *root,
}
/*
+ * create_incremental_sort_path
+ * Creates a pathnode that represents performing an incremental sort.
+ *
+ * 'rel' is the parent relation associated with the result
+ * 'subpath' is the path representing the source of data
+ * 'pathkeys' represents the desired sort order
+ * 'presorted_keys' is the number of keys by which the input path is
+ * already sorted
+ * 'limit_tuples' is the estimated bound on the number of output tuples,
+ * or -1 if no LIMIT or couldn't estimate
+ */
+SortPath *
+create_incremental_sort_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ Path *subpath,
+ List *pathkeys,
+ int presorted_keys,
+ double limit_tuples)
+{
+ IncrementalSortPath *sort = makeNode(IncrementalSortPath);
+ SortPath *pathnode = &sort->spath;
+
+ pathnode->path.pathtype = T_IncrementalSort;
+ pathnode->path.parent = rel;
+ /* Sort doesn't project, so use source path's pathtarget */
+ pathnode->path.pathtarget = subpath->pathtarget;
+ /* For now, assume we are above any joins, so no parameterization */
+ pathnode->path.param_info = NULL;
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel &&
+ subpath->parallel_safe;
+ pathnode->path.parallel_workers = subpath->parallel_workers;
+ pathnode->path.pathkeys = pathkeys;
+
+ pathnode->subpath = subpath;
+
+ cost_incremental_sort(&pathnode->path,
+ root, pathkeys, presorted_keys,
+ subpath->startup_cost,
+ subpath->total_cost,
+ subpath->rows,
+ subpath->pathtarget->width,
+ 0.0, /* XXX comparison_cost shouldn't be 0? */
+ work_mem, limit_tuples);
+
+ sort->nPresortedCols = presorted_keys;
+
+ return pathnode;
+}
+
+/*
* create_sort_path
* Creates a pathnode that represents performing an explicit sort.
*