Diffstat (limited to 'src/backend/optimizer')
-rw-r--r--  src/backend/optimizer/path/allpaths.c   4
-rw-r--r--  src/backend/optimizer/path/costsize.c  53
-rw-r--r--  src/backend/optimizer/path/indxpath.c  67
-rw-r--r--  src/backend/optimizer/plan/planner.c    2
-rw-r--r--  src/backend/optimizer/util/pathnode.c  19
-rw-r--r--  src/backend/optimizer/util/plancat.c    1
6 files changed, 124 insertions(+), 22 deletions(-)
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 85505c57d36..eeacf815e35 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -127,8 +127,6 @@ static void subquery_push_qual(Query *subquery,
static void recurse_push_qual(Node *setOp, Query *topquery,
RangeTblEntry *rte, Index rti, Node *qual);
static void remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel);
-static int compute_parallel_worker(RelOptInfo *rel, BlockNumber heap_pages,
- BlockNumber index_pages);
/*
@@ -2885,7 +2883,7 @@ remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel)
* "heap_pages" is the number of pages from the table that we expect to scan.
* "index_pages" is the number of pages from the index that we expect to scan.
*/
-static int
+int
compute_parallel_worker(RelOptInfo *rel, BlockNumber heap_pages,
BlockNumber index_pages)
{
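With static dropped, compute_parallel_worker() becomes callable from cost_index() in costsize.c below; the matching extern declaration presumably moves to a header such as src/include/optimizer/paths.h, which this diffstat (limited to src/backend/optimizer) cannot show. The heuristic itself, assuming this era's logic: no workers below the min_parallel_*_size page threshold, one worker at the threshold, and one more each time the page count triples, with the heap-based and index-based estimates computed separately, the smaller one winning, and the result clamped to max_parallel_workers_per_gather. A standalone, runnable sketch of that scaling rule (workers_for is a hypothetical name, not the PostgreSQL function):

#include <stdio.h>

/*
 * Sketch of the tripling heuristic described above: 0 workers below the
 * page threshold, 1 at the threshold, one more each time the page count
 * triples, capped at max_workers.
 */
static int
workers_for(double pages, double threshold_pages, int max_workers)
{
	int			workers = 0;

	if (pages >= threshold_pages)
	{
		workers = 1;
		while (pages >= threshold_pages * 3 && workers < max_workers)
		{
			workers++;
			threshold_pages *= 3;
		}
	}
	return workers;
}

int
main(void)
{
	/* a 1 GB heap (131072 pages of 8 kB) against a 1024-page threshold */
	printf("%d workers\n", workers_for(131072.0, 1024.0, 8));	/* prints 5 */
	return 0;
}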
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index a43daa744c7..d01630f8dba 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -391,7 +391,8 @@ cost_gather(GatherPath *path, PlannerInfo *root,
* we have to fetch from the table, so they don't reduce the scan cost.
*/
void
-cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
+cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
+ bool partial_path)
{
IndexOptInfo *index = path->indexinfo;
RelOptInfo *baserel = index->rel;
@@ -400,6 +401,7 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
List *qpquals;
Cost startup_cost = 0;
Cost run_cost = 0;
+ Cost cpu_run_cost = 0;
Cost indexStartupCost;
Cost indexTotalCost;
Selectivity indexSelectivity;
@@ -413,6 +415,8 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
Cost cpu_per_tuple;
double tuples_fetched;
double pages_fetched;
+ double rand_heap_pages;
+ double index_pages;
/* Should only be applied to base relations */
Assert(IsA(baserel, RelOptInfo) &&
@@ -459,7 +463,8 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
amcostestimate = (amcostestimate_function) index->amcostestimate;
amcostestimate(root, path, loop_count,
&indexStartupCost, &indexTotalCost,
- &indexSelectivity, &indexCorrelation);
+ &indexSelectivity, &indexCorrelation,
+ &index_pages);
/*
* Save amcostestimate's results for possible use in bitmap scan planning.
@@ -526,6 +531,8 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
if (indexonly)
pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
+ rand_heap_pages = pages_fetched;
+
max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
/*
@@ -564,6 +571,8 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
if (indexonly)
pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
+ rand_heap_pages = pages_fetched;
+
/* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
max_IO_cost = pages_fetched * spc_random_page_cost;
@@ -583,6 +592,29 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
min_IO_cost = 0;
}
+ if (partial_path)
+ {
+ /*
+ * Estimate the number of parallel workers required to scan the index.
+ * Use the heap-page count computed for the uncorrelated case, since a
+ * parallel scan fetches heap pages in effectively random rather than
+ * sequential order.
+ */
+ path->path.parallel_workers = compute_parallel_worker(baserel,
+ (BlockNumber) rand_heap_pages,
+ (BlockNumber) index_pages);
+
+ /*
+ * Fall out if no workers can be assigned to the parallel scan: such a
+ * path will be rejected anyway, so there is no benefit in doing any
+ * further computation.
+ */
+ if (path->path.parallel_workers <= 0)
+ return;
+
+ path->path.parallel_aware = true;
+ }
+
/*
* Now interpolate based on estimated index order correlation to get total
* disk I/O cost for main table accesses.
@@ -602,11 +634,24 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
startup_cost += qpqual_cost.startup;
cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
- run_cost += cpu_per_tuple * tuples_fetched;
+ cpu_run_cost += cpu_per_tuple * tuples_fetched;
/* tlist eval costs are paid per output row, not per tuple scanned */
startup_cost += path->path.pathtarget->cost.startup;
- run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
+ cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
+
+ /* Adjust costing for parallelism, if used. */
+ if (path->path.parallel_workers > 0)
+ {
+ double parallel_divisor = get_parallel_divisor(&path->path);
+
+ path->path.rows = clamp_row_est(path->path.rows / parallel_divisor);
+
+ /* The CPU cost is divided among all the workers. */
+ cpu_run_cost /= parallel_divisor;
+ }
+
+ run_cost += cpu_run_cost;
path->path.startup_cost = startup_cost;
path->path.total_cost = startup_cost + run_cost;
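Splitting out cpu_run_cost is the heart of this hunk: disk I/O cost is still charged in full, since the same total number of pages gets read however many processes share the work, while the CPU cost and the row estimate are divided by get_parallel_divisor(). That divisor exceeds parallel_workers because the leader also executes the plan, though with diminishing effect as workers are added. (Note also that amcostestimate now reports index_pages, giving compute_parallel_worker an index-side page count to work with.) A standalone sketch of the divisor arithmetic, assuming the leader-contribution formula used by costsize.c in this era:

#include <stdio.h>

/*
 * Sketch of the get_parallel_divisor() arithmetic assumed above: the
 * leader participates in execution but is assumed to contribute 30%
 * less for each worker it must also coordinate.
 */
static double
parallel_divisor(int parallel_workers)
{
	double		divisor = parallel_workers;
	double		leader_contribution = 1.0 - (0.3 * parallel_workers);

	if (leader_contribution > 0)
		divisor += leader_contribution;
	return divisor;
}

int
main(void)
{
	/* with 2 workers: 2 + (1 - 0.6) = 2.4, so cpu_run_cost and the row
	 * estimate are divided by 2.4, not by 3 */
	for (int w = 1; w <= 4; w++)
		printf("%d workers -> divisor %.2f\n", w, parallel_divisor(w));
	return 0;
}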
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index 52834689881..56eccafd7b0 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -813,7 +813,7 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel,
/*
* build_index_paths
* Given an index and a set of index clauses for it, construct zero
- * or more IndexPaths.
+ * or more IndexPaths. It also constructs zero or more partial IndexPaths.
*
* We return a list of paths because (1) this routine checks some cases
* that should cause us to not generate any IndexPath, and (2) in some
@@ -1042,8 +1042,41 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
NoMovementScanDirection,
index_only_scan,
outer_relids,
- loop_count);
+ loop_count,
+ false);
result = lappend(result, ipath);
+
+ /*
+ * If appropriate, consider a parallel index scan. We don't allow
+ * parallel index scans for bitmap or index-only scans.
+ */
+ if (index->amcanparallel && !index_only_scan &&
+ rel->consider_parallel && outer_relids == NULL &&
+ scantype != ST_BITMAPSCAN)
+ {
+ ipath = create_index_path(root, index,
+ index_clauses,
+ clause_columns,
+ orderbyclauses,
+ orderbyclausecols,
+ useful_pathkeys,
+ index_is_ordered ?
+ ForwardScanDirection :
+ NoMovementScanDirection,
+ index_only_scan,
+ outer_relids,
+ loop_count,
+ true);
+
+ /*
+ * If, after costing the path, we find that it's not worth
+ * using parallel workers, just free it.
+ */
+ if (ipath->path.parallel_workers > 0)
+ add_partial_path(rel, (Path *) ipath);
+ else
+ pfree(ipath);
+ }
}
/*
@@ -1066,8 +1099,36 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
BackwardScanDirection,
index_only_scan,
outer_relids,
- loop_count);
+ loop_count,
+ false);
result = lappend(result, ipath);
+
+ /* If appropriate, consider parallel index scan */
+ if (index->amcanparallel && !index_only_scan &&
+ rel->consider_parallel && outer_relids == NULL &&
+ scantype != ST_BITMAPSCAN)
+ {
+ ipath = create_index_path(root, index,
+ index_clauses,
+ clause_columns,
+ NIL,
+ NIL,
+ useful_pathkeys,
+ BackwardScanDirection,
+ index_only_scan,
+ outer_relids,
+ loop_count,
+ true);
+
+ /*
+ * If, after costing the path, we find that it's not worth
+ * using parallel workers, just free it.
+ */
+ if (ipath->path.parallel_workers > 0)
+ add_partial_path(rel, (Path *) ipath);
+ else
+ pfree(ipath);
+ }
}
}
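A partial IndexPath added here never competes in the ordinary pathlist directly; it only turns into a complete plan once a Gather node is placed on top of the cheapest surviving partial path. Roughly what the existing generate_gather_paths() (untouched by this patch) does with the rel's partial_pathlist:

	if (rel->partial_pathlist != NIL)
	{
		Path	   *cheapest_partial_path = linitial(rel->partial_pathlist);
		Path	   *gather_path = (Path *)
			create_gather_path(root, rel, cheapest_partial_path,
							   rel->reltarget, NULL, NULL);

		add_path(rel, gather_path);
	}

This ordering is also what makes the pfree() above safe: no GatherPath referencing the rejected partial IndexPath can exist yet.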
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index abb4f12cea1..3d33d469713 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -5333,7 +5333,7 @@ plan_cluster_use_sort(Oid tableOid, Oid indexOid)
indexScanPath = create_index_path(root, indexInfo,
NIL, NIL, NIL, NIL, NIL,
ForwardScanDirection, false,
- NULL, 1.0);
+ NULL, 1.0, false);
return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
}
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index f440875ceb1..324829690d2 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -744,10 +744,9 @@ add_path_precheck(RelOptInfo *parent_rel,
* As with add_path, we pfree paths that are found to be dominated by
* another partial path; this requires that there be no other references to
* such paths yet. Hence, GatherPaths must not be created for a rel until
- * we're done creating all partial paths for it. We do not currently build
- * partial indexscan paths, so there is no need for an exception for
- * IndexPaths here; for safety, we instead Assert that a path to be freed
- * isn't an IndexPath.
+ * we're done creating all partial paths for it. Unlike add_path, we don't
+ * need an exception for IndexPaths here: a partial IndexPath won't be
+ * referenced by a partial BitmapHeapPath, so it is always safe to free.
*/
void
add_partial_path(RelOptInfo *parent_rel, Path *new_path)
@@ -826,8 +825,6 @@ add_partial_path(RelOptInfo *parent_rel, Path *new_path)
{
parent_rel->partial_pathlist =
list_delete_cell(parent_rel->partial_pathlist, p1, p1_prev);
- /* we should not see IndexPaths here, so always safe to delete */
- Assert(!IsA(old_path, IndexPath));
pfree(old_path);
/* p1_prev does not advance */
}
@@ -860,8 +857,6 @@ add_partial_path(RelOptInfo *parent_rel, Path *new_path)
}
else
{
- /* we should not see IndexPaths here, so always safe to delete */
- Assert(!IsA(new_path, IndexPath));
/* Reject and recycle the new path */
pfree(new_path);
}
@@ -1005,6 +1000,7 @@ create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer
* 'required_outer' is the set of outer relids for a parameterized path.
* 'loop_count' is the number of repetitions of the indexscan to factor into
* estimates of caching behavior.
+ * 'partial_path' is true if constructing a parallel index scan path.
*
* Returns the new path node.
*/
@@ -1019,7 +1015,8 @@ create_index_path(PlannerInfo *root,
ScanDirection indexscandir,
bool indexonly,
Relids required_outer,
- double loop_count)
+ double loop_count,
+ bool partial_path)
{
IndexPath *pathnode = makeNode(IndexPath);
RelOptInfo *rel = index->rel;
@@ -1049,7 +1046,7 @@ create_index_path(PlannerInfo *root,
pathnode->indexorderbycols = indexorderbycols;
pathnode->indexscandir = indexscandir;
- cost_index(pathnode, root, loop_count);
+ cost_index(pathnode, root, loop_count, partial_path);
return pathnode;
}
@@ -3247,7 +3244,7 @@ reparameterize_path(PlannerInfo *root, Path *path,
memcpy(newpath, ipath, sizeof(IndexPath));
newpath->path.param_info =
get_baserel_parampathinfo(root, rel, required_outer);
- cost_index(newpath, root, loop_count);
+ cost_index(newpath, root, loop_count, false);
return (Path *) newpath;
}
case T_BitmapHeapScan:
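The deleted Asserts encoded the old invariant that no IndexPath could ever reach add_partial_path(); now that partial index paths do, the rewritten comment explains why freeing them is nonetheless safe. Contrast add_path() elsewhere in this file, which must spare dominated IndexPaths because a BitmapHeapPath may still hold a bare pointer to one:

	/* in add_path(): dominated IndexPaths are kept alive, not freed */
	if (!IsA(old_path, IndexPath))
		pfree(old_path);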
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index 7836e6b3f85..4ed27054a11 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -241,6 +241,7 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
info->amoptionalkey = amroutine->amoptionalkey;
info->amsearcharray = amroutine->amsearcharray;
info->amsearchnulls = amroutine->amsearchnulls;
+ info->amcanparallel = amroutine->amcanparallel;
info->amhasgettuple = (amroutine->amgettuple != NULL);
info->amhasgetbitmap = (amroutine->amgetbitmap != NULL);
info->amcostestimate = amroutine->amcostestimate;
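get_relation_info() copies the new amcanparallel capability flag from the access method's IndexAmRoutine into IndexOptInfo; that is the index->amcanparallel tested in indxpath.c above. An AM opts in from its handler function. A sketch modeled on btree's bthandler() (abridged; the remaining fields are filled in as before, and only the amcanparallel line is new):

Datum
bthandler(PG_FUNCTION_ARGS)
{
	IndexAmRoutine *amroutine = makeNode(IndexAmRoutine);

	amroutine->amcanorder = true;
	amroutine->amcanbackward = true;
	/* ... other capability flags and support callbacks ... */
	amroutine->amcanparallel = true;	/* permit parallel index scans */
	amroutine->amcostestimate = btcostestimate;

	PG_RETURN_POINTER(amroutine);
}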