author     Robert Haas <rhaas@postgresql.org>    2017-02-15 13:53:24 -0500
committer  Robert Haas <rhaas@postgresql.org>    2017-02-15 13:53:24 -0500
commit     5262f7a4fc44f651241d2ff1fa688dd664a34874 (patch)
tree       8bccf6ebd560f5e081cbba37efa0ecb40d017fd2 /src/backend/optimizer/path/costsize.c
parent     51ee6f3160d2e1515ed6197594bda67eb99dc2cc (diff)
Add optimizer and executor support for parallel index scans.
In combination with 569174f1be92be93f5366212cc46960d28a5c5cd, which taught the btree AM how to perform parallel index scans, this allows parallel index scan plans on btree indexes. This infrastructure should be general enough to support parallel index scans for other index AMs as well, if someone updates them to support parallel scans.

Amit Kapila, reviewed and tested by Anastasia Lubennikova, Tushar Ahuja, Haribabu Kommi, and me.
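The planner-visible change below is the new partial_path argument to cost_index(). As a minimal illustrative sketch (the helper name and the idea of holding two separate IndexPath objects are hypothetical, not from this commit), a caller would now drive both costing modes like this:

    /*
     * Hypothetical caller sketch: cost an ordinary index path and a
     * candidate partial (parallel) index path with the new signature.
     */
    static void
    cost_index_paths(PlannerInfo *root, IndexPath *ipath,
                     IndexPath *partial_ipath, double loop_count)
    {
        /* Ordinary path: parallel-worker estimation is skipped entirely. */
        cost_index(ipath, root, loop_count, false);

        /*
         * Candidate partial path: with partial_path = true, cost_index()
         * estimates a worker count; if workers are assigned it marks the
         * path parallel-aware and divides rows and CPU run cost among
         * them, otherwise it returns early and the path is discarded.
         */
        cost_index(partial_ipath, root, loop_count, true);
    }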
Diffstat (limited to 'src/backend/optimizer/path/costsize.c')
-rw-r--r--  src/backend/optimizer/path/costsize.c | 53
1 file changed, 49 insertions(+), 4 deletions(-)
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index a43daa744c7..d01630f8dba 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -391,7 +391,8 @@ cost_gather(GatherPath *path, PlannerInfo *root,
* we have to fetch from the table, so they don't reduce the scan cost.
*/
void
-cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
+cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
+ bool partial_path)
{
IndexOptInfo *index = path->indexinfo;
RelOptInfo *baserel = index->rel;
@@ -400,6 +401,7 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
List *qpquals;
Cost startup_cost = 0;
Cost run_cost = 0;
+ Cost cpu_run_cost = 0;
Cost indexStartupCost;
Cost indexTotalCost;
Selectivity indexSelectivity;
@@ -413,6 +415,8 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
Cost cpu_per_tuple;
double tuples_fetched;
double pages_fetched;
+ double rand_heap_pages;
+ double index_pages;
/* Should only be applied to base relations */
Assert(IsA(baserel, RelOptInfo) &&
@@ -459,7 +463,8 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
amcostestimate = (amcostestimate_function) index->amcostestimate;
amcostestimate(root, path, loop_count,
&indexStartupCost, &indexTotalCost,
- &indexSelectivity, &indexCorrelation);
+ &indexSelectivity, &indexCorrelation,
+ &index_pages);
/*
* Save amcostestimate's results for possible use in bitmap scan planning.
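This hunk implies a matching change to the index AM API: every amcostestimate implementation now reports, through an extra output parameter, the number of index pages the scan is expected to fetch. Based on the call above, the callback type in amapi.h should take roughly this shape after the commit (exact parameter names may differ):

    typedef void (*amcostestimate_function) (struct PlannerInfo *root,
                                             struct IndexPath *path,
                                             double loop_count,
                                             Cost *indexStartupCost,
                                             Cost *indexTotalCost,
                                             Selectivity *indexSelectivity,
                                             double *indexCorrelation,
                                             double *indexPages);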
@@ -526,6 +531,8 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
if (indexonly)
pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
+ rand_heap_pages = pages_fetched;
+
max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
/*
@@ -564,6 +571,8 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
if (indexonly)
pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
+ rand_heap_pages = pages_fetched;
+
/* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
max_IO_cost = pages_fetched * spc_random_page_cost;
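Both hunks save the same quantity, so rand_heap_pages always ends up holding the random-access heap page estimate that feeds max_IO_cost. For reference, the max_IO_cost computed here and the min_IO_cost set just below are blended further down in cost_index(), in code this commit leaves untouched; that step reads approximately:

    /*
     * Unchanged downstream code, quoted approximately: interpolate
     * between the perfectly-correlated (min) and perfectly-uncorrelated
     * (max) I/O costs using the squared index-order correlation.
     */
    csquared = indexCorrelation * indexCorrelation;
    run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);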
@@ -583,6 +592,29 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
min_IO_cost = 0;
}
+ if (partial_path)
+ {
+ /*
+ * Estimate the number of parallel workers required to scan the index.
+ * Use the heap page estimate saved above (rand_heap_pages), since in a
+ * parallel scan the heap pages are accessed in random order rather
+ * than sequentially.
+ */
+ path->path.parallel_workers = compute_parallel_worker(baserel,
+ (BlockNumber) rand_heap_pages,
+ (BlockNumber) index_pages);
+
+ /*
+ * Fall out if no workers can be assigned for the parallel scan: such
+ * a path will be rejected anyway, so there is no benefit in doing
+ * further computation.
+ */
+ if (path->path.parallel_workers <= 0)
+ return;
+
+ path->path.parallel_aware = true;
+ }
+
/*
* Now interpolate based on estimated index order correlation to get total
* disk I/O cost for main table accesses.
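compute_parallel_worker() itself lives in allpaths.c and is not part of this diff. A simplified sketch of its heuristic, assuming the min_parallel_table_scan_size and min_parallel_index_scan_size thresholds introduced by the parent commit (the per-relation reloption override and the max_parallel_workers_per_gather cap are glossed over here):

    /*
     * Simplified sketch, not the real implementation: one worker once a
     * relation reaches its threshold size, plus one more for each
     * tripling of the page count beyond it; the heap-based and
     * index-based estimates are combined by taking the smaller one.
     */
    static int
    sketch_compute_parallel_worker(double heap_pages, double index_pages,
                                   double heap_threshold,
                                   double index_threshold)
    {
        int     heap_workers = 0;
        int     index_workers = 0;

        /* Too small on either axis: not worth parallelizing at all. */
        if (heap_pages < heap_threshold || index_pages < index_threshold)
            return 0;

        while (heap_pages >= heap_threshold)
        {
            heap_workers++;
            heap_threshold *= 3;
        }
        while (index_pages >= index_threshold)
        {
            index_workers++;
            index_threshold *= 3;
        }

        return heap_workers < index_workers ? heap_workers : index_workers;
    }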
@@ -602,11 +634,24 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
startup_cost += qpqual_cost.startup;
cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
- run_cost += cpu_per_tuple * tuples_fetched;
+ cpu_run_cost += cpu_per_tuple * tuples_fetched;
/* tlist eval costs are paid per output row, not per tuple scanned */
startup_cost += path->path.pathtarget->cost.startup;
- run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
+ cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
+
+ /* Adjust costing for parallelism, if used. */
+ if (path->path.parallel_workers > 0)
+ {
+ double parallel_divisor = get_parallel_divisor(&path->path);
+
+ path->path.rows = clamp_row_est(path->path.rows / parallel_divisor);
+
+ /* The CPU cost is divided among all the workers. */
+ cpu_run_cost /= parallel_divisor;
+ }
+
+ run_cost += cpu_run_cost;
path->path.startup_cost = startup_cost;
path->path.total_cost = startup_cost + run_cost;
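get_parallel_divisor() is likewise defined elsewhere in costsize.c. As it stood around this commit (a sketch, predating the later parallel_leader_participation refinements), each worker counts fully toward the divisor while the leader contributes whatever fraction of its time is not spent servicing workers, assumed to be about 30% per worker:

    /*
     * Sketch of the contemporaneous get_parallel_divisor(): the leader
     * also executes the plan, but its contribution shrinks as workers
     * are added, reaching zero at four or more workers.
     */
    static double
    sketch_parallel_divisor(int parallel_workers)
    {
        double  parallel_divisor = parallel_workers;
        double  leader_contribution;

        leader_contribution = 1.0 - (0.3 * parallel_workers);
        if (leader_contribution > 0)
            parallel_divisor += leader_contribution;

        return parallel_divisor;
    }

With two workers, for example, the divisor is 2.4 rather than 3.0, so dividing cpu_run_cost by it models a sub-linear speedup instead of a perfect one.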