about summary refs log tree commit diff
path: root/src/backend
diff options
context:
space:
mode:
Diffstat (limited to 'src/backend')
-rw-r--r-- src/backend/optimizer/path/costsize.c | 26
1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 16ef348f408..d8db0b29e1f 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -100,6 +100,13 @@
#define LOG2(x) (log(x) / 0.693147180559945)
+/*
+ * Append and MergeAppend nodes are less expensive than some other operations
+ * which use cpu_tuple_cost; instead of adding a separate GUC, estimate the
+ * per-tuple cost as cpu_tuple_cost multiplied by this value.
+ */
+#define APPEND_CPU_COST_MULTIPLIER 0.5
+
double seq_page_cost = DEFAULT_SEQ_PAGE_COST;
double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
@@ -1828,10 +1835,6 @@ append_nonpartial_cost(List *subpaths, int numpaths, int parallel_workers)
/*
* cost_append
* Determines and returns the cost of an Append node.
- *
- * We charge nothing extra for the Append itself, which perhaps is too
- * optimistic, but since it doesn't do any selection or projection, it is a
- * pretty cheap node.
*/
void
cost_append(AppendPath *apath)
@@ -1914,6 +1917,13 @@ cost_append(AppendPath *apath)
apath->first_partial_path,
apath->path.parallel_workers);
}
+
+ /*
+ * Although Append does not do any selection or projection, it's not free;
+ * add a small per-tuple overhead.
+ */
+ apath->path.total_cost +=
+ cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * apath->path.rows;
}
/*
@@ -1968,12 +1978,10 @@ cost_merge_append(Path *path, PlannerInfo *root,
run_cost += tuples * comparison_cost * logN;
/*
- * Also charge a small amount (arbitrarily set equal to operator cost) per
- * extracted tuple. We don't charge cpu_tuple_cost because a MergeAppend
- * node doesn't do qual-checking or projection, so it has less overhead
- * than most plan nodes.
+ * Although MergeAppend does not do any selection or projection, it's not
+ * free; add a small per-tuple overhead.
*/
- run_cost += cpu_operator_cost * tuples;
+ run_cost += cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * tuples;
path->startup_cost = startup_cost + input_startup_cost;
path->total_cost = startup_cost + run_cost + input_total_cost;