Diffstat (limited to 'src/backend/optimizer/plan')
-rw-r--r--  src/backend/optimizer/plan/createplan.c | 401
-rw-r--r--  src/backend/optimizer/plan/initsplan.c  | 231
-rw-r--r--  src/backend/optimizer/plan/planagg.c    | 126
-rw-r--r--  src/backend/optimizer/plan/planmain.c   | 124
-rw-r--r--  src/backend/optimizer/plan/planner.c    | 354
-rw-r--r--  src/backend/optimizer/plan/setrefs.c    | 158
-rw-r--r--  src/backend/optimizer/plan/subselect.c  | 264
7 files changed, 811 insertions(+), 847 deletions(-)
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index b7af04e1b9f..f0dd6548711 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -10,7 +10,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.200 2005/10/13 00:06:46 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.201 2005/10/15 02:49:20 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -50,10 +50,10 @@ static IndexScan *create_indexscan_plan(PlannerInfo *root, IndexPath *best_path, List *tlist, List *scan_clauses, List **nonlossy_clauses); static BitmapHeapScan *create_bitmap_scan_plan(PlannerInfo *root, - BitmapHeapPath *best_path, - List *tlist, List *scan_clauses); + BitmapHeapPath *best_path, + List *tlist, List *scan_clauses); static Plan *create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, - List **qual, List **indexqual); + List **qual, List **indexqual); static TidScan *create_tidscan_plan(PlannerInfo *root, TidPath *best_path, List *tlist, List *scan_clauses); static SubqueryScan *create_subqueryscan_plan(PlannerInfo *root, Path *best_path, @@ -72,7 +72,7 @@ static void fix_indexqual_references(List *indexquals, IndexPath *index_path, List **indexstrategy, List **indexsubtype); static Node *fix_indexqual_operand(Node *node, IndexOptInfo *index, - Oid *opclass); + Oid *opclass); static List *get_switched_clauses(List *clauses, Relids outerrelids); static void copy_path_costsize(Plan *dest, Path *src); static void copy_plan_costsize(Plan *dest, Plan *src); @@ -82,15 +82,15 @@ static IndexScan *make_indexscan(List *qptlist, List *qpqual, Index scanrelid, List *indexstrategy, List *indexsubtype, ScanDirection indexscandir); static BitmapIndexScan *make_bitmap_indexscan(Index scanrelid, Oid indexid, - List *indexqual, - List *indexqualorig, - List *indexstrategy, - List *indexsubtype); + List *indexqual, + List *indexqualorig, + List *indexstrategy, + List *indexsubtype); static BitmapHeapScan *make_bitmap_heapscan(List *qptlist, - List *qpqual, - Plan *lefttree, - List *bitmapqualorig, - Index scanrelid); + List *qpqual, + Plan *lefttree, + List *bitmapqualorig, + Index scanrelid); static TidScan *make_tidscan(List *qptlist, List *qpqual, Index scanrelid, List *tideval); static FunctionScan *make_functionscan(List *qptlist, List *qpqual, @@ -164,7 +164,7 @@ create_plan(PlannerInfo *root, Path *best_path) break; case T_Material: plan = (Plan *) create_material_plan(root, - (MaterialPath *) best_path); + (MaterialPath *) best_path); break; case T_Unique: plan = (Plan *) create_unique_plan(root, @@ -195,12 +195,12 @@ create_scan_plan(PlannerInfo *root, Path *best_path) Scan *plan; /* - * For table scans, rather than using the relation targetlist (which - * is only those Vars actually needed by the query), we prefer to - * generate a tlist containing all Vars in order. This will allow the - * executor to optimize away projection of the table tuples, if - * possible. (Note that planner.c may replace the tlist we generate - * here, forcing projection to occur.) + * For table scans, rather than using the relation targetlist (which is + * only those Vars actually needed by the query), we prefer to generate a + * tlist containing all Vars in order. This will allow the executor to + * optimize away projection of the table tuples, if possible. 
(Note that + * planner.c may replace the tlist we generate here, forcing projection to + * occur.) */ if (use_physical_tlist(rel)) { @@ -213,8 +213,8 @@ create_scan_plan(PlannerInfo *root, Path *best_path) tlist = build_relation_tlist(rel); /* - * Extract the relevant restriction clauses from the parent relation; - * the executor must apply all these restrictions during the scan. + * Extract the relevant restriction clauses from the parent relation; the + * executor must apply all these restrictions during the scan. */ scan_clauses = rel->baserestrictinfo; @@ -237,7 +237,7 @@ create_scan_plan(PlannerInfo *root, Path *best_path) case T_BitmapHeapScan: plan = (Scan *) create_bitmap_scan_plan(root, - (BitmapHeapPath *) best_path, + (BitmapHeapPath *) best_path, tlist, scan_clauses); break; @@ -308,8 +308,8 @@ use_physical_tlist(RelOptInfo *rel) int i; /* - * OK for subquery and function scans; otherwise, can't do it for - * anything except real relations. + * OK for subquery and function scans; otherwise, can't do it for anything + * except real relations. */ if (rel->rtekind != RTE_RELATION) { @@ -328,9 +328,9 @@ use_physical_tlist(RelOptInfo *rel) return false; /* - * Can't do it if any system columns are requested, either. (This - * could possibly be fixed but would take some fragile assumptions in - * setrefs.c, I think.) + * Can't do it if any system columns are requested, either. (This could + * possibly be fixed but would take some fragile assumptions in setrefs.c, + * I think.) */ for (i = rel->min_attr; i <= 0; i++) { @@ -415,14 +415,14 @@ create_join_plan(PlannerInfo *root, JoinPath *best_path) #ifdef NOT_USED /* - * * Expensive function pullups may have pulled local predicates * - * into this path node. Put them in the qpqual of the plan node. * - * JMH, 6/15/92 + * * Expensive function pullups may have pulled local predicates * into + * this path node. Put them in the qpqual of the plan node. * JMH, + * 6/15/92 */ if (get_loc_restrictinfo(best_path) != NIL) set_qpqual((Plan) plan, list_concat(get_qpqual((Plan) plan), - get_actual_clauses(get_loc_restrictinfo(best_path)))); + get_actual_clauses(get_loc_restrictinfo(best_path)))); #endif return plan; @@ -444,13 +444,13 @@ create_append_plan(PlannerInfo *root, AppendPath *best_path) ListCell *subpaths; /* - * It is possible for the subplans list to contain only one entry, - * or even no entries. Handle these cases specially. + * It is possible for the subplans list to contain only one entry, or even + * no entries. Handle these cases specially. * - * XXX ideally, if there's just one entry, we'd not bother to generate - * an Append node but just return the single child. At the moment this - * does not work because the varno of the child scan plan won't match - * the parent-rel Vars it'll be asked to emit. + * XXX ideally, if there's just one entry, we'd not bother to generate an + * Append node but just return the single child. At the moment this does + * not work because the varno of the child scan plan won't match the + * parent-rel Vars it'll be asked to emit. */ if (best_path->subpaths == NIL) { @@ -618,8 +618,8 @@ create_unique_plan(PlannerInfo *root, UniquePath *best_path) if (newitems) { /* - * If the top plan node can't do projections, we need to add a - * Result node to help it along. + * If the top plan node can't do projections, we need to add a Result + * node to help it along. 
*/ if (!is_projection_capable_plan(subplan)) subplan = (Plan *) make_result(newtlist, NULL, subplan); @@ -628,8 +628,8 @@ create_unique_plan(PlannerInfo *root, UniquePath *best_path) } /* - * Build control information showing which subplan output columns are - * to be examined by the grouping step. Unfortunately we can't merge this + * Build control information showing which subplan output columns are to + * be examined by the grouping step. Unfortunately we can't merge this * with the previous loop, since we didn't then know which version of the * subplan tlist we'd end up using. */ @@ -656,9 +656,9 @@ create_unique_plan(PlannerInfo *root, UniquePath *best_path) numGroups = (long) Min(best_path->rows, (double) LONG_MAX); /* - * Since the Agg node is going to project anyway, we can give it - * the minimum output tlist, without any stuff we might have added - * to the subplan tlist. + * Since the Agg node is going to project anyway, we can give it the + * minimum output tlist, without any stuff we might have added to the + * subplan tlist. */ plan = (Plan *) make_agg(root, build_relation_tlist(best_path->path.parent), @@ -776,9 +776,9 @@ create_indexscan_plan(PlannerInfo *root, stripped_indexquals = get_actual_clauses(indexquals); /* - * The executor needs a copy with the indexkey on the left of each - * clause and with index attr numbers substituted for table ones. This - * pass also gets strategy info and looks for "lossy" operators. + * The executor needs a copy with the indexkey on the left of each clause + * and with index attr numbers substituted for table ones. This pass also + * gets strategy info and looks for "lossy" operators. */ fix_indexqual_references(indexquals, best_path, &fixed_indexquals, @@ -792,12 +792,11 @@ create_indexscan_plan(PlannerInfo *root, /* * If this is an innerjoin scan, the indexclauses will contain join - * clauses that are not present in scan_clauses (since the passed-in - * value is just the rel's baserestrictinfo list). We must add these - * clauses to scan_clauses to ensure they get checked. In most cases - * we will remove the join clauses again below, but if a join clause - * contains a special operator, we need to make sure it gets into the - * scan_clauses. + * clauses that are not present in scan_clauses (since the passed-in value + * is just the rel's baserestrictinfo list). We must add these clauses to + * scan_clauses to ensure they get checked. In most cases we will remove + * the join clauses again below, but if a join clause contains a special + * operator, we need to make sure it gets into the scan_clauses. * * Note: pointer comparison should be enough to determine RestrictInfo * matches. @@ -806,25 +805,25 @@ create_indexscan_plan(PlannerInfo *root, scan_clauses = list_union_ptr(scan_clauses, best_path->indexclauses); /* - * The qpqual list must contain all restrictions not automatically - * handled by the index. All the predicates in the indexquals will be - * checked (either by the index itself, or by nodeIndexscan.c), but if - * there are any "special" operators involved then they must be included - * in qpqual. Also, any lossy index operators must be rechecked in - * the qpqual. The upshot is that qpqual must contain scan_clauses - * minus whatever appears in nonlossy_indexquals. + * The qpqual list must contain all restrictions not automatically handled + * by the index. 
All the predicates in the indexquals will be checked + * (either by the index itself, or by nodeIndexscan.c), but if there are + * any "special" operators involved then they must be included in qpqual. + * Also, any lossy index operators must be rechecked in the qpqual. The + * upshot is that qpqual must contain scan_clauses minus whatever appears + * in nonlossy_indexquals. * - * In normal cases simple pointer equality checks will be enough to - * spot duplicate RestrictInfos, so we try that first. In some situations - * (particularly with OR'd index conditions) we may have scan_clauses - * that are not equal to, but are logically implied by, the index quals; - * so we also try a predicate_implied_by() check to see if we can discard - * quals that way. (predicate_implied_by assumes its first input contains - * only immutable functions, so we have to check that.) We can also - * discard quals that are implied by a partial index's predicate. + * In normal cases simple pointer equality checks will be enough to spot + * duplicate RestrictInfos, so we try that first. In some situations + * (particularly with OR'd index conditions) we may have scan_clauses that + * are not equal to, but are logically implied by, the index quals; so we + * also try a predicate_implied_by() check to see if we can discard quals + * that way. (predicate_implied_by assumes its first input contains only + * immutable functions, so we have to check that.) We can also discard + * quals that are implied by a partial index's predicate. * - * While at it, we strip off the RestrictInfos to produce a list of - * plain expressions. + * While at it, we strip off the RestrictInfos to produce a list of plain + * expressions. */ qpqual = NIL; foreach(l, scan_clauses) @@ -836,7 +835,7 @@ create_indexscan_plan(PlannerInfo *root, continue; if (!contain_mutable_functions((Node *) rinfo->clause)) { - List *clausel = list_make1(rinfo->clause); + List *clausel = list_make1(rinfo->clause); if (predicate_implied_by(clausel, nonlossy_indexquals)) continue; @@ -898,13 +897,12 @@ create_bitmap_scan_plan(PlannerInfo *root, scan_clauses = get_actual_clauses(scan_clauses); /* - * If this is a innerjoin scan, the indexclauses will contain join - * clauses that are not present in scan_clauses (since the passed-in - * value is just the rel's baserestrictinfo list). We must add these - * clauses to scan_clauses to ensure they get checked. In most cases - * we will remove the join clauses again below, but if a join clause - * contains a special operator, we need to make sure it gets into the - * scan_clauses. + * If this is a innerjoin scan, the indexclauses will contain join clauses + * that are not present in scan_clauses (since the passed-in value is just + * the rel's baserestrictinfo list). We must add these clauses to + * scan_clauses to ensure they get checked. In most cases we will remove + * the join clauses again below, but if a join clause contains a special + * operator, we need to make sure it gets into the scan_clauses. */ if (best_path->isjoininner) { @@ -912,12 +910,12 @@ create_bitmap_scan_plan(PlannerInfo *root, } /* - * The qpqual list must contain all restrictions not automatically - * handled by the index. All the predicates in the indexquals will be - * checked (either by the index itself, or by nodeBitmapHeapscan.c), - * but if there are any "special" or lossy operators involved then they - * must be added to qpqual. The upshot is that qpquals must contain - * scan_clauses minus whatever appears in indexquals. 
+ * The qpqual list must contain all restrictions not automatically handled + * by the index. All the predicates in the indexquals will be checked + * (either by the index itself, or by nodeBitmapHeapscan.c), but if there + * are any "special" or lossy operators involved then they must be added + * to qpqual. The upshot is that qpquals must contain scan_clauses minus + * whatever appears in indexquals. * * In normal cases simple equal() checks will be enough to spot duplicate * clauses, so we try that first. In some situations (particularly with @@ -930,25 +928,25 @@ create_bitmap_scan_plan(PlannerInfo *root, * * XXX For the moment, we only consider partial index predicates in the * simple single-index-scan case. Is it worth trying to be smart about - * more complex cases? Perhaps create_bitmap_subplan should be made to + * more complex cases? Perhaps create_bitmap_subplan should be made to * include predicate info in what it constructs. */ qpqual = NIL; foreach(l, scan_clauses) { - Node *clause = (Node *) lfirst(l); + Node *clause = (Node *) lfirst(l); if (list_member(indexquals, clause)) continue; if (!contain_mutable_functions(clause)) { - List *clausel = list_make1(clause); + List *clausel = list_make1(clause); if (predicate_implied_by(clausel, indexquals)) continue; if (IsA(best_path->bitmapqual, IndexPath)) { - IndexPath *ipath = (IndexPath *) best_path->bitmapqual; + IndexPath *ipath = (IndexPath *) best_path->bitmapqual; if (predicate_implied_by(clausel, ipath->indexinfo->indpred)) continue; @@ -1010,15 +1008,15 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, /* * There may well be redundant quals among the subplans, since a * top-level WHERE qual might have gotten used to form several - * different index quals. We don't try exceedingly hard to - * eliminate redundancies, but we do eliminate obvious duplicates - * by using list_concat_unique. + * different index quals. We don't try exceedingly hard to eliminate + * redundancies, but we do eliminate obvious duplicates by using + * list_concat_unique. */ foreach(l, apath->bitmapquals) { - Plan *subplan; - List *subqual; - List *subindexqual; + Plan *subplan; + List *subqual; + List *subindexqual; subplan = create_bitmap_subplan(root, (Path *) lfirst(l), &subqual, &subindexqual); @@ -1048,7 +1046,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, /* * Here, we only detect qual-free subplans. A qual-free subplan would * cause us to generate "... OR true ..." which we may as well reduce - * to just "true". We do not try to eliminate redundant subclauses + * to just "true". We do not try to eliminate redundant subclauses * because (a) it's not as likely as in the AND case, and (b) we might * well be working with hundreds or even thousands of OR conditions, * perhaps from a long IN list. The performance of list_append_unique @@ -1056,9 +1054,9 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, */ foreach(l, opath->bitmapquals) { - Plan *subplan; - List *subqual; - List *subindexqual; + Plan *subplan; + List *subqual; + List *subindexqual; subplan = create_bitmap_subplan(root, (Path *) lfirst(l), &subqual, &subindexqual); @@ -1080,6 +1078,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, plan->plan_rows = clamp_row_est(opath->bitmapselectivity * opath->path.parent->tuples); plan->plan_width = 0; /* meaningless */ + /* * If there were constant-TRUE subquals, the OR reduces to constant * TRUE. 
Also, avoid generating one-element ORs, which could happen @@ -1100,9 +1099,9 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, } else if (IsA(bitmapqual, IndexPath)) { - IndexPath *ipath = (IndexPath *) bitmapqual; - IndexScan *iscan; - List *nonlossy_clauses; + IndexPath *ipath = (IndexPath *) bitmapqual; + IndexScan *iscan; + List *nonlossy_clauses; /* Use the regular indexscan plan build machinery... */ iscan = create_indexscan_plan(root, ipath, NIL, NIL, @@ -1245,18 +1244,18 @@ create_nestloop_plan(PlannerInfo *root, if (IsA(best_path->innerjoinpath, IndexPath)) { /* - * An index is being used to reduce the number of tuples scanned - * in the inner relation. If there are join clauses being used - * with the index, we may remove those join clauses from the list - * of clauses that have to be checked as qpquals at the join node. + * An index is being used to reduce the number of tuples scanned in + * the inner relation. If there are join clauses being used with the + * index, we may remove those join clauses from the list of clauses + * that have to be checked as qpquals at the join node. * * We can also remove any join clauses that are redundant with those - * being used in the index scan; prior redundancy checks will not - * have caught this case because the join clauses would never have - * been put in the same joininfo list. + * being used in the index scan; prior redundancy checks will not have + * caught this case because the join clauses would never have been put + * in the same joininfo list. * - * We can skip this if the index path is an ordinary indexpath and - * not a special innerjoin path. + * We can skip this if the index path is an ordinary indexpath and not a + * special innerjoin path. */ IndexPath *innerpath = (IndexPath *) best_path->innerjoinpath; @@ -1266,7 +1265,7 @@ create_nestloop_plan(PlannerInfo *root, select_nonredundant_join_clauses(root, joinrestrictclauses, innerpath->indexclauses, - IS_OUTER_JOIN(best_path->jointype)); + IS_OUTER_JOIN(best_path->jointype)); } } else if (IsA(best_path->innerjoinpath, BitmapHeapPath)) @@ -1275,11 +1274,11 @@ create_nestloop_plan(PlannerInfo *root, * Same deal for bitmapped index scans. * * Note: both here and above, we ignore any implicit index restrictions - * associated with the use of partial indexes. This is OK because + * associated with the use of partial indexes. This is OK because * we're only trying to prove we can dispense with some join quals; * failing to prove that doesn't result in an incorrect plan. It is - * the right way to proceed because adding more quals to the stuff - * we got from the original query would just make it harder to detect + * the right way to proceed because adding more quals to the stuff we + * got from the original query would just make it harder to detect * duplication. */ BitmapHeapPath *innerpath = (BitmapHeapPath *) best_path->innerjoinpath; @@ -1296,7 +1295,7 @@ create_nestloop_plan(PlannerInfo *root, select_nonredundant_join_clauses(root, joinrestrictclauses, bitmapclauses, - IS_OUTER_JOIN(best_path->jointype)); + IS_OUTER_JOIN(best_path->jointype)); } } @@ -1355,18 +1354,18 @@ create_mergejoin_plan(PlannerInfo *root, } /* - * Remove the mergeclauses from the list of join qual clauses, leaving - * the list of quals that must be checked as qpquals. + * Remove the mergeclauses from the list of join qual clauses, leaving the + * list of quals that must be checked as qpquals. 
*/ mergeclauses = get_actual_clauses(best_path->path_mergeclauses); joinclauses = list_difference(joinclauses, mergeclauses); /* - * Rearrange mergeclauses, if needed, so that the outer variable is - * always on the left. + * Rearrange mergeclauses, if needed, so that the outer variable is always + * on the left. */ mergeclauses = get_switched_clauses(best_path->path_mergeclauses, - best_path->jpath.outerjoinpath->parent->relids); + best_path->jpath.outerjoinpath->parent->relids); /* Sort clauses into best execution order */ /* NB: do NOT reorder the mergeclauses */ @@ -1375,8 +1374,8 @@ create_mergejoin_plan(PlannerInfo *root, /* * Create explicit sort nodes for the outer and inner join paths if - * necessary. The sort cost was already accounted for in the path. - * Make sure there are no excess columns in the inputs if sorting. + * necessary. The sort cost was already accounted for in the path. Make + * sure there are no excess columns in the inputs if sorting. */ if (best_path->outersortkeys) { @@ -1439,18 +1438,18 @@ create_hashjoin_plan(PlannerInfo *root, } /* - * Remove the hashclauses from the list of join qual clauses, leaving - * the list of quals that must be checked as qpquals. + * Remove the hashclauses from the list of join qual clauses, leaving the + * list of quals that must be checked as qpquals. */ hashclauses = get_actual_clauses(best_path->path_hashclauses); joinclauses = list_difference(joinclauses, hashclauses); /* - * Rearrange hashclauses, if needed, so that the outer variable is - * always on the left. + * Rearrange hashclauses, if needed, so that the outer variable is always + * on the left. */ hashclauses = get_switched_clauses(best_path->path_hashclauses, - best_path->jpath.outerjoinpath->parent->relids); + best_path->jpath.outerjoinpath->parent->relids); /* Sort clauses into best execution order */ joinclauses = order_qual_clauses(root, joinclauses); @@ -1551,23 +1550,22 @@ fix_indexqual_references(List *indexquals, IndexPath *index_path, /* * Make a copy that will become the fixed clause. * - * We used to try to do a shallow copy here, but that fails if there - * is a subplan in the arguments of the opclause. So just do a - * full copy. + * We used to try to do a shallow copy here, but that fails if there is a + * subplan in the arguments of the opclause. So just do a full copy. */ newclause = (OpExpr *) copyObject((Node *) clause); /* - * Check to see if the indexkey is on the right; if so, commute - * the clause. The indexkey should be the side that refers to - * (only) the base relation. + * Check to see if the indexkey is on the right; if so, commute the + * clause. The indexkey should be the side that refers to (only) the + * base relation. */ if (!bms_equal(rinfo->left_relids, index->rel->relids)) CommuteClause(newclause); /* - * Now, determine which index attribute this is, change the - * indexkey operand as needed, and get the index opclass. + * Now, determine which index attribute this is, change the indexkey + * operand as needed, and get the index opclass. */ linitial(newclause->args) = fix_indexqual_operand(linitial(newclause->args), @@ -1577,10 +1575,9 @@ fix_indexqual_references(List *indexquals, IndexPath *index_path, *fixed_indexquals = lappend(*fixed_indexquals, newclause); /* - * Look up the (possibly commuted) operator in the operator class - * to get its strategy numbers and the recheck indicator. This - * also double-checks that we found an operator matching the - * index. 
+ * Look up the (possibly commuted) operator in the operator class to + * get its strategy numbers and the recheck indicator. This also + * double-checks that we found an operator matching the index. */ get_op_opclass_properties(newclause->opno, opclass, &stratno, &stratsubtype, &recheck); @@ -1598,11 +1595,11 @@ static Node * fix_indexqual_operand(Node *node, IndexOptInfo *index, Oid *opclass) { /* - * We represent index keys by Var nodes having the varno of the base - * table but varattno equal to the index's attribute number (index - * column position). This is a bit hokey ... would be cleaner to use - * a special-purpose node type that could not be mistaken for a - * regular Var. But it will do for now. + * We represent index keys by Var nodes having the varno of the base table + * but varattno equal to the index's attribute number (index column + * position). This is a bit hokey ... would be cleaner to use a + * special-purpose node type that could not be mistaken for a regular Var. + * But it will do for now. */ Var *result; int pos; @@ -1692,8 +1689,8 @@ get_switched_clauses(List *clauses, Relids outerrelids) if (bms_is_subset(restrictinfo->right_relids, outerrelids)) { /* - * Duplicate just enough of the structure to allow commuting - * the clause without changing the original list. Could use + * Duplicate just enough of the structure to allow commuting the + * clause without changing the original list. Could use * copyObject, but a complete deep copy is overkill. */ OpExpr *temp = makeNode(OpExpr); @@ -1934,9 +1931,9 @@ make_subqueryscan(List *qptlist, Plan *plan = &node->scan.plan; /* - * Cost is figured here for the convenience of prepunion.c. Note this - * is only correct for the case where qpqual is empty; otherwise - * caller should overwrite cost with a better estimate. + * Cost is figured here for the convenience of prepunion.c. Note this is + * only correct for the case where qpqual is empty; otherwise caller + * should overwrite cost with a better estimate. */ copy_plan_costsize(plan, subplan); plan->total_cost += cpu_tuple_cost * subplan->plan_rows; @@ -1977,9 +1974,9 @@ make_append(List *appendplans, bool isTarget, List *tlist) ListCell *subnode; /* - * Compute cost as sum of subplan costs. We charge nothing extra for - * the Append itself, which perhaps is too optimistic, but since it - * doesn't do any selection or projection, it is a pretty cheap node. + * Compute cost as sum of subplan costs. We charge nothing extra for the + * Append itself, which perhaps is too optimistic, but since it doesn't do + * any selection or projection, it is a pretty cheap node. */ plan->startup_cost = 0; plan->total_cost = 0; @@ -2094,8 +2091,8 @@ make_hash(Plan *lefttree) copy_plan_costsize(plan, lefttree); /* - * For plausibility, make startup & total costs equal total cost of - * input plan; this only affects EXPLAIN display not decisions. + * For plausibility, make startup & total costs equal total cost of input + * plan; this only affects EXPLAIN display not decisions. 
*/ plan->startup_cost = plan->total_cost; plan->targetlist = copyObject(lefttree->targetlist); @@ -2217,8 +2214,7 @@ make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys) Oid *sortOperators; /* - * We will need at most list_length(pathkeys) sort columns; possibly - * less + * We will need at most list_length(pathkeys) sort columns; possibly less */ numsortkeys = list_length(pathkeys); sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber)); @@ -2236,14 +2232,14 @@ make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys) /* * We can sort by any one of the sort key items listed in this * sublist. For now, we take the first one that corresponds to an - * available Var in the tlist. If there isn't any, use the first - * one that is an expression in the input's vars. + * available Var in the tlist. If there isn't any, use the first one + * that is an expression in the input's vars. * - * XXX if we have a choice, is there any way of figuring out which - * might be cheapest to execute? (For example, int4lt is likely - * much cheaper to execute than numericlt, but both might appear - * in the same pathkey sublist...) Not clear that we ever will - * have a choice in practice, so it may not matter. + * XXX if we have a choice, is there any way of figuring out which might + * be cheapest to execute? (For example, int4lt is likely much + * cheaper to execute than numericlt, but both might appear in the + * same pathkey sublist...) Not clear that we ever will have a choice + * in practice, so it may not matter. */ foreach(j, keysublist) { @@ -2296,13 +2292,13 @@ make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys) } /* - * The column might already be selected as a sort key, if the - * pathkeys contain duplicate entries. (This can happen in - * scenarios where multiple mergejoinable clauses mention the same - * var, for example.) So enter it only once in the sort arrays. + * The column might already be selected as a sort key, if the pathkeys + * contain duplicate entries. (This can happen in scenarios where + * multiple mergejoinable clauses mention the same var, for example.) + * So enter it only once in the sort arrays. */ numsortkeys = add_sort_column(tle->resno, pathkey->sortop, - numsortkeys, sortColIdx, sortOperators); + numsortkeys, sortColIdx, sortOperators); } Assert(numsortkeys > 0); @@ -2328,8 +2324,7 @@ make_sort_from_sortclauses(PlannerInfo *root, List *sortcls, Plan *lefttree) Oid *sortOperators; /* - * We will need at most list_length(sortcls) sort columns; possibly - * less + * We will need at most list_length(sortcls) sort columns; possibly less */ numsortkeys = list_length(sortcls); sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber)); @@ -2348,7 +2343,7 @@ make_sort_from_sortclauses(PlannerInfo *root, List *sortcls, Plan *lefttree) * redundantly. */ numsortkeys = add_sort_column(tle->resno, sortcl->sortop, - numsortkeys, sortColIdx, sortOperators); + numsortkeys, sortColIdx, sortOperators); } Assert(numsortkeys > 0); @@ -2384,8 +2379,7 @@ make_sort_from_groupcols(PlannerInfo *root, Oid *sortOperators; /* - * We will need at most list_length(groupcls) sort columns; possibly - * less + * We will need at most list_length(groupcls) sort columns; possibly less */ numsortkeys = list_length(groupcls); sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber)); @@ -2404,7 +2398,7 @@ make_sort_from_groupcols(PlannerInfo *root, * redundantly. 
*/ numsortkeys = add_sort_column(tle->resno, grpcl->sortop, - numsortkeys, sortColIdx, sortOperators); + numsortkeys, sortColIdx, sortOperators); grpno++; } @@ -2492,8 +2486,8 @@ make_agg(PlannerInfo *root, List *tlist, List *qual, plan->total_cost = agg_path.total_cost; /* - * We will produce a single output tuple if not grouping, and a tuple - * per group otherwise. + * We will produce a single output tuple if not grouping, and a tuple per + * group otherwise. */ if (aggstrategy == AGG_PLAIN) plan->plan_rows = 1; @@ -2501,13 +2495,13 @@ make_agg(PlannerInfo *root, List *tlist, List *qual, plan->plan_rows = numGroups; /* - * We also need to account for the cost of evaluation of the qual (ie, - * the HAVING clause) and the tlist. Note that cost_qual_eval doesn't - * charge anything for Aggref nodes; this is okay since they are - * really comparable to Vars. + * We also need to account for the cost of evaluation of the qual (ie, the + * HAVING clause) and the tlist. Note that cost_qual_eval doesn't charge + * anything for Aggref nodes; this is okay since they are really + * comparable to Vars. * - * See notes in grouping_planner about why this routine and make_group - * are the only ones in this file that worry about tlist eval cost. + * See notes in grouping_planner about why this routine and make_group are + * the only ones in this file that worry about tlist eval cost. */ if (qual) { @@ -2559,16 +2553,15 @@ make_group(PlannerInfo *root, plan->plan_rows = numGroups; /* - * We also need to account for the cost of evaluation of the qual (ie, - * the HAVING clause) and the tlist. + * We also need to account for the cost of evaluation of the qual (ie, the + * HAVING clause) and the tlist. * - * XXX this double-counts the cost of evaluation of any expressions used - * for grouping, since in reality those will have been evaluated at a - * lower plan level and will only be copied by the Group node. Worth - * fixing? + * XXX this double-counts the cost of evaluation of any expressions used for + * grouping, since in reality those will have been evaluated at a lower + * plan level and will only be copied by the Group node. Worth fixing? * - * See notes in grouping_planner about why this routine and make_agg are - * the only ones in this file that worry about tlist eval cost. + * See notes in grouping_planner about why this routine and make_agg are the + * only ones in this file that worry about tlist eval cost. */ if (qual) { @@ -2607,16 +2600,16 @@ make_unique(Plan *lefttree, List *distinctList) copy_plan_costsize(plan, lefttree); /* - * Charge one cpu_operator_cost per comparison per input tuple. We - * assume all columns get compared at most of the tuples. (XXX - * probably this is an overestimate.) + * Charge one cpu_operator_cost per comparison per input tuple. We assume + * all columns get compared at most of the tuples. (XXX probably this is + * an overestimate.) */ plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols; /* - * plan->plan_rows is left as a copy of the input subplan's plan_rows; - * ie, we assume the filter removes nothing. The caller must alter - * this if he has a better idea. + * plan->plan_rows is left as a copy of the input subplan's plan_rows; ie, + * we assume the filter removes nothing. The caller must alter this if he + * has a better idea. 
*/ plan->targetlist = copyObject(lefttree->targetlist); @@ -2625,8 +2618,7 @@ make_unique(Plan *lefttree, List *distinctList) plan->righttree = NULL; /* - * convert SortClause list into array of attr indexes, as wanted by - * exec + * convert SortClause list into array of attr indexes, as wanted by exec */ Assert(numCols > 0); uniqColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols); @@ -2664,8 +2656,8 @@ make_setop(SetOpCmd cmd, Plan *lefttree, copy_plan_costsize(plan, lefttree); /* - * Charge one cpu_operator_cost per comparison per input tuple. We - * assume all columns get compared at most of the tuples. + * Charge one cpu_operator_cost per comparison per input tuple. We assume + * all columns get compared at most of the tuples. */ plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols; @@ -2683,8 +2675,7 @@ make_setop(SetOpCmd cmd, Plan *lefttree, plan->righttree = NULL; /* - * convert SortClause list into array of attr indexes, as wanted by - * exec + * convert SortClause list into array of attr indexes, as wanted by exec */ Assert(numCols > 0); dupColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols); @@ -2727,8 +2718,8 @@ make_limit(Plan *lefttree, Node *limitOffset, Node *limitCount, * building a subquery then it's important to report correct info to the * outer planner. * - * When the offset or count couldn't be estimated, use 10% of the - * estimated number of rows emitted from the subplan. + * When the offset or count couldn't be estimated, use 10% of the estimated + * number of rows emitted from the subplan. */ if (offset_est != 0) { diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c index 7e3d5bca55b..dd8fc4fa2d7 100644 --- a/src/backend/optimizer/plan/initsplan.c +++ b/src/backend/optimizer/plan/initsplan.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.109 2005/09/28 21:17:02 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.110 2005/10/15 02:49:20 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -221,7 +221,7 @@ distribute_quals_to_rels(PlannerInfo *root, Node *jtnode, result = bms_add_members(result, distribute_quals_to_rels(root, lfirst(l), - below_outer_join)); + below_outer_join)); } /* @@ -243,17 +243,17 @@ distribute_quals_to_rels(PlannerInfo *root, Node *jtnode, ListCell *qual; /* - * Order of operations here is subtle and critical. First we - * recurse to handle sub-JOINs. Their join quals will be placed - * without regard for whether this level is an outer join, which - * is correct. Then we place our own join quals, which are - * restricted by lower outer joins in any case, and are forced to - * this level if this is an outer join and they mention the outer - * side. Finally, if this is an outer join, we mark baserels - * contained within the inner side(s) with our own rel set; this - * will prevent quals above us in the join tree that use those - * rels from being pushed down below this level. (It's okay for - * upper quals to be pushed down to the outer side, however.) + * Order of operations here is subtle and critical. First we recurse + * to handle sub-JOINs. Their join quals will be placed without + * regard for whether this level is an outer join, which is correct. + * Then we place our own join quals, which are restricted by lower + * outer joins in any case, and are forced to this level if this is an + * outer join and they mention the outer side. 
Finally, if this is an + * outer join, we mark baserels contained within the inner side(s) + * with our own rel set; this will prevent quals above us in the join + * tree that use those rels from being pushed down below this level. + * (It's okay for upper quals to be pushed down to the outer side, + * however.) */ switch (j->jointype) { @@ -302,19 +302,19 @@ distribute_quals_to_rels(PlannerInfo *root, Node *jtnode, case JOIN_UNION: /* - * This is where we fail if upper levels of planner - * haven't rewritten UNION JOIN as an Append ... + * This is where we fail if upper levels of planner haven't + * rewritten UNION JOIN as an Append ... */ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("UNION JOIN is not implemented"))); - nonnullable_rels = NULL; /* keep compiler quiet */ + nonnullable_rels = NULL; /* keep compiler quiet */ nullable_rels = NULL; break; default: elog(ERROR, "unrecognized join type: %d", (int) j->jointype); - nonnullable_rels = NULL; /* keep compiler quiet */ + nonnullable_rels = NULL; /* keep compiler quiet */ nullable_rels = NULL; break; } @@ -349,19 +349,19 @@ mark_baserels_for_outer_join(PlannerInfo *root, Relids rels, Relids outerrels) RelOptInfo *rel = find_base_rel(root, relno); /* - * Since we do this bottom-up, any outer-rels previously marked - * should be within the new outer join set. + * Since we do this bottom-up, any outer-rels previously marked should + * be within the new outer join set. */ Assert(bms_is_subset(rel->outerjoinset, outerrels)); /* * Presently the executor cannot support FOR UPDATE/SHARE marking of * rels appearing on the nullable side of an outer join. (It's - * somewhat unclear what that would mean, anyway: what should we - * mark when a result row is generated from no element of the - * nullable relation?) So, complain if target rel is FOR UPDATE/SHARE. - * It's sufficient to make this check once per rel, so do it only - * if rel wasn't already known nullable. + * somewhat unclear what that would mean, anyway: what should we mark + * when a result row is generated from no element of the nullable + * relation?) So, complain if target rel is FOR UPDATE/SHARE. It's + * sufficient to make this check once per rel, so do it only if rel + * wasn't already known nullable. */ if (rel->outerjoinset == NULL) { @@ -430,9 +430,9 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, /* * If the clause is variable-free, we force it to be evaluated at its * original syntactic level. Note that this should not happen for - * top-level clauses, because query_planner() special-cases them. But - * it will happen for variable-free JOIN/ON clauses. We don't have to - * be real smart about such a case, we just have to be correct. + * top-level clauses, because query_planner() special-cases them. But it + * will happen for variable-free JOIN/ON clauses. We don't have to be + * real smart about such a case, we just have to be correct. */ if (bms_is_empty(relids)) relids = qualscope; @@ -446,8 +446,8 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, /* * If the qual came from implied-equality deduction, we always * evaluate the qual at its natural semantic level. It is the - * responsibility of the deducer not to create any quals that - * should be delayed by outer-join rules. + * responsibility of the deducer not to create any quals that should + * be delayed by outer-join rules. 
*/ Assert(bms_equal(relids, qualscope)); /* Needn't feed it back for more deductions */ @@ -457,28 +457,28 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, else if (bms_overlap(relids, outerjoin_nonnullable)) { /* - * The qual is attached to an outer join and mentions (some of - * the) rels on the nonnullable side. Force the qual to be - * evaluated exactly at the level of joining corresponding to the - * outer join. We cannot let it get pushed down into the - * nonnullable side, since then we'd produce no output rows, - * rather than the intended single null-extended row, for any - * nonnullable-side rows failing the qual. + * The qual is attached to an outer join and mentions (some of the) + * rels on the nonnullable side. Force the qual to be evaluated + * exactly at the level of joining corresponding to the outer join. We + * cannot let it get pushed down into the nonnullable side, since then + * we'd produce no output rows, rather than the intended single + * null-extended row, for any nonnullable-side rows failing the qual. * - * Note: an outer-join qual that mentions only nullable-side rels can - * be pushed down into the nullable side without changing the join + * Note: an outer-join qual that mentions only nullable-side rels can be + * pushed down into the nullable side without changing the join * result, so we treat it the same as an ordinary inner-join qual, * except for not setting maybe_equijoin (see below). */ relids = qualscope; + /* - * We can't use such a clause to deduce equijoin (the left and - * right sides might be unequal above the join because one of - * them has gone to NULL) ... but we might be able to use it - * for more limited purposes. Note: for the current uses of - * deductions from an outer-join clause, it seems safe to make - * the deductions even when the clause is below a higher-level - * outer join; so we do not check below_outer_join here. + * We can't use such a clause to deduce equijoin (the left and right + * sides might be unequal above the join because one of them has gone + * to NULL) ... but we might be able to use it for more limited + * purposes. Note: for the current uses of deductions from an + * outer-join clause, it seems safe to make the deductions even when + * the clause is below a higher-level outer join; so we do not check + * below_outer_join here. */ maybe_equijoin = false; maybe_outer_join = true; @@ -486,15 +486,14 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, else { /* - * For a non-outer-join qual, we can evaluate the qual as soon as - * (1) we have all the rels it mentions, and (2) we are at or - * above any outer joins that can null any of these rels and are - * below the syntactic location of the given qual. To enforce the - * latter, scan the base rels listed in relids, and merge their - * outer-join sets into the clause's own reference list. At the - * time we are called, the outerjoinset of each baserel will show - * exactly those outer joins that are below the qual in the join - * tree. + * For a non-outer-join qual, we can evaluate the qual as soon as (1) + * we have all the rels it mentions, and (2) we are at or above any + * outer joins that can null any of these rels and are below the + * syntactic location of the given qual. To enforce the latter, scan + * the base rels listed in relids, and merge their outer-join sets + * into the clause's own reference list. 
At the time we are called, + * the outerjoinset of each baserel will show exactly those outer + * joins that are below the qual in the join tree. */ Relids addrelids = NULL; Relids tmprelids; @@ -513,13 +512,13 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, if (bms_is_subset(addrelids, relids)) { /* - * Qual is not delayed by any lower outer-join restriction. - * If it is not itself below or within an outer join, we - * can consider it "valid everywhere", so consider feeding - * it to the equijoin machinery. (If it is within an outer - * join, we can't consider it "valid everywhere": once the - * contained variables have gone to NULL, we'd be asserting - * things like NULL = NULL, which is not true.) + * Qual is not delayed by any lower outer-join restriction. If it + * is not itself below or within an outer join, we can consider it + * "valid everywhere", so consider feeding it to the equijoin + * machinery. (If it is within an outer join, we can't consider + * it "valid everywhere": once the contained variables have gone + * to NULL, we'd be asserting things like NULL = NULL, which is + * not true.) */ if (!below_outer_join && outerjoin_nonnullable == NULL) maybe_equijoin = true; @@ -533,8 +532,8 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, Assert(bms_is_subset(relids, qualscope)); /* - * Because application of the qual will be delayed by outer - * join, we mustn't assume its vars are equal everywhere. + * Because application of the qual will be delayed by outer join, + * we mustn't assume its vars are equal everywhere. */ maybe_equijoin = false; } @@ -543,11 +542,10 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, } /* - * Mark the qual as "pushed down" if it can be applied at a level - * below its original syntactic level. This allows us to distinguish - * original JOIN/ON quals from higher-level quals pushed down to the - * same joinrel. A qual originating from WHERE is always considered - * "pushed down". + * Mark the qual as "pushed down" if it can be applied at a level below + * its original syntactic level. This allows us to distinguish original + * JOIN/ON quals from higher-level quals pushed down to the same joinrel. + * A qual originating from WHERE is always considered "pushed down". */ if (!is_pushed_down) is_pushed_down = !bms_equal(relids, qualscope); @@ -573,25 +571,24 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, rel = find_base_rel(root, bms_singleton_member(relids)); /* - * Check for a "mergejoinable" clause even though it's not a - * join clause. This is so that we can recognize that "a.x = - * a.y" makes x and y eligible to be considered equal, even - * when they belong to the same rel. Without this, we would - * not recognize that "a.x = a.y AND a.x = b.z AND a.y = c.q" - * allows us to consider z and q equal after their rels are - * joined. + * Check for a "mergejoinable" clause even though it's not a join + * clause. This is so that we can recognize that "a.x = a.y" + * makes x and y eligible to be considered equal, even when they + * belong to the same rel. Without this, we would not recognize + * that "a.x = a.y AND a.x = b.z AND a.y = c.q" allows us to + * consider z and q equal after their rels are joined. */ check_mergejoinable(restrictinfo); /* - * If the clause was deduced from implied equality, check to - * see whether it is redundant with restriction clauses we - * already have for this rel. 
Note we cannot apply this check - * to user-written clauses, since we haven't found the - * canonical pathkey sets yet while processing user clauses. - * (NB: no comparable check is done in the join-clause case; - * redundancy will be detected when the join clause is moved - * into a join rel's restriction list.) + * If the clause was deduced from implied equality, check to see + * whether it is redundant with restriction clauses we already + * have for this rel. Note we cannot apply this check to + * user-written clauses, since we haven't found the canonical + * pathkey sets yet while processing user clauses. (NB: no + * comparable check is done in the join-clause case; redundancy + * will be detected when the join clause is moved into a join + * rel's restriction list.) */ if (!is_deduced || !qual_is_redundant(root, restrictinfo, @@ -605,17 +602,17 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, case BMS_MULTIPLE: /* - * 'clause' is a join clause, since there is more than one rel - * in the relid set. + * 'clause' is a join clause, since there is more than one rel in + * the relid set. */ /* * Check for hash or mergejoinable operators. * - * We don't bother setting the hashjoin info if we're not going - * to need it. We do want to know about mergejoinable ops in - * all cases, however, because we use mergejoinable ops for - * other purposes such as detecting redundant clauses. + * We don't bother setting the hashjoin info if we're not going to + * need it. We do want to know about mergejoinable ops in all + * cases, however, because we use mergejoinable ops for other + * purposes such as detecting redundant clauses. */ check_mergejoinable(restrictinfo); if (enable_hashjoin) @@ -628,9 +625,9 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, /* * Add vars used in the join clause to targetlists of their - * relations, so that they will be emitted by the plan nodes - * that scan those relations (else they won't be available at - * the join node!). + * relations, so that they will be emitted by the plan nodes that + * scan those relations (else they won't be available at the join + * node!). */ vars = pull_var_clause(clause, false); add_vars_to_targetlist(root, vars, relids); @@ -639,17 +636,16 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, default: /* - * 'clause' references no rels, and therefore we have no place - * to attach it. Shouldn't get here if callers are working - * properly. + * 'clause' references no rels, and therefore we have no place to + * attach it. Shouldn't get here if callers are working properly. */ elog(ERROR, "cannot cope with variable-free clause"); break; } /* - * If the clause has a mergejoinable operator, we may be able to - * deduce more things from it under the principle of transitivity. + * If the clause has a mergejoinable operator, we may be able to deduce + * more things from it under the principle of transitivity. * * If it is not an outer-join qualification nor bubbled up due to an outer * join, then the two sides represent equivalent PathKeyItems for path @@ -744,8 +740,8 @@ process_implied_equality(PlannerInfo *root, /* * If the exprs involve a single rel, we need to look at that rel's - * baserestrictinfo list. If multiple rels, we can scan the joininfo - * list of any of 'em. + * baserestrictinfo list. If multiple rels, we can scan the joininfo list + * of any of 'em. */ if (membership == BMS_SINGLETON) { @@ -767,8 +763,8 @@ process_implied_equality(PlannerInfo *root, } /* - * Scan to see if equality is already known. 
If so, we're done in the - * add case, and done after removing it in the delete case. + * Scan to see if equality is already known. If so, we're done in the add + * case, and done after removing it in the delete case. */ foreach(itm, restrictlist) { @@ -791,7 +787,7 @@ process_implied_equality(PlannerInfo *root, { /* delete it from local restrictinfo list */ rel1->baserestrictinfo = list_delete_ptr(rel1->baserestrictinfo, - restrictinfo); + restrictinfo); } else { @@ -808,8 +804,8 @@ process_implied_equality(PlannerInfo *root, return; /* - * This equality is new information, so construct a clause - * representing it to add to the query data structures. + * This equality is new information, so construct a clause representing it + * to add to the query data structures. */ ltype = exprType(item1); rtype = exprType(item2); @@ -818,14 +814,14 @@ process_implied_equality(PlannerInfo *root, if (!HeapTupleIsValid(eq_operator)) { /* - * Would it be safe to just not add the equality to the query if - * we have no suitable equality operator for the combination of + * Would it be safe to just not add the equality to the query if we + * have no suitable equality operator for the combination of * datatypes? NO, because sortkey selection may screw up anyway. */ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), - errmsg("could not identify an equality operator for types %s and %s", - format_type_be(ltype), format_type_be(rtype)))); + errmsg("could not identify an equality operator for types %s and %s", + format_type_be(ltype), format_type_be(rtype)))); } pgopform = (Form_pg_operator) GETSTRUCT(eq_operator); @@ -856,8 +852,8 @@ process_implied_equality(PlannerInfo *root, /* * Push the new clause into all the appropriate restrictinfo lists. * - * Note: we mark the qual "pushed down" to ensure that it can never be - * taken for an original JOIN/ON clause. + * Note: we mark the qual "pushed down" to ensure that it can never be taken + * for an original JOIN/ON clause. */ distribute_qual_to_rels(root, (Node *) clause, true, true, false, NULL, relids); @@ -911,9 +907,9 @@ qual_is_redundant(PlannerInfo *root, return false; /* - * Scan existing quals to find those referencing same pathkeys. - * Usually there will be few, if any, so build a list of just the - * interesting ones. + * Scan existing quals to find those referencing same pathkeys. Usually + * there will be few, if any, so build a list of just the interesting + * ones. */ oldquals = NIL; foreach(olditem, restrictlist) @@ -933,11 +929,10 @@ qual_is_redundant(PlannerInfo *root, /* * Now, we want to develop a list of exprs that are known equal to the - * left side of the new qual. We traverse the old-quals list - * repeatedly to transitively expand the exprs list. If at any point - * we find we can reach the right-side expr of the new qual, we are - * done. We give up when we can't expand the equalexprs list any - * more. + * left side of the new qual. We traverse the old-quals list repeatedly + * to transitively expand the exprs list. If at any point we find we can + * reach the right-side expr of the new qual, we are done. We give up + * when we can't expand the equalexprs list any more. 
*/ equalexprs = list_make1(newleft); do diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c index f2002a5228d..7c2f0211f10 100644 --- a/src/backend/optimizer/plan/planagg.c +++ b/src/backend/optimizer/plan/planagg.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.9 2005/09/21 19:15:27 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.10 2005/10/15 02:49:20 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -43,12 +43,12 @@ typedef struct static bool find_minmax_aggs_walker(Node *node, List **context); static bool build_minmax_path(PlannerInfo *root, RelOptInfo *rel, - MinMaxAggInfo *info); + MinMaxAggInfo *info); static ScanDirection match_agg_to_index_col(MinMaxAggInfo *info, - IndexOptInfo *index, int indexcol); + IndexOptInfo *index, int indexcol); static void make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info, - List *constant_quals); -static Node *replace_aggs_with_params_mutator(Node *node, List **context); + List *constant_quals); +static Node *replace_aggs_with_params_mutator(Node *node, List **context); static Oid fetch_agg_sort_op(Oid aggfnoid); @@ -62,7 +62,7 @@ static Oid fetch_agg_sort_op(Oid aggfnoid); * generic scan-all-the-rows plan. * * We are passed the preprocessed tlist, and the best path - * devised for computing the input of a standard Agg node. If we are able + * devised for computing the input of a standard Agg node. If we are able * to optimize all the aggregates, and the result is estimated to be cheaper * than the generic aggregate method, then generate and return a Plan that * does it that way. Otherwise, return NULL. @@ -87,24 +87,24 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path) if (!parse->hasAggs) return NULL; - Assert(!parse->setOperations); /* shouldn't get here if a setop */ - Assert(parse->rowMarks == NIL); /* nor if FOR UPDATE */ + Assert(!parse->setOperations); /* shouldn't get here if a setop */ + Assert(parse->rowMarks == NIL); /* nor if FOR UPDATE */ /* * Reject unoptimizable cases. * - * We don't handle GROUP BY, because our current implementations of - * grouping require looking at all the rows anyway, and so there's not - * much point in optimizing MIN/MAX. + * We don't handle GROUP BY, because our current implementations of grouping + * require looking at all the rows anyway, and so there's not much point + * in optimizing MIN/MAX. */ if (parse->groupClause) return NULL; /* - * We also restrict the query to reference exactly one table, since - * join conditions can't be handled reasonably. (We could perhaps - * handle a query containing cartesian-product joins, but it hardly - * seems worth the trouble.) + * We also restrict the query to reference exactly one table, since join + * conditions can't be handled reasonably. (We could perhaps handle a + * query containing cartesian-product joins, but it hardly seems worth the + * trouble.) */ Assert(parse->jointree != NULL && IsA(parse->jointree, FromExpr)); if (list_length(parse->jointree->fromlist) != 1) @@ -118,8 +118,8 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path) rel = find_base_rel(root, rtr->rtindex); /* - * Also reject cases with subplans or volatile functions in WHERE. - * This may be overly paranoid, but it's not entirely clear if the + * Also reject cases with subplans or volatile functions in WHERE. 
This + * may be overly paranoid, but it's not entirely clear if the * transformation is safe then. */ if (contain_subplans(parse->jointree->quals) || @@ -127,17 +127,16 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path) return NULL; /* - * Since this optimization is not applicable all that often, we want - * to fall out before doing very much work if possible. Therefore - * we do the work in several passes. The first pass scans the tlist - * and HAVING qual to find all the aggregates and verify that - * each of them is a MIN/MAX aggregate. If that succeeds, the second - * pass looks at each aggregate to see if it is optimizable; if so - * we make an IndexPath describing how we would scan it. (We do not - * try to optimize if only some aggs are optimizable, since that means - * we'll have to scan all the rows anyway.) If that succeeds, we have - * enough info to compare costs against the generic implementation. - * Only if that test passes do we build a Plan. + * Since this optimization is not applicable all that often, we want to + * fall out before doing very much work if possible. Therefore we do the + * work in several passes. The first pass scans the tlist and HAVING qual + * to find all the aggregates and verify that each of them is a MIN/MAX + * aggregate. If that succeeds, the second pass looks at each aggregate + * to see if it is optimizable; if so we make an IndexPath describing how + * we would scan it. (We do not try to optimize if only some aggs are + * optimizable, since that means we'll have to scan all the rows anyway.) + * If that succeeds, we have enough info to compare costs against the + * generic implementation. Only if that test passes do we build a Plan. */ /* Pass 1: find all the aggregates */ @@ -161,9 +160,9 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path) /* * Make the cost comparison. * - * Note that we don't include evaluation cost of the tlist here; - * this is OK since it isn't included in best_path's cost either, - * and should be the same in either case. + * Note that we don't include evaluation cost of the tlist here; this is OK + * since it isn't included in best_path's cost either, and should be the + * same in either case. */ cost_agg(&agg_p, root, AGG_PLAIN, list_length(aggs_list), 0, 0, @@ -174,13 +173,13 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path) return NULL; /* too expensive */ /* - * OK, we are going to generate an optimized plan. The first thing we - * need to do is look for any non-variable WHERE clauses that query_planner - * might have removed from the basic plan. (Normal WHERE clauses will - * be properly incorporated into the sub-plans by create_plan.) If there - * are any, they will be in a gating Result node atop the best_path. - * They have to be incorporated into a gating Result in each sub-plan - * in order to produce the semantically correct result. + * OK, we are going to generate an optimized plan. The first thing we + * need to do is look for any non-variable WHERE clauses that + * query_planner might have removed from the basic plan. (Normal WHERE + * clauses will be properly incorporated into the sub-plans by + * create_plan.) If there are any, they will be in a gating Result node + * atop the best_path. They have to be incorporated into a gating Result + * in each sub-plan in order to produce the semantically correct result. 
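
The comments above lay out the multi-pass strategy and its final "too expensive" test: the optimization only wins if the per-aggregate ordered index probes, each stopping after a single non-NULL row as the make_agg_subplan() hunks further down describe, cost less than one scan-everything plain aggregation. Below is a back-of-envelope version of that comparison with entirely invented numbers and formulas; the real code compares cost_agg() over best_path against the summed costs of the per-aggregate IndexPaths, whereas here a probe is simply priced as one btree descent.

#include <math.h>
#include <stdio.h>

int main(void)
{
    double table_rows = 1000000.0;   /* rows the generic plan must read     */
    double per_row_cost = 0.01;      /* pretend scan+aggregate cost per row */
    int    num_aggs = 2;             /* e.g. SELECT MIN(x), MAX(x) FROM t   */

    double generic_cost = table_rows * per_row_cost;

    /* each MIN/MAX becomes an ordered index probe stopping at one row */
    double probe_cost = log2(table_rows);
    double optimized_cost = num_aggs * probe_cost;

    printf("generic aggregation ~ %.1f\n", generic_cost);
    printf("index probes        ~ %.1f\n", optimized_cost);
    printf("build optimized plan: %s\n",
           optimized_cost <= generic_cost ? "yes" : "no (too expensive)");
    return 0;
}
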
*/ if (IsA(best_path, ResultPath)) { @@ -275,8 +274,8 @@ find_minmax_aggs_walker(Node *node, List **context) *context = lappend(*context, info); /* - * We need not recurse into the argument, since it can't contain - * any aggregates. + * We need not recurse into the argument, since it can't contain any + * aggregates. */ return false; } @@ -325,8 +324,8 @@ build_minmax_path(PlannerInfo *root, RelOptInfo *rel, MinMaxAggInfo *info) /* * Look for a match to one of the index columns. (In a stupidly - * designed index, there could be multiple matches, but we only - * care about the first one.) + * designed index, there could be multiple matches, but we only care + * about the first one.) */ for (indexcol = 0; indexcol < index->ncolumns; indexcol++) { @@ -340,12 +339,12 @@ build_minmax_path(PlannerInfo *root, RelOptInfo *rel, MinMaxAggInfo *info) /* * If the match is not at the first index column, we have to verify * that there are "x = something" restrictions on all the earlier - * index columns. Since we'll need the restrictclauses list anyway - * to build the path, it's convenient to extract that first and then - * look through it for the equality restrictions. + * index columns. Since we'll need the restrictclauses list anyway to + * build the path, it's convenient to extract that first and then look + * through it for the equality restrictions. */ restrictclauses = group_clauses_by_indexkey(index, - index->rel->baserestrictinfo, + index->rel->baserestrictinfo, NIL, NULL, &found_clause); @@ -354,8 +353,8 @@ build_minmax_path(PlannerInfo *root, RelOptInfo *rel, MinMaxAggInfo *info) continue; /* definitely haven't got enough */ for (prevcol = 0; prevcol < indexcol; prevcol++) { - List *rinfos = (List *) list_nth(restrictclauses, prevcol); - ListCell *ll; + List *rinfos = (List *) list_nth(restrictclauses, prevcol); + ListCell *ll; foreach(ll, rinfos) { @@ -453,9 +452,9 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info, List *constant_quals) NullTest *ntest; /* - * Generate a suitably modified query. Much of the work here is - * probably unnecessary in the normal case, but we want to make it look - * good if someone tries to EXPLAIN the result. + * Generate a suitably modified query. Much of the work here is probably + * unnecessary in the normal case, but we want to make it look good if + * someone tries to EXPLAIN the result. */ memcpy(&subroot, root, sizeof(PlannerInfo)); subroot.parse = subparse = (Query *) copyObject(root->parse); @@ -489,18 +488,17 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info, List *constant_quals) false, true); /* - * Generate the plan for the subquery. We already have a Path for - * the basic indexscan, but we have to convert it to a Plan and - * attach a LIMIT node above it. We might need a gating Result, too, - * to handle any non-variable qual clauses. + * Generate the plan for the subquery. We already have a Path for the + * basic indexscan, but we have to convert it to a Plan and attach a LIMIT + * node above it. We might need a gating Result, too, to handle any + * non-variable qual clauses. * - * Also we must add a "WHERE foo IS NOT NULL" restriction to the - * indexscan, to be sure we don't return a NULL, which'd be contrary - * to the standard behavior of MIN/MAX. XXX ideally this should be - * done earlier, so that the selectivity of the restriction could be - * included in our cost estimates. But that looks painful, and in - * most cases the fraction of NULLs isn't high enough to change the - * decision. 
+ * Also we must add a "WHERE foo IS NOT NULL" restriction to the indexscan, + * to be sure we don't return a NULL, which'd be contrary to the standard + * behavior of MIN/MAX. XXX ideally this should be done earlier, so that + * the selectivity of the restriction could be included in our cost + * estimates. But that looks painful, and in most cases the fraction of + * NULLs isn't high enough to change the decision. */ plan = create_plan(&subroot, (Path *) info->path); @@ -517,7 +515,7 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info, List *constant_quals) copyObject(constant_quals), plan); - plan = (Plan *) make_limit(plan, + plan = (Plan *) make_limit(plan, subparse->limitOffset, subparse->limitCount, 0, 1); @@ -534,7 +532,7 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info, List *constant_quals) * Replace original aggregate calls with subplan output Params */ static Node * -replace_aggs_with_params_mutator(Node *node, List **context) +replace_aggs_with_params_mutator(Node *node, List **context) { if (node == NULL) return NULL; diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c index 24d53be9e97..ecbf44400c9 100644 --- a/src/backend/optimizer/plan/planmain.c +++ b/src/backend/optimizer/plan/planmain.c @@ -14,7 +14,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.88 2005/09/28 21:17:02 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.89 2005/10/15 02:49:20 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -57,7 +57,7 @@ * does not use grouping * * Note: the PlannerInfo node also includes a query_pathkeys field, which is - * both an input and an output of query_planner(). The input value signals + * both an input and an output of query_planner(). The input value signals * query_planner that the indicated sort order is wanted in the final output * plan. But this value has not yet been "canonicalized", since the needed * info does not get computed until we scan the qual clauses. We canonicalize @@ -99,7 +99,7 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction, if (parse->jointree->fromlist == NIL) { *cheapest_path = (Path *) create_result_path(NULL, NULL, - (List *) parse->jointree->quals); + (List *) parse->jointree->quals); *sorted_path = NULL; return; } @@ -107,21 +107,21 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction, /* * Pull out any non-variable WHERE clauses so these can be put in a * toplevel "Result" node, where they will gate execution of the whole - * plan (the Result will not invoke its descendant plan unless the - * quals are true). Note that any *really* non-variable quals will - * have been optimized away by eval_const_expressions(). What we're - * mostly interested in here is quals that depend only on outer-level - * vars, although if the qual reduces to "WHERE FALSE" this path will - * also be taken. + * plan (the Result will not invoke its descendant plan unless the quals + * are true). Note that any *really* non-variable quals will have been + * optimized away by eval_const_expressions(). What we're mostly + * interested in here is quals that depend only on outer-level vars, + * although if the qual reduces to "WHERE FALSE" this path will also be + * taken. */ parse->jointree->quals = (Node *) pull_constant_clauses((List *) parse->jointree->quals, &constant_quals); /* - * Init planner lists to empty. 
We create the base_rel_array with a - * size that will be sufficient if no pullups or inheritance additions - * happen ... otherwise it will be enlarged as needed. + * Init planner lists to empty. We create the base_rel_array with a size + * that will be sufficient if no pullups or inheritance additions happen + * ... otherwise it will be enlarged as needed. * * NOTE: in_info_list was set up by subquery_planner, do not touch here */ @@ -141,33 +141,32 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction, add_base_rels_to_query(root, (Node *) parse->jointree); /* - * Examine the targetlist and qualifications, adding entries to - * baserel targetlists for all referenced Vars. Restrict and join - * clauses are added to appropriate lists belonging to the mentioned - * relations. We also build lists of equijoined keys for pathkey - * construction. + * Examine the targetlist and qualifications, adding entries to baserel + * targetlists for all referenced Vars. Restrict and join clauses are + * added to appropriate lists belonging to the mentioned relations. We + * also build lists of equijoined keys for pathkey construction. * - * Note: all subplan nodes will have "flat" (var-only) tlists. This - * implies that all expression evaluations are done at the root of the - * plan tree. Once upon a time there was code to try to push - * expensive function calls down to lower plan nodes, but that's dead - * code and has been for a long time... + * Note: all subplan nodes will have "flat" (var-only) tlists. This implies + * that all expression evaluations are done at the root of the plan tree. + * Once upon a time there was code to try to push expensive function calls + * down to lower plan nodes, but that's dead code and has been for a long + * time... */ build_base_rel_tlists(root, tlist); (void) distribute_quals_to_rels(root, (Node *) parse->jointree, false); /* - * Use the completed lists of equijoined keys to deduce any implied - * but unstated equalities (for example, A=B and B=C imply A=C). + * Use the completed lists of equijoined keys to deduce any implied but + * unstated equalities (for example, A=B and B=C imply A=C). */ generate_implied_equalities(root); /* - * We should now have all the pathkey equivalence sets built, so it's - * now possible to convert the requested query_pathkeys to canonical - * form. Also canonicalize the groupClause and sortClause pathkeys - * for use later. + * We should now have all the pathkey equivalence sets built, so it's now + * possible to convert the requested query_pathkeys to canonical form. + * Also canonicalize the groupClause and sortClause pathkeys for use + * later. */ root->query_pathkeys = canonicalize_pathkeys(root, root->query_pathkeys); root->group_pathkeys = canonicalize_pathkeys(root, root->group_pathkeys); @@ -182,13 +181,13 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction, elog(ERROR, "failed to construct the join relation"); /* - * If there's grouping going on, estimate the number of result groups. - * We couldn't do this any earlier because it depends on relation size + * If there's grouping going on, estimate the number of result groups. We + * couldn't do this any earlier because it depends on relation size * estimates that were set up above. * - * Then convert tuple_fraction to fractional form if it is absolute, - * and adjust it based on the knowledge that grouping_planner will be - * doing grouping or aggregation work with our result. 
+ * Then convert tuple_fraction to fractional form if it is absolute, and + * adjust it based on the knowledge that grouping_planner will be doing + * grouping or aggregation work with our result. * * This introduces some undesirable coupling between this code and * grouping_planner, but the alternatives seem even uglier; we couldn't @@ -205,18 +204,18 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction, final_rel->rows); /* - * In GROUP BY mode, an absolute LIMIT is relative to the number - * of groups not the number of tuples. If the caller gave us - * a fraction, keep it as-is. (In both cases, we are effectively - * assuming that all the groups are about the same size.) + * In GROUP BY mode, an absolute LIMIT is relative to the number of + * groups not the number of tuples. If the caller gave us a fraction, + * keep it as-is. (In both cases, we are effectively assuming that + * all the groups are about the same size.) */ if (tuple_fraction >= 1.0) tuple_fraction /= *num_groups; /* * If both GROUP BY and ORDER BY are specified, we will need two - * levels of sort --- and, therefore, certainly need to read all - * the tuples --- unless ORDER BY is a subset of GROUP BY. + * levels of sort --- and, therefore, certainly need to read all the + * tuples --- unless ORDER BY is a subset of GROUP BY. */ if (parse->groupClause && parse->sortClause && !pathkeys_contained_in(root->sort_pathkeys, root->group_pathkeys)) @@ -225,8 +224,8 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction, else if (parse->hasAggs || root->hasHavingQual) { /* - * Ungrouped aggregate will certainly want to read all the tuples, - * and it will deliver a single result row (so leave *num_groups 1). + * Ungrouped aggregate will certainly want to read all the tuples, and + * it will deliver a single result row (so leave *num_groups 1). */ tuple_fraction = 0.0; } @@ -234,11 +233,11 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction, { /* * Since there was no grouping or aggregation, it's reasonable to - * assume the UNIQUE filter has effects comparable to GROUP BY. - * Return the estimated number of output rows for use by caller. - * (If DISTINCT is used with grouping, we ignore its effects for - * rowcount estimation purposes; this amounts to assuming the grouped - * rows are distinct already.) + * assume the UNIQUE filter has effects comparable to GROUP BY. Return + * the estimated number of output rows for use by caller. (If DISTINCT + * is used with grouping, we ignore its effects for rowcount + * estimation purposes; this amounts to assuming the grouped rows are + * distinct already.) */ List *distinctExprs; @@ -257,26 +256,26 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction, else { /* - * Plain non-grouped, non-aggregated query: an absolute tuple - * fraction can be divided by the number of tuples. + * Plain non-grouped, non-aggregated query: an absolute tuple fraction + * can be divided by the number of tuples. */ if (tuple_fraction >= 1.0) tuple_fraction /= final_rel->rows; } /* - * Pick out the cheapest-total path and the cheapest presorted path - * for the requested pathkeys (if there is one). We should take the - * tuple fraction into account when selecting the cheapest presorted - * path, but not when selecting the cheapest-total path, since if we - * have to sort then we'll have to fetch all the tuples. 
(But there's - * a special case: if query_pathkeys is NIL, meaning order doesn't - * matter, then the "cheapest presorted" path will be the cheapest - * overall for the tuple fraction.) + * Pick out the cheapest-total path and the cheapest presorted path for + * the requested pathkeys (if there is one). We should take the tuple + * fraction into account when selecting the cheapest presorted path, but + * not when selecting the cheapest-total path, since if we have to sort + * then we'll have to fetch all the tuples. (But there's a special case: + * if query_pathkeys is NIL, meaning order doesn't matter, then the + * "cheapest presorted" path will be the cheapest overall for the tuple + * fraction.) * - * The cheapest-total path is also the one to use if grouping_planner - * decides to use hashed aggregation, so we return it separately even - * if this routine thinks the presorted path is the winner. + * The cheapest-total path is also the one to use if grouping_planner decides + * to use hashed aggregation, so we return it separately even if this + * routine thinks the presorted path is the winner. */ cheapestpath = final_rel->cheapest_total_path; @@ -291,8 +290,8 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction, /* * Forget about the presorted path if it would be cheaper to sort the - * cheapest-total path. Here we need consider only the behavior at - * the tuple fraction point. + * cheapest-total path. Here we need consider only the behavior at the + * tuple fraction point. */ if (sortedpath) { @@ -323,8 +322,7 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction, } /* - * If we have constant quals, add a toplevel Result step to process - * them. + * If we have constant quals, add a toplevel Result step to process them. */ if (constant_quals) { diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index ace53d692fb..762dfb4b641 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.193 2005/09/24 22:54:37 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.194 2005/10/15 02:49:20 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -59,8 +59,8 @@ static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode); static Plan *inheritance_planner(PlannerInfo *root, List *inheritlist); static Plan *grouping_planner(PlannerInfo *root, double tuple_fraction); static double preprocess_limit(PlannerInfo *root, - double tuple_fraction, - int *offset_est, int *count_est); + double tuple_fraction, + int *offset_est, int *count_est); static bool choose_hashed_grouping(PlannerInfo *root, double tuple_fraction, Path *cheapest_path, Path *sorted_path, double dNumGroups, AggClauseCounts *agg_counts); @@ -95,14 +95,13 @@ planner(Query *parse, bool isCursor, int cursorOptions, * these global state variables must be saved and restored. * * Query level and the param list cannot be moved into the per-query - * PlannerInfo structure since their whole purpose is communication - * across multiple sub-queries. Also, boundParams is explicitly info - * from outside the query, and so is likewise better handled as a global - * variable. + * PlannerInfo structure since their whole purpose is communication across + * multiple sub-queries. 
Also, boundParams is explicitly info from outside + * the query, and so is likewise better handled as a global variable. * - * Note we do NOT save and restore PlannerPlanId: it exists to assign - * unique IDs to SubPlan nodes, and we want those IDs to be unique for - * the life of a backend. Also, PlannerInitPlan is saved/restored in + * Note we do NOT save and restore PlannerPlanId: it exists to assign unique + * IDs to SubPlan nodes, and we want those IDs to be unique for the life + * of a backend. Also, PlannerInitPlan is saved/restored in * subquery_planner, not here. */ save_PlannerQueryLevel = PlannerQueryLevel; @@ -118,10 +117,10 @@ planner(Query *parse, bool isCursor, int cursorOptions, if (isCursor) { /* - * We have no real idea how many tuples the user will ultimately - * FETCH from a cursor, but it seems a good bet that he doesn't - * want 'em all. Optimize for 10% retrieval (you gotta better - * number? Should this be a SETtable parameter?) + * We have no real idea how many tuples the user will ultimately FETCH + * from a cursor, but it seems a good bet that he doesn't want 'em + * all. Optimize for 10% retrieval (you gotta better number? Should + * this be a SETtable parameter?) */ tuple_fraction = 0.10; } @@ -207,10 +206,10 @@ subquery_planner(Query *parse, double tuple_fraction, root->parse = parse; /* - * Look for IN clauses at the top level of WHERE, and transform them - * into joins. Note that this step only handles IN clauses originally - * at top level of WHERE; if we pull up any subqueries in the next - * step, their INs are processed just before pulling them up. + * Look for IN clauses at the top level of WHERE, and transform them into + * joins. Note that this step only handles IN clauses originally at top + * level of WHERE; if we pull up any subqueries in the next step, their + * INs are processed just before pulling them up. */ root->in_info_list = NIL; if (parse->hasSubLinks) @@ -225,14 +224,14 @@ subquery_planner(Query *parse, double tuple_fraction, pull_up_subqueries(root, (Node *) parse->jointree, false); /* - * Detect whether any rangetable entries are RTE_JOIN kind; if not, we - * can avoid the expense of doing flatten_join_alias_vars(). Also - * check for outer joins --- if none, we can skip reduce_outer_joins() - * and some other processing. This must be done after we have done + * Detect whether any rangetable entries are RTE_JOIN kind; if not, we can + * avoid the expense of doing flatten_join_alias_vars(). Also check for + * outer joins --- if none, we can skip reduce_outer_joins() and some + * other processing. This must be done after we have done * pull_up_subqueries, of course. * * Note: if reduce_outer_joins manages to eliminate all outer joins, - * root->hasOuterJoins is not reset currently. This is OK since its + * root->hasOuterJoins is not reset currently. This is OK since its * purpose is merely to suppress unnecessary processing in simple cases. */ root->hasJoinRTEs = false; @@ -255,8 +254,8 @@ subquery_planner(Query *parse, double tuple_fraction, /* * Set hasHavingQual to remember if HAVING clause is present. Needed - * because preprocess_expression will reduce a constant-true condition - * to an empty qual list ... but "HAVING TRUE" is not a semantic no-op. + * because preprocess_expression will reduce a constant-true condition to + * an empty qual list ... but "HAVING TRUE" is not a semantic no-op. 
*/ root->hasHavingQual = (parse->havingQual != NULL); @@ -292,29 +291,29 @@ subquery_planner(Query *parse, double tuple_fraction, } /* - * In some cases we may want to transfer a HAVING clause into WHERE. - * We cannot do so if the HAVING clause contains aggregates (obviously) - * or volatile functions (since a HAVING clause is supposed to be executed + * In some cases we may want to transfer a HAVING clause into WHERE. We + * cannot do so if the HAVING clause contains aggregates (obviously) or + * volatile functions (since a HAVING clause is supposed to be executed * only once per group). Also, it may be that the clause is so expensive * to execute that we're better off doing it only once per group, despite * the loss of selectivity. This is hard to estimate short of doing the * entire planning process twice, so we use a heuristic: clauses - * containing subplans are left in HAVING. Otherwise, we move or copy - * the HAVING clause into WHERE, in hopes of eliminating tuples before + * containing subplans are left in HAVING. Otherwise, we move or copy the + * HAVING clause into WHERE, in hopes of eliminating tuples before * aggregation instead of after. * - * If the query has explicit grouping then we can simply move such a - * clause into WHERE; any group that fails the clause will not be - * in the output because none of its tuples will reach the grouping - * or aggregation stage. Otherwise we must have a degenerate - * (variable-free) HAVING clause, which we put in WHERE so that - * query_planner() can use it in a gating Result node, but also keep - * in HAVING to ensure that we don't emit a bogus aggregated row. - * (This could be done better, but it seems not worth optimizing.) + * If the query has explicit grouping then we can simply move such a clause + * into WHERE; any group that fails the clause will not be in the output + * because none of its tuples will reach the grouping or aggregation + * stage. Otherwise we must have a degenerate (variable-free) HAVING + * clause, which we put in WHERE so that query_planner() can use it in a + * gating Result node, but also keep in HAVING to ensure that we don't + * emit a bogus aggregated row. (This could be done better, but it seems + * not worth optimizing.) * * Note that both havingQual and parse->jointree->quals are in - * implicitly-ANDed-list form at this point, even though they are - * declared as Node *. + * implicitly-ANDed-list form at this point, even though they are declared + * as Node *. */ newHaving = NIL; foreach(l, (List *) parse->havingQual) @@ -346,28 +345,27 @@ subquery_planner(Query *parse, double tuple_fraction, parse->havingQual = (Node *) newHaving; /* - * If we have any outer joins, try to reduce them to plain inner - * joins. This step is most easily done after we've done expression + * If we have any outer joins, try to reduce them to plain inner joins. + * This step is most easily done after we've done expression * preprocessing. */ if (root->hasOuterJoins) reduce_outer_joins(root); /* - * See if we can simplify the jointree; opportunities for this may - * come from having pulled up subqueries, or from flattening explicit - * JOIN syntax. We must do this after flattening JOIN alias - * variables, since eliminating explicit JOIN nodes from the jointree - * will cause get_relids_for_join() to fail. But it should happen - * after reduce_outer_joins, anyway. + * See if we can simplify the jointree; opportunities for this may come + * from having pulled up subqueries, or from flattening explicit JOIN + * syntax. 
We must do this after flattening JOIN alias variables, since + * eliminating explicit JOIN nodes from the jointree will cause + * get_relids_for_join() to fail. But it should happen after + * reduce_outer_joins, anyway. */ parse->jointree = (FromExpr *) simplify_jointree(root, (Node *) parse->jointree); /* - * Do the main planning. If we have an inherited target relation, - * that needs special processing, else go straight to - * grouping_planner. + * Do the main planning. If we have an inherited target relation, that + * needs special processing, else go straight to grouping_planner. */ if (parse->resultRelation && (lst = expand_inherited_rtentry(root, parse->resultRelation)) != NIL) @@ -377,8 +375,8 @@ subquery_planner(Query *parse, double tuple_fraction, /* * If any subplans were generated, or if we're inside a subplan, build - * initPlan list and extParam/allParam sets for plan nodes, and attach - * the initPlans to the top plan node. + * initPlan list and extParam/allParam sets for plan nodes, and attach the + * initPlans to the top plan node. */ if (PlannerPlanId != saved_planid || PlannerQueryLevel > 1) SS_finalize_plan(plan, parse->rtable); @@ -405,9 +403,9 @@ static Node * preprocess_expression(PlannerInfo *root, Node *expr, int kind) { /* - * Fall out quickly if expression is empty. This occurs often enough - * to be worth checking. Note that null->null is the correct conversion - * for implicit-AND result format, too. + * Fall out quickly if expression is empty. This occurs often enough to + * be worth checking. Note that null->null is the correct conversion for + * implicit-AND result format, too. */ if (expr == NULL) return NULL; @@ -415,8 +413,7 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind) /* * If the query has any join RTEs, replace join alias variables with * base-relation variables. We must do this before sublink processing, - * else sublinks expanded out from join aliases wouldn't get - * processed. + * else sublinks expanded out from join aliases wouldn't get processed. */ if (root->hasJoinRTEs) expr = flatten_join_alias_vars(root, expr); @@ -429,13 +426,13 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind) * careful to maintain AND/OR flatness --- that is, do not generate a tree * with AND directly under AND, nor OR directly under OR. * - * Because this is a relatively expensive process, we skip it when the - * query is trivial, such as "SELECT 2+2;" or "INSERT ... VALUES()". - * The expression will only be evaluated once anyway, so no point in + * Because this is a relatively expensive process, we skip it when the query + * is trivial, such as "SELECT 2+2;" or "INSERT ... VALUES()". The + * expression will only be evaluated once anyway, so no point in * pre-simplifying; we can't execute it any faster than the executor can, * and we will waste cycles copying the tree. Notice however that we - * still must do it for quals (to get AND/OR flatness); and if we are - * in a subquery we should not assume it will be done only once. + * still must do it for quals (to get AND/OR flatness); and if we are in a + * subquery we should not assume it will be done only once. */ if (root->parse->jointree->fromlist != NIL || kind == EXPRKIND_QUAL || @@ -460,8 +457,8 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind) expr = SS_process_sublinks(expr, (kind == EXPRKIND_QUAL)); /* - * XXX do not insert anything here unless you have grokked the - * comments in SS_replace_correlation_vars ... 
+ * XXX do not insert anything here unless you have grokked the comments in + * SS_replace_correlation_vars ... */ /* Replace uplevel vars with Param nodes */ @@ -469,9 +466,9 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind) expr = SS_replace_correlation_vars(expr); /* - * If it's a qual or havingQual, convert it to implicit-AND format. - * (We don't want to do this before eval_const_expressions, since the - * latter would be unable to simplify a top-level AND correctly. Also, + * If it's a qual or havingQual, convert it to implicit-AND format. (We + * don't want to do this before eval_const_expressions, since the latter + * would be unable to simplify a top-level AND correctly. Also, * SS_process_sublinks expects explicit-AND format.) */ if (kind == EXPRKIND_QUAL) @@ -557,9 +554,9 @@ inheritance_planner(PlannerInfo *root, List *inheritlist) Plan *subplan; /* - * Generate modified query with this rel as target. We have to - * be prepared to translate varnos in in_info_list as well as in - * the Query proper. + * Generate modified query with this rel as target. We have to be + * prepared to translate varnos in in_info_list as well as in the + * Query proper. */ memcpy(&subroot, root, sizeof(PlannerInfo)); subroot.parse = (Query *) @@ -580,26 +577,26 @@ inheritance_planner(PlannerInfo *root, List *inheritlist) * XXX my goodness this next bit is ugly. Really need to think about * ways to rein in planner's habit of scribbling on its input. * - * Planning of the subquery might have modified the rangetable, - * either by addition of RTEs due to expansion of inherited source - * tables, or by changes of the Query structures inside subquery - * RTEs. We have to ensure that this gets propagated back to the - * master copy. However, if we aren't done planning yet, we also - * need to ensure that subsequent calls to grouping_planner have - * virgin sub-Queries to work from. So, if we are at the last - * list entry, just copy the subquery rangetable back to the master - * copy; if we are not, then extend the master copy by adding - * whatever the subquery added. (We assume these added entries - * will go untouched by the future grouping_planner calls. We are - * also effectively assuming that sub-Queries will get planned - * identically each time, or at least that the impacts on their - * rangetables will be the same each time. Did I say this is ugly?) + * Planning of the subquery might have modified the rangetable, either by + * addition of RTEs due to expansion of inherited source tables, or by + * changes of the Query structures inside subquery RTEs. We have to + * ensure that this gets propagated back to the master copy. However, + * if we aren't done planning yet, we also need to ensure that + * subsequent calls to grouping_planner have virgin sub-Queries to + * work from. So, if we are at the last list entry, just copy the + * subquery rangetable back to the master copy; if we are not, then + * extend the master copy by adding whatever the subquery added. (We + * assume these added entries will go untouched by the future + * grouping_planner calls. We are also effectively assuming that + * sub-Queries will get planned identically each time, or at least + * that the impacts on their rangetables will be the same each time. + * Did I say this is ugly?) 
*/ if (lnext(l) == NULL) parse->rtable = subroot.parse->rtable; else { - int subrtlength = list_length(subroot.parse->rtable); + int subrtlength = list_length(subroot.parse->rtable); if (subrtlength > mainrtlength) { @@ -666,38 +663,37 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) List *set_sortclauses; /* - * If there's a top-level ORDER BY, assume we have to fetch all - * the tuples. This might seem too simplistic given all the - * hackery below to possibly avoid the sort ... but a nonzero - * tuple_fraction is only of use to plan_set_operations() when - * the setop is UNION ALL, and the result of UNION ALL is always - * unsorted. + * If there's a top-level ORDER BY, assume we have to fetch all the + * tuples. This might seem too simplistic given all the hackery below + * to possibly avoid the sort ... but a nonzero tuple_fraction is only + * of use to plan_set_operations() when the setop is UNION ALL, and + * the result of UNION ALL is always unsorted. */ if (parse->sortClause) tuple_fraction = 0.0; /* - * Construct the plan for set operations. The result will not - * need any work except perhaps a top-level sort and/or LIMIT. + * Construct the plan for set operations. The result will not need + * any work except perhaps a top-level sort and/or LIMIT. */ result_plan = plan_set_operations(root, tuple_fraction, &set_sortclauses); /* - * Calculate pathkeys representing the sort order (if any) of the - * set operation's result. We have to do this before overwriting - * the sort key information... + * Calculate pathkeys representing the sort order (if any) of the set + * operation's result. We have to do this before overwriting the sort + * key information... */ current_pathkeys = make_pathkeys_for_sortclauses(set_sortclauses, - result_plan->targetlist); + result_plan->targetlist); current_pathkeys = canonicalize_pathkeys(root, current_pathkeys); /* - * We should not need to call preprocess_targetlist, since we must - * be in a SELECT query node. Instead, use the targetlist - * returned by plan_set_operations (since this tells whether it - * returned any resjunk columns!), and transfer any sort key - * information from the original tlist. + * We should not need to call preprocess_targetlist, since we must be + * in a SELECT query node. Instead, use the targetlist returned by + * plan_set_operations (since this tells whether it returned any + * resjunk columns!), and transfer any sort key information from the + * original tlist. */ Assert(parse->commandType == CMD_SELECT); @@ -741,11 +737,11 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) tlist = preprocess_targetlist(root, tlist); /* - * Generate appropriate target list for subplan; may be different - * from tlist if grouping or aggregation is needed. + * Generate appropriate target list for subplan; may be different from + * tlist if grouping or aggregation is needed. */ sub_tlist = make_subplanTargetList(root, tlist, - &groupColIdx, &need_tlist_eval); + &groupColIdx, &need_tlist_eval); /* * Calculate pathkeys that represent grouping/ordering requirements. @@ -763,10 +759,10 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) * Note: we do not attempt to detect duplicate aggregates here; a * somewhat-overestimated count is okay for our present purposes. * - * Note: think not that we can turn off hasAggs if we find no aggs. 
- * It is possible for constant-expression simplification to remove - * all explicit references to aggs, but we still have to follow - * the aggregate semantics (eg, producing only one output row). + * Note: think not that we can turn off hasAggs if we find no aggs. It is + * possible for constant-expression simplification to remove all + * explicit references to aggs, but we still have to follow the + * aggregate semantics (eg, producing only one output row). */ if (parse->hasAggs) { @@ -777,13 +773,12 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) /* * Figure out whether we need a sorted result from query_planner. * - * If we have a GROUP BY clause, then we want a result sorted - * properly for grouping. Otherwise, if there is an ORDER BY - * clause, we want to sort by the ORDER BY clause. (Note: if we - * have both, and ORDER BY is a superset of GROUP BY, it would be - * tempting to request sort by ORDER BY --- but that might just - * leave us failing to exploit an available sort order at all. - * Needs more thought...) + * If we have a GROUP BY clause, then we want a result sorted properly + * for grouping. Otherwise, if there is an ORDER BY clause, we want + * to sort by the ORDER BY clause. (Note: if we have both, and ORDER + * BY is a superset of GROUP BY, it would be tempting to request sort + * by ORDER BY --- but that might just leave us failing to exploit an + * available sort order at all. Needs more thought...) */ if (parse->groupClause) root->query_pathkeys = root->group_pathkeys; @@ -793,10 +788,10 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) root->query_pathkeys = NIL; /* - * Generate the best unsorted and presorted paths for this Query - * (but note there may not be any presorted path). query_planner - * will also estimate the number of groups in the query, and - * canonicalize all the pathkeys. + * Generate the best unsorted and presorted paths for this Query (but + * note there may not be any presorted path). query_planner will also + * estimate the number of groups in the query, and canonicalize all + * the pathkeys. */ query_planner(root, sub_tlist, tuple_fraction, &cheapest_path, &sorted_path, &dNumGroups); @@ -820,8 +815,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) /* * Select the best path. If we are doing hashed grouping, we will - * always read all the input tuples, so use the cheapest-total - * path. Otherwise, trust query_planner's decision about which to use. + * always read all the input tuples, so use the cheapest-total path. + * Otherwise, trust query_planner's decision about which to use. */ if (use_hashed_grouping || !sorted_path) best_path = cheapest_path; @@ -829,10 +824,10 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) best_path = sorted_path; /* - * Check to see if it's possible to optimize MIN/MAX aggregates. - * If so, we will forget all the work we did so far to choose a - * "regular" path ... but we had to do it anyway to be able to - * tell which way is cheaper. + * Check to see if it's possible to optimize MIN/MAX aggregates. If + * so, we will forget all the work we did so far to choose a "regular" + * path ... but we had to do it anyway to be able to tell which way is + * cheaper. */ result_plan = optimize_minmax_aggregates(root, tlist, @@ -840,8 +835,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) if (result_plan != NULL) { /* - * optimize_minmax_aggregates generated the full plan, with - * the right tlist, and it has no sort order. 
+ * optimize_minmax_aggregates generated the full plan, with the + * right tlist, and it has no sort order. */ current_pathkeys = NIL; } @@ -985,8 +980,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) * GROUP BY without aggregation, so insert a group node (plus * the appropriate sort node, if necessary). * - * Add an explicit sort if we couldn't make the path come - * out the way the GROUP node needs it. + * Add an explicit sort if we couldn't make the path come out the + * way the GROUP node needs it. */ if (!pathkeys_contained_in(group_pathkeys, current_pathkeys)) { @@ -1014,11 +1009,12 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) * This is a degenerate case in which we are supposed to emit * either 0 or 1 row depending on whether HAVING succeeds. * Furthermore, there cannot be any variables in either HAVING - * or the targetlist, so we actually do not need the FROM table - * at all! We can just throw away the plan-so-far and generate - * a Result node. This is a sufficiently unusual corner case - * that it's not worth contorting the structure of this routine - * to avoid having to generate the plan in the first place. + * or the targetlist, so we actually do not need the FROM + * table at all! We can just throw away the plan-so-far and + * generate a Result node. This is a sufficiently unusual + * corner case that it's not worth contorting the structure of + * this routine to avoid having to generate the plan in the + * first place. */ result_plan = (Plan *) make_result(tlist, parse->havingQual, @@ -1028,8 +1024,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) } /* end of if (setOperations) */ /* - * If we were not able to make the plan come out in the right order, - * add an explicit sort step. + * If we were not able to make the plan come out in the right order, add + * an explicit sort step. */ if (parse->sortClause) { @@ -1051,9 +1047,9 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) result_plan = (Plan *) make_unique(result_plan, parse->distinctClause); /* - * If there was grouping or aggregation, leave plan_rows as-is - * (ie, assume the result was already mostly unique). If not, - * use the number of distinct-groups calculated by query_planner. + * If there was grouping or aggregation, leave plan_rows as-is (ie, + * assume the result was already mostly unique). If not, use the + * number of distinct-groups calculated by query_planner. */ if (!parse->groupClause && !root->hasHavingQual && !parse->hasAggs) result_plan->plan_rows = dNumGroups; @@ -1072,8 +1068,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) } /* - * Return the actual output ordering in query_pathkeys for possible - * use by an outer query level. + * Return the actual output ordering in query_pathkeys for possible use by + * an outer query level. */ root->query_pathkeys = current_pathkeys; @@ -1084,7 +1080,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses * * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the - * results back in *count_est and *offset_est. These variables are set to + * results back in *count_est and *offset_est. These variables are set to * 0 if the corresponding clause is not present, and -1 if it's present * but we couldn't estimate the value for it. 
(The "0" convention is OK * for OFFSET but a little bit bogus for LIMIT: effectively we estimate @@ -1093,7 +1089,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) * be passed to make_limit, which see if you change this code. * * The return value is the suitably adjusted tuple_fraction to use for - * planning the query. This adjustment is not overridable, since it reflects + * planning the query. This adjustment is not overridable, since it reflects * plan actions that grouping_planner() will certainly take, not assumptions * about context. */ @@ -1120,7 +1116,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction, if (((Const *) est)->constisnull) { /* NULL indicates LIMIT ALL, ie, no limit */ - *count_est = 0; /* treat as not present */ + *count_est = 0; /* treat as not present */ } else { @@ -1143,7 +1139,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction, if (((Const *) est)->constisnull) { /* Treat NULL as no offset; the executor will too */ - *offset_est = 0; /* treat as not present */ + *offset_est = 0; /* treat as not present */ } else { @@ -1217,11 +1213,11 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction, else if (*offset_est != 0 && tuple_fraction > 0.0) { /* - * We have an OFFSET but no LIMIT. This acts entirely differently - * from the LIMIT case: here, we need to increase rather than - * decrease the caller's tuple_fraction, because the OFFSET acts - * to cause more tuples to be fetched instead of fewer. This only - * matters if we got a tuple_fraction > 0, however. + * We have an OFFSET but no LIMIT. This acts entirely differently + * from the LIMIT case: here, we need to increase rather than decrease + * the caller's tuple_fraction, because the OFFSET acts to cause more + * tuples to be fetched instead of fewer. This only matters if we got + * a tuple_fraction > 0, however. * * As above, use 10% if OFFSET is present but unestimatable. */ @@ -1232,9 +1228,9 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction, /* * If we have absolute counts from both caller and OFFSET, add them - * together; likewise if they are both fractional. If one is - * fractional and the other absolute, we want to take the larger, - * and we heuristically assume that's the fractional one. + * together; likewise if they are both fractional. If one is + * fractional and the other absolute, we want to take the larger, and + * we heuristically assume that's the fractional one. */ if (tuple_fraction >= 1.0) { @@ -1260,7 +1256,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction, /* both fractional, so add them together */ tuple_fraction += limit_fraction; if (tuple_fraction >= 1.0) - tuple_fraction = 0.0; /* assume fetch all */ + tuple_fraction = 0.0; /* assume fetch all */ } } } @@ -1303,9 +1299,8 @@ choose_hashed_grouping(PlannerInfo *root, double tuple_fraction, * Don't do it if it doesn't look like the hashtable will fit into * work_mem. * - * Beware here of the possibility that cheapest_path->parent is NULL. - * This could happen if user does something silly like - * SELECT 'foo' GROUP BY 1; + * Beware here of the possibility that cheapest_path->parent is NULL. 
This + * could happen if user does something silly like SELECT 'foo' GROUP BY 1; */ if (cheapest_path->parent) { @@ -1314,8 +1309,8 @@ choose_hashed_grouping(PlannerInfo *root, double tuple_fraction, } else { - cheapest_path_rows = 1; /* assume non-set result */ - cheapest_path_width = 100; /* arbitrary */ + cheapest_path_rows = 1; /* assume non-set result */ + cheapest_path_width = 100; /* arbitrary */ } /* Estimate per-hash-entry space at tuple width... */ @@ -1329,23 +1324,19 @@ choose_hashed_grouping(PlannerInfo *root, double tuple_fraction, return false; /* - * See if the estimated cost is no more than doing it the other way. - * While avoiding the need for sorted input is usually a win, the fact - * that the output won't be sorted may be a loss; so we need to do an - * actual cost comparison. + * See if the estimated cost is no more than doing it the other way. While + * avoiding the need for sorted input is usually a win, the fact that the + * output won't be sorted may be a loss; so we need to do an actual cost + * comparison. * - * We need to consider - * cheapest_path + hashagg [+ final sort] - * versus either - * cheapest_path [+ sort] + group or agg [+ final sort] - * or - * presorted_path + group or agg [+ final sort] - * where brackets indicate a step that may not be needed. We assume - * query_planner() will have returned a presorted path only if it's a - * winner compared to cheapest_path for this purpose. + * We need to consider cheapest_path + hashagg [+ final sort] versus either + * cheapest_path [+ sort] + group or agg [+ final sort] or presorted_path + * + group or agg [+ final sort] where brackets indicate a step that may + * not be needed. We assume query_planner() will have returned a presorted + * path only if it's a winner compared to cheapest_path for this purpose. * - * These path variables are dummies that just hold cost fields; we don't - * make actual Paths for these steps. + * These path variables are dummies that just hold cost fields; we don't make + * actual Paths for these steps. */ cost_agg(&hashed_p, root, AGG_HASHED, agg_counts->numAggs, numGroupCols, dNumGroups, @@ -1502,8 +1493,8 @@ make_subplanTargetList(PlannerInfo *root, /* * Otherwise, start with a "flattened" tlist (having just the vars - * mentioned in the targetlist and HAVING qual --- but not upper- - * level Vars; they will be replaced by Params later on). + * mentioned in the targetlist and HAVING qual --- but not upper- level + * Vars; they will be replaced by Params later on). */ sub_tlist = flatten_tlist(tlist); extravars = pull_var_clause(parse->havingQual, false); @@ -1513,9 +1504,8 @@ make_subplanTargetList(PlannerInfo *root, /* * If grouping, create sub_tlist entries for all GROUP BY expressions - * (GROUP BY items that are simple Vars should be in the list - * already), and make an array showing where the group columns are in - * the sub_tlist. + * (GROUP BY items that are simple Vars should be in the list already), + * and make an array showing where the group columns are in the sub_tlist. 
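
The choose_hashed_grouping() hunks above mention two guards before hashed aggregation is chosen: the estimated hash table must fit in work_mem (using the cheapest path's row width, or the arbitrary 100-byte, one-row fallback when there is no parent rel), and the overall cost must beat the sort-plus-group alternatives. Here is a minimal sketch of just the memory-fit arithmetic, with invented sizes; the per-entry overhead constant merely stands in for whatever the real estimate adds on top of the tuple width.

#include <stdbool.h>
#include <stdio.h>

static bool hash_fits(double num_groups, int tuple_width,
                      int per_entry_overhead, int work_mem_kb)
{
    double hashentrysize = tuple_width + per_entry_overhead;

    return hashentrysize * num_groups <= (double) work_mem_kb * 1024.0;
}

int main(void)
{
    double dNumGroups = 50000.0;     /* estimated number of groups          */
    int    width = 100;              /* fallback width when parent is NULL  */
    int    overhead = 64;            /* invented per-hash-entry overhead    */
    int    work_mem = 4096;          /* kB                                  */

    printf("hashed grouping considered: %s\n",
           hash_fits(dNumGroups, width, overhead, work_mem)
           ? "yes (still subject to the cost comparison)"
           : "no (hash table would exceed work_mem)");
    return 0;
}

With these sample numbers the estimated table (about 8 MB) exceeds the 4 MB of work_mem, so the sketch falls back to the sorted-grouping alternative, which is the same outcome the guard above enforces.
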
*/ numCols = list_length(parse->groupClause); if (numCols > 0) @@ -1634,7 +1624,7 @@ postprocess_setop_tlist(List *new_tlist, List *orig_tlist) Assert(orig_tlist_item != NULL); orig_tle = (TargetEntry *) lfirst(orig_tlist_item); orig_tlist_item = lnext(orig_tlist_item); - if (orig_tle->resjunk) /* should not happen */ + if (orig_tle->resjunk) /* should not happen */ elog(ERROR, "resjunk output columns are not implemented"); Assert(new_tle->resno == orig_tle->resno); new_tle->ressortgroupref = orig_tle->ressortgroupref; diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c index fe01555a3c4..2ca616e118b 100644 --- a/src/backend/optimizer/plan/setrefs.c +++ b/src/backend/optimizer/plan/setrefs.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.114 2005/09/05 18:59:38 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.115 2005/10/15 02:49:20 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -38,7 +38,7 @@ typedef struct int num_vars; /* number of plain Var tlist entries */ bool has_non_vars; /* are there non-plain-Var entries? */ /* array of num_vars entries: */ - tlist_vinfo vars[1]; /* VARIABLE LENGTH ARRAY */ + tlist_vinfo vars[1]; /* VARIABLE LENGTH ARRAY */ } indexed_tlist; /* VARIABLE LENGTH STRUCT */ typedef struct @@ -64,28 +64,28 @@ static void fix_expr_references(Plan *plan, Node *node); static bool fix_expr_references_walker(Node *node, void *context); static void set_join_references(Join *join, List *rtable); static void set_inner_join_references(Plan *inner_plan, - List *rtable, - indexed_tlist *outer_itlist); + List *rtable, + indexed_tlist *outer_itlist); static void set_uppernode_references(Plan *plan, Index subvarno); static indexed_tlist *build_tlist_index(List *tlist); static Var *search_indexed_tlist_for_var(Var *var, - indexed_tlist *itlist, - Index newvarno); + indexed_tlist *itlist, + Index newvarno); static Var *search_indexed_tlist_for_non_var(Node *node, - indexed_tlist *itlist, - Index newvarno); + indexed_tlist *itlist, + Index newvarno); static List *join_references(List *clauses, - List *rtable, - indexed_tlist *outer_itlist, - indexed_tlist *inner_itlist, - Index acceptable_rel); + List *rtable, + indexed_tlist *outer_itlist, + indexed_tlist *inner_itlist, + Index acceptable_rel); static Node *join_references_mutator(Node *node, join_references_context *context); static Node *replace_vars_with_subplan_refs(Node *node, - indexed_tlist *subplan_itlist, - Index subvarno); + indexed_tlist *subplan_itlist, + Index subvarno); static Node *replace_vars_with_subplan_refs_mutator(Node *node, - replace_vars_with_subplan_refs_context *context); + replace_vars_with_subplan_refs_context *context); static bool fix_opfuncids_walker(Node *node, void *context); static void set_sa_opfuncid(ScalarArrayOpExpr *opexpr); @@ -99,7 +99,7 @@ static void set_sa_opfuncid(ScalarArrayOpExpr *opexpr); /* * set_plan_references * - * This is the final processing pass of the planner/optimizer. The plan + * This is the final processing pass of the planner/optimizer. The plan * tree is complete; we just have to adjust some representational details * for the convenience of the executor. 
We update Vars in upper plan nodes * to refer to the outputs of their subplans, and we compute regproc OIDs @@ -150,22 +150,22 @@ set_plan_references(Plan *plan, List *rtable) fix_expr_references(plan, (Node *) ((IndexScan *) plan)->indexqual); fix_expr_references(plan, - (Node *) ((IndexScan *) plan)->indexqualorig); + (Node *) ((IndexScan *) plan)->indexqualorig); break; case T_BitmapIndexScan: /* no need to fix targetlist and qual */ Assert(plan->targetlist == NIL); Assert(plan->qual == NIL); fix_expr_references(plan, - (Node *) ((BitmapIndexScan *) plan)->indexqual); + (Node *) ((BitmapIndexScan *) plan)->indexqual); fix_expr_references(plan, - (Node *) ((BitmapIndexScan *) plan)->indexqualorig); + (Node *) ((BitmapIndexScan *) plan)->indexqualorig); break; case T_BitmapHeapScan: fix_expr_references(plan, (Node *) plan->targetlist); fix_expr_references(plan, (Node *) plan->qual); fix_expr_references(plan, - (Node *) ((BitmapHeapScan *) plan)->bitmapqualorig); + (Node *) ((BitmapHeapScan *) plan)->bitmapqualorig); break; case T_TidScan: fix_expr_references(plan, (Node *) plan->targetlist); @@ -200,7 +200,7 @@ set_plan_references(Plan *plan, List *rtable) fix_expr_references(plan, (Node *) plan->qual); fix_expr_references(plan, (Node *) ((Join *) plan)->joinqual); fix_expr_references(plan, - (Node *) ((MergeJoin *) plan)->mergeclauses); + (Node *) ((MergeJoin *) plan)->mergeclauses); break; case T_HashJoin: set_join_references((Join *) plan, rtable); @@ -208,7 +208,7 @@ set_plan_references(Plan *plan, List *rtable) fix_expr_references(plan, (Node *) plan->qual); fix_expr_references(plan, (Node *) ((Join *) plan)->joinqual); fix_expr_references(plan, - (Node *) ((HashJoin *) plan)->hashclauses); + (Node *) ((HashJoin *) plan)->hashclauses); break; case T_Hash: case T_Material: @@ -218,24 +218,24 @@ set_plan_references(Plan *plan, List *rtable) /* * These plan types don't actually bother to evaluate their - * targetlists (because they just return their unmodified - * input tuples). The optimizer is lazy about creating really - * valid targetlists for them --- it tends to just put in a - * pointer to the child plan node's tlist. Hence, we leave - * the tlist alone. In particular, we do not want to process - * subplans in the tlist, since we will likely end up reprocessing - * subplans that also appear in lower levels of the plan tree! + * targetlists (because they just return their unmodified input + * tuples). The optimizer is lazy about creating really valid + * targetlists for them --- it tends to just put in a pointer to + * the child plan node's tlist. Hence, we leave the tlist alone. + * In particular, we do not want to process subplans in the tlist, + * since we will likely end up reprocessing subplans that also + * appear in lower levels of the plan tree! * - * Since these plan types don't check quals either, we should - * not find any qual expression attached to them. + * Since these plan types don't check quals either, we should not + * find any qual expression attached to them. */ Assert(plan->qual == NIL); break; case T_Limit: /* - * Like the plan types above, Limit doesn't evaluate its tlist - * or quals. It does have live expressions for limit/offset, + * Like the plan types above, Limit doesn't evaluate its tlist or + * quals. It does have live expressions for limit/offset, * however. 
*/ Assert(plan->qual == NIL); @@ -251,8 +251,8 @@ set_plan_references(Plan *plan, List *rtable) case T_Result: /* - * Result may or may not have a subplan; no need to fix up - * subplan references if it hasn't got one... + * Result may or may not have a subplan; no need to fix up subplan + * references if it hasn't got one... * * XXX why does Result use a different subvarno from Agg/Group? */ @@ -300,9 +300,9 @@ set_plan_references(Plan *plan, List *rtable) * NOTE: it is essential that we recurse into child plans AFTER we set * subplan references in this plan's tlist and quals. If we did the * reference-adjustments bottom-up, then we would fail to match this - * plan's var nodes against the already-modified nodes of the - * children. Fortunately, that consideration doesn't apply to SubPlan - * nodes; else we'd need two passes over the expression trees. + * plan's var nodes against the already-modified nodes of the children. + * Fortunately, that consideration doesn't apply to SubPlan nodes; else + * we'd need two passes over the expression trees. */ plan->lefttree = set_plan_references(plan->lefttree, rtable); plan->righttree = set_plan_references(plan->righttree, rtable); @@ -339,8 +339,8 @@ set_subqueryscan_references(SubqueryScan *plan, List *rtable) rte->subquery->rtable); /* - * We have to process any initplans too; set_plan_references can't do - * it for us because of the possibility of double-processing. + * We have to process any initplans too; set_plan_references can't do it + * for us because of the possibility of double-processing. */ foreach(l, plan->scan.plan.initPlan) { @@ -353,12 +353,12 @@ set_subqueryscan_references(SubqueryScan *plan, List *rtable) if (trivial_subqueryscan(plan)) { /* - * We can omit the SubqueryScan node and just pull up the subplan. - * We have to merge its rtable into the outer rtable, which means + * We can omit the SubqueryScan node and just pull up the subplan. We + * have to merge its rtable into the outer rtable, which means * adjusting varnos throughout the subtree. */ - int rtoffset = list_length(rtable); - List *sub_rtable; + int rtoffset = list_length(rtable); + List *sub_rtable; sub_rtable = copyObject(rte->subquery->rtable); range_table_walker(sub_rtable, @@ -382,11 +382,11 @@ set_subqueryscan_references(SubqueryScan *plan, List *rtable) else { /* - * Keep the SubqueryScan node. We have to do the processing that - * set_plan_references would otherwise have done on it. Notice - * we do not do set_uppernode_references() here, because a - * SubqueryScan will always have been created with correct - * references to its subplan's outputs to begin with. + * Keep the SubqueryScan node. We have to do the processing that + * set_plan_references would otherwise have done on it. Notice we do + * not do set_uppernode_references() here, because a SubqueryScan will + * always have been created with correct references to its subplan's + * outputs to begin with. */ result = (Plan *) plan; @@ -532,9 +532,9 @@ adjust_plan_varnos(Plan *plan, int rtoffset) case T_SetOp: /* - * Even though the targetlist won't be used by the executor, - * we fix it up for possible use by EXPLAIN (not to mention - * ease of debugging --- wrong varnos are very confusing). + * Even though the targetlist won't be used by the executor, we + * fix it up for possible use by EXPLAIN (not to mention ease of + * debugging --- wrong varnos are very confusing). 
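The varno adjustment these hunks keep referring to is mechanical: once a trivial SubqueryScan is flattened and its range table appended to the outer one, every Var in the pulled-up subtree must be shifted by the old length of the outer rtable. A toy rendering, with simplified stand-in types rather than the real Var and List nodes:

#include <stdio.h>

/* Simplified stand-in for a planned Var; INNER/OUTER markers omitted. */
typedef struct
{
    int varno;      /* index into the (sub)query's range table */
    int varattno;   /* column number within that relation */
} FakeVar;

/*
 * Shift each Var so it points at the same entry after the subquery's
 * range table has been appended to the outer one.
 */
static void
offset_varnos(FakeVar *vars, int nvars, int rtoffset)
{
    for (int i = 0; i < nvars; i++)
        vars[i].varno += rtoffset;
}

int
main(void)
{
    FakeVar vars[] = {{1, 1}, {2, 3}};
    int outer_rtable_len = 4;   /* list_length(rtable) in the real code */

    offset_varnos(vars, 2, outer_rtable_len);
    printf("first var now points at rtable entry %d\n", vars[0].varno);
    return 0;
}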
*/ adjust_expr_varnos((Node *) plan->targetlist, rtoffset); Assert(plan->qual == NIL); @@ -542,8 +542,8 @@ adjust_plan_varnos(Plan *plan, int rtoffset) case T_Limit: /* - * Like the plan types above, Limit doesn't evaluate its tlist - * or quals. It does have live expressions for limit/offset, + * Like the plan types above, Limit doesn't evaluate its tlist or + * quals. It does have live expressions for limit/offset, * however. */ adjust_expr_varnos((Node *) plan->targetlist, rtoffset); @@ -590,8 +590,8 @@ adjust_plan_varnos(Plan *plan, int rtoffset) /* * Now recurse into child plans. * - * We don't need to (and in fact mustn't) recurse into subqueries, - * so no need to examine initPlan list. + * We don't need to (and in fact mustn't) recurse into subqueries, so no need + * to examine initPlan list. */ adjust_plan_varnos(plan->lefttree, rtoffset); adjust_plan_varnos(plan->righttree, rtoffset); @@ -603,7 +603,7 @@ adjust_plan_varnos(Plan *plan, int rtoffset) * * This is different from the rewriter's OffsetVarNodes in that it has to * work on an already-planned expression tree; in particular, we should not - * disturb INNER and OUTER references. On the other hand, we don't have to + * disturb INNER and OUTER references. On the other hand, we don't have to * recurse into subqueries nor deal with outer-level Vars, so it's pretty * simple. */ @@ -763,10 +763,10 @@ set_inner_join_references(Plan *inner_plan, if (IsA(inner_plan, IndexScan)) { /* - * An index is being used to reduce the number of tuples - * scanned in the inner relation. If there are join clauses - * being used with the index, we must update their outer-rel - * var nodes to refer to the outer side of the join. + * An index is being used to reduce the number of tuples scanned in + * the inner relation. If there are join clauses being used with the + * index, we must update their outer-rel var nodes to refer to the + * outer side of the join. */ IndexScan *innerscan = (IndexScan *) inner_plan; List *indexqualorig = innerscan->indexqualorig; @@ -789,9 +789,9 @@ set_inner_join_references(Plan *inner_plan, innerrel); /* - * We must fix the inner qpqual too, if it has join - * clauses (this could happen if special operators are - * involved: some indexquals may get rechecked as qpquals). + * We must fix the inner qpqual too, if it has join clauses (this + * could happen if special operators are involved: some indexquals + * may get rechecked as qpquals). */ if (NumRelids((Node *) inner_plan->qual) > 1) inner_plan->qual = join_references(inner_plan->qual, @@ -832,11 +832,11 @@ set_inner_join_references(Plan *inner_plan, else if (IsA(inner_plan, BitmapHeapScan)) { /* - * The inner side is a bitmap scan plan. Fix the top node, - * and recurse to get the lower nodes. + * The inner side is a bitmap scan plan. Fix the top node, and + * recurse to get the lower nodes. * - * Note: create_bitmap_scan_plan removes clauses from bitmapqualorig - * if they are duplicated in qpqual, so must test these independently. + * Note: create_bitmap_scan_plan removes clauses from bitmapqualorig if + * they are duplicated in qpqual, so must test these independently. */ BitmapHeapScan *innerscan = (BitmapHeapScan *) inner_plan; Index innerrel = innerscan->scan.scanrelid; @@ -851,9 +851,9 @@ set_inner_join_references(Plan *inner_plan, innerrel); /* - * We must fix the inner qpqual too, if it has join - * clauses (this could happen if special operators are - * involved: some indexquals may get rechecked as qpquals). 
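The recurring pattern in these inner-join hunks, rewriting join clauses so that Vars belonging to the outer relation reference the outer plan's output, can be pictured with a toy version of join_references. OUTER_MARK and the flat array of Vars are invented for the example; the real code matches Vars against an indexed targetlist and remaps varattno as well.

#include <stdio.h>

#define OUTER_MARK  65000   /* stand-in for the executor's OUTER varno */

typedef struct
{
    int varno;
    int varattno;
} FakeVar;

/*
 * Rewrite any Var that does not belong to the inner relation so that
 * it refers to the outer plan's output instead; in the real code the
 * varattno would also be remapped to the matching tlist resno.
 */
static void
toy_join_references(FakeVar *clause_vars, int nvars, int innerrel)
{
    for (int i = 0; i < nvars; i++)
    {
        if (clause_vars[i].varno != innerrel)
            clause_vars[i].varno = OUTER_MARK;
    }
}

int
main(void)
{
    FakeVar quals[] = {{1, 2}, {3, 1}};     /* rel 3 is the inner side */

    toy_join_references(quals, 2, 3);
    printf("outer-side var rewritten to %d\n", quals[0].varno);
    return 0;
}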
+ * We must fix the inner qpqual too, if it has join clauses (this + * could happen if special operators are involved: some indexquals may + * get rechecked as qpquals). */ if (NumRelids((Node *) inner_plan->qual) > 1) inner_plan->qual = join_references(inner_plan->qual, @@ -870,8 +870,8 @@ set_inner_join_references(Plan *inner_plan, else if (IsA(inner_plan, BitmapAnd)) { /* All we need do here is recurse */ - BitmapAnd *innerscan = (BitmapAnd *) inner_plan; - ListCell *l; + BitmapAnd *innerscan = (BitmapAnd *) inner_plan; + ListCell *l; foreach(l, innerscan->bitmapplans) { @@ -883,8 +883,8 @@ set_inner_join_references(Plan *inner_plan, else if (IsA(inner_plan, BitmapOr)) { /* All we need do here is recurse */ - BitmapOr *innerscan = (BitmapOr *) inner_plan; - ListCell *l; + BitmapOr *innerscan = (BitmapOr *) inner_plan; + ListCell *l; foreach(l, innerscan->bitmapplans) { @@ -963,7 +963,7 @@ set_uppernode_references(Plan *plan, Index subvarno) * * In most cases, subplan tlists will be "flat" tlists with only Vars, * so we try to optimize that case by extracting information about Vars - * in advance. Matching a parent tlist to a child is still an O(N^2) + * in advance. Matching a parent tlist to a child is still an O(N^2) * operation, but at least with a much smaller constant factor than plain * tlist_member() searches. * @@ -994,7 +994,7 @@ build_tlist_index(List *tlist) if (tle->expr && IsA(tle->expr, Var)) { - Var *var = (Var *) tle->expr; + Var *var = (Var *) tle->expr; vinfo->varno = var->varno; vinfo->varattno = var->varattno; @@ -1068,7 +1068,7 @@ search_indexed_tlist_for_non_var(Node *node, exprType((Node *) tle->expr), exprTypmod((Node *) tle->expr), 0); - newvar->varnoold = 0; /* wasn't ever a plain Var */ + newvar->varnoold = 0; /* wasn't ever a plain Var */ newvar->varoattno = 0; return newvar; } @@ -1213,7 +1213,7 @@ replace_vars_with_subplan_refs(Node *node, static Node * replace_vars_with_subplan_refs_mutator(Node *node, - replace_vars_with_subplan_refs_context *context) + replace_vars_with_subplan_refs_context *context) { Var *newvar; diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c index ec037db514c..b0dc9c5bf7f 100644 --- a/src/backend/optimizer/plan/subselect.c +++ b/src/backend/optimizer/plan/subselect.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.99 2005/06/05 22:32:56 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.100 2005/10/15 02:49:20 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -110,19 +110,18 @@ replace_outer_var(Var *var) abslevel = PlannerQueryLevel - var->varlevelsup; /* - * If there's already a PlannerParamList entry for this same Var, just - * use it. NOTE: in sufficiently complex querytrees, it is possible - * for the same varno/abslevel to refer to different RTEs in different - * parts of the parsetree, so that different fields might end up - * sharing the same Param number. As long as we check the vartype as - * well, I believe that this sort of aliasing will cause no trouble. - * The correct field should get stored into the Param slot at - * execution in each part of the tree. + * If there's already a PlannerParamList entry for this same Var, just use + * it. 
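The search-then-append behaviour described in the replace_outer_var comment is easy to restate: scan the existing param list for an entry whose varno, attno, level and type all match, and create a new slot only when nothing matches. Below is a compilable toy version with invented structs; the real code also compares vartypmod and stores Node-typed items.

#include <stdio.h>

typedef struct
{
    int varno;
    int varattno;
    int varlevelsup;
    int vartype;        /* stands in for the Oid of the column type */
} FakeVar;

typedef struct
{
    FakeVar item;
    int     abslevel;
} FakeParamItem;

static FakeParamItem param_list[64];
static int           n_params = 0;

/* Return the Param number to use for this outer-level Var. */
static int
toy_replace_outer_var(FakeVar var, int abslevel)
{
    for (int i = 0; i < n_params; i++)
    {
        FakeParamItem *p = &param_list[i];

        if (p->abslevel == abslevel &&
            p->item.varno == var.varno &&
            p->item.varattno == var.varattno &&
            p->item.vartype == var.vartype)
            return i;           /* reuse the existing slot */
    }
    param_list[n_params].item = var;
    param_list[n_params].abslevel = abslevel;
    return n_params++;          /* brand-new Param number */
}

int
main(void)
{
    FakeVar v = {2, 4, 1, 23};

    printf("param %d\n", toy_replace_outer_var(v, 1));
    printf("param %d\n", toy_replace_outer_var(v, 1));  /* reused */
    return 0;
}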
NOTE: in sufficiently complex querytrees, it is possible for the + * same varno/abslevel to refer to different RTEs in different parts of + * the parsetree, so that different fields might end up sharing the same + * Param number. As long as we check the vartype as well, I believe that + * this sort of aliasing will cause no trouble. The correct field should + * get stored into the Param slot at execution in each part of the tree. * - * We also need to demand a match on vartypmod. This does not matter for - * the Param itself, since those are not typmod-dependent, but it does - * matter when make_subplan() instantiates a modified copy of the Var - * for a subplan's args list. + * We also need to demand a match on vartypmod. This does not matter for the + * Param itself, since those are not typmod-dependent, but it does matter + * when make_subplan() instantiates a modified copy of the Var for a + * subplan's args list. */ i = 0; foreach(ppl, PlannerParamList) @@ -179,8 +178,8 @@ replace_outer_agg(Aggref *agg) abslevel = PlannerQueryLevel - agg->agglevelsup; /* - * It does not seem worthwhile to try to match duplicate outer aggs. - * Just make a new slot every time. + * It does not seem worthwhile to try to match duplicate outer aggs. Just + * make a new slot every time. */ agg = (Aggref *) copyObject(agg); IncrementVarSublevelsUp((Node *) agg, -((int) agg->agglevelsup), 0); @@ -253,33 +252,32 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual) Node *result; /* - * Copy the source Query node. This is a quick and dirty kluge to - * resolve the fact that the parser can generate trees with multiple - * links to the same sub-Query node, but the planner wants to scribble - * on the Query. Try to clean this up when we do querytree redesign... + * Copy the source Query node. This is a quick and dirty kluge to resolve + * the fact that the parser can generate trees with multiple links to the + * same sub-Query node, but the planner wants to scribble on the Query. + * Try to clean this up when we do querytree redesign... */ subquery = (Query *) copyObject(subquery); /* - * For an EXISTS subplan, tell lower-level planner to expect that only - * the first tuple will be retrieved. For ALL and ANY subplans, we - * will be able to stop evaluating if the test condition fails, so - * very often not all the tuples will be retrieved; for lack of a - * better idea, specify 50% retrieval. For EXPR and MULTIEXPR - * subplans, use default behavior (we're only expecting one row out, - * anyway). + * For an EXISTS subplan, tell lower-level planner to expect that only the + * first tuple will be retrieved. For ALL and ANY subplans, we will be + * able to stop evaluating if the test condition fails, so very often not + * all the tuples will be retrieved; for lack of a better idea, specify + * 50% retrieval. For EXPR and MULTIEXPR subplans, use default behavior + * (we're only expecting one row out, anyway). * - * NOTE: if you change these numbers, also change cost_qual_eval_walker() - * in path/costsize.c. + * NOTE: if you change these numbers, also change cost_qual_eval_walker() in + * path/costsize.c. * * XXX If an ALL/ANY subplan is uncorrelated, we may decide to hash or - * materialize its result below. In that case it would've been better - * to specify full retrieval. At present, however, we can only detect + * materialize its result below. In that case it would've been better to + * specify full retrieval. 
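The retrieval-fraction policy spelled out in the make_subplan comment boils down to a three-way choice. The sketch below restates it as a helper; the enum and the use of 0.0 to mean "planner default" are assumptions made for the example, not the real SubLinkType definition.

#include <stdio.h>

/* Invented enum; the real SubLinkType lives elsewhere in the tree. */
typedef enum
{
    TOY_EXISTS_SUBLINK,
    TOY_ALL_SUBLINK,
    TOY_ANY_SUBLINK,
    TOY_EXPR_SUBLINK
} ToySubLinkType;

/*
 * EXISTS behaves like LIMIT 1; ALL/ANY can often stop early, so guess
 * 50% retrieval; everything else takes the planner's default (0.0 here).
 */
static double
toy_tuple_fraction(ToySubLinkType type)
{
    if (type == TOY_EXISTS_SUBLINK)
        return 1.0;
    if (type == TOY_ALL_SUBLINK || type == TOY_ANY_SUBLINK)
        return 0.5;
    return 0.0;
}

int
main(void)
{
    printf("EXISTS fraction: %.1f\n", toy_tuple_fraction(TOY_EXISTS_SUBLINK));
    return 0;
}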
At present, however, we can only detect * correlation or lack of it after we've made the subplan :-(. Perhaps - * detection of correlation should be done as a separate step. - * Meanwhile, we don't want to be too optimistic about the percentage - * of tuples retrieved, for fear of selecting a plan that's bad for - * the materialization case. + * detection of correlation should be done as a separate step. Meanwhile, + * we don't want to be too optimistic about the percentage of tuples + * retrieved, for fear of selecting a plan that's bad for the + * materialization case. */ if (slink->subLinkType == EXISTS_SUBLINK) tuple_fraction = 1.0; /* just like a LIMIT 1 */ @@ -294,8 +292,7 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual) */ node->plan = plan = subquery_planner(subquery, tuple_fraction, NULL); - node->plan_id = PlannerPlanId++; /* Assign unique ID to this - * SubPlan */ + node->plan_id = PlannerPlanId++; /* Assign unique ID to this SubPlan */ node->rtable = subquery->rtable; @@ -314,8 +311,8 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual) node->args = NIL; /* - * Make parParam list of params that current query level will pass to - * this child plan. + * Make parParam list of params that current query level will pass to this + * child plan. */ tmpset = bms_copy(plan->extParam); while ((paramid = bms_first_member(tmpset)) >= 0) @@ -328,13 +325,12 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual) bms_free(tmpset); /* - * Un-correlated or undirect correlated plans of EXISTS, EXPR, ARRAY, - * or MULTIEXPR types can be used as initPlans. For EXISTS, EXPR, or - * ARRAY, we just produce a Param referring to the result of - * evaluating the initPlan. For MULTIEXPR, we must build an AND or - * OR-clause of the individual comparison operators, using the - * appropriate lefthand side expressions and Params for the initPlan's - * target items. + * Un-correlated or undirect correlated plans of EXISTS, EXPR, ARRAY, or + * MULTIEXPR types can be used as initPlans. For EXISTS, EXPR, or ARRAY, + * we just produce a Param referring to the result of evaluating the + * initPlan. For MULTIEXPR, we must build an AND or OR-clause of the + * individual comparison operators, using the appropriate lefthand side + * expressions and Params for the initPlan's target items. */ if (node->parParam == NIL && slink->subLinkType == EXISTS_SUBLINK) { @@ -387,9 +383,8 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual) PlannerInitPlan = lappend(PlannerInitPlan, node); /* - * The executable expressions are returned to become part of the - * outer plan's expression tree; they are not kept in the initplan - * node. + * The executable expressions are returned to become part of the outer + * plan's expression tree; they are not kept in the initplan node. */ if (list_length(exprs) > 1) result = (Node *) (node->useOr ? make_orclause(exprs) : @@ -403,22 +398,22 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual) ListCell *l; /* - * We can't convert subplans of ALL_SUBLINK or ANY_SUBLINK types - * to initPlans, even when they are uncorrelated or undirect - * correlated, because we need to scan the output of the subplan - * for each outer tuple. But if it's an IN (= ANY) test, we might - * be able to use a hashtable to avoid comparing all the tuples. 
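The parParam construction mentioned just below walks the subplan's extParam set and keeps the params that belong to the current query level. Here is a toy rendering that uses a plain 64-bit mask instead of a real Bitmapset, and an array of levels standing in for the PlannerParamList lookup.

#include <stdio.h>
#include <stdint.h>

/* abslevel of each known param, standing in for PlannerParamList */
static const int param_abslevel[] = {1, 2, 2, 3};

/*
 * Collect the params (bit positions) from ext_params whose level
 * matches cur_level; those are the ones this query level must supply
 * to the child plan at run time.
 */
static uint64_t
toy_make_parparam(uint64_t ext_params, int cur_level)
{
    uint64_t result = 0;

    for (int id = 0; id < 4; id++)
    {
        if ((ext_params & (UINT64_C(1) << id)) &&
            param_abslevel[id] == cur_level)
            result |= UINT64_C(1) << id;
    }
    return result;
}

int
main(void)
{
    printf("parParam mask: 0x%llx\n",
           (unsigned long long) toy_make_parparam(0xF, 2));
    return 0;
}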
+ * We can't convert subplans of ALL_SUBLINK or ANY_SUBLINK types to + * initPlans, even when they are uncorrelated or undirect correlated, + * because we need to scan the output of the subplan for each outer + * tuple. But if it's an IN (= ANY) test, we might be able to use a + * hashtable to avoid comparing all the tuples. */ if (subplan_is_hashable(slink, node)) node->useHashTable = true; /* - * Otherwise, we have the option to tack a MATERIAL node onto the - * top of the subplan, to reduce the cost of reading it - * repeatedly. This is pointless for a direct-correlated subplan, - * since we'd have to recompute its results each time anyway. For - * uncorrelated/undirect correlated subplans, we add MATERIAL unless - * the subplan's top plan node would materialize its output anyway. + * Otherwise, we have the option to tack a MATERIAL node onto the top + * of the subplan, to reduce the cost of reading it repeatedly. This + * is pointless for a direct-correlated subplan, since we'd have to + * recompute its results each time anyway. For uncorrelated/undirect + * correlated subplans, we add MATERIAL unless the subplan's top plan + * node would materialize its output anyway. */ else if (node->parParam == NIL) { @@ -455,9 +450,9 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual) PlannerParamItem *pitem = list_nth(PlannerParamList, lfirst_int(l)); /* - * The Var or Aggref has already been adjusted to have the - * correct varlevelsup or agglevelsup. We probably don't even - * need to copy it again, but be safe. + * The Var or Aggref has already been adjusted to have the correct + * varlevelsup or agglevelsup. We probably don't even need to + * copy it again, but be safe. */ args = lappend(args, copyObject(pitem->item)); } @@ -545,8 +540,8 @@ convert_sublink_opers(List *lefthand, List *operOids, * * Note: we use make_op_expr in case runtime type conversion function * calls must be inserted for this operator! (But we are not - * expecting to have to resolve unknown Params, so it's okay to - * pass a null pstate.) + * expecting to have to resolve unknown Params, so it's okay to pass a + * null pstate.) */ result = lappend(result, make_op_expr(NULL, @@ -580,8 +575,8 @@ subplan_is_hashable(SubLink *slink, SubPlan *node) /* * The sublink type must be "= ANY" --- that is, an IN operator. (We * require the operator name to be unqualified, which may be overly - * paranoid, or may not be.) XXX since we also check that the - * operators are hashable, the test on operator name may be redundant? + * paranoid, or may not be.) XXX since we also check that the operators + * are hashable, the test on operator name may be redundant? */ if (slink->subLinkType != ANY_SUBLINK) return false; @@ -591,15 +586,15 @@ subplan_is_hashable(SubLink *slink, SubPlan *node) /* * The subplan must not have any direct correlation vars --- else we'd - * have to recompute its output each time, so that the hashtable - * wouldn't gain anything. + * have to recompute its output each time, so that the hashtable wouldn't + * gain anything. */ if (node->parParam != NIL) return false; /* - * The estimated size of the subquery result must fit in work_mem. - * (XXX what about hashtable overhead?) + * The estimated size of the subquery result must fit in work_mem. (XXX + * what about hashtable overhead?) 
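The decision documented in these hunks, hash if possible, otherwise materialize an uncorrelated subplan unless its top node would materialize anyway, can be summarized as a single function. The predicate fields are invented for the sketch; the real test consults subplan_is_hashable, parParam, and the node tag of the subplan's top node.

#include <stdbool.h>
#include <stdio.h>

typedef struct
{
    bool hashable;          /* subplan_is_hashable() said yes */
    bool correlated;        /* parParam != NIL in the real code */
    bool top_materializes;  /* top node already materializes its output */
} ToySubplan;

typedef enum { USE_HASH, ADD_MATERIAL, LEAVE_ALONE } ToyChoice;

/* Mirror the order of the tests in the hunk above. */
static ToyChoice
toy_choose_strategy(const ToySubplan *sp)
{
    if (sp->hashable)
        return USE_HASH;
    if (!sp->correlated && !sp->top_materializes)
        return ADD_MATERIAL;
    return LEAVE_ALONE;
}

int
main(void)
{
    ToySubplan sp = {false, false, false};

    printf("choice = %d\n", (int) toy_choose_strategy(&sp));
    return 0;
}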
*/ subquery_size = node->plan->plan_rows * (MAXALIGN(node->plan->plan_width) + MAXALIGN(sizeof(HeapTupleData))); @@ -607,18 +602,17 @@ subplan_is_hashable(SubLink *slink, SubPlan *node) return false; /* - * The combining operators must be hashable, strict, and - * self-commutative. The need for hashability is obvious, since we - * want to use hashing. Without strictness, behavior in the presence - * of nulls is too unpredictable. (We actually must assume even more - * than plain strictness, see nodeSubplan.c for details.) And - * commutativity ensures that the left and right datatypes are the - * same; this allows us to assume that the combining operators are - * equality for the righthand datatype, so that they can be used to - * compare righthand tuples as well as comparing lefthand to righthand - * tuples. (This last restriction could be relaxed by using two - * different sets of operators with the hash table, but there is no - * obvious usefulness to that at present.) + * The combining operators must be hashable, strict, and self-commutative. + * The need for hashability is obvious, since we want to use hashing. + * Without strictness, behavior in the presence of nulls is too + * unpredictable. (We actually must assume even more than plain + * strictness, see nodeSubplan.c for details.) And commutativity ensures + * that the left and right datatypes are the same; this allows us to + * assume that the combining operators are equality for the righthand + * datatype, so that they can be used to compare righthand tuples as well + * as comparing lefthand to righthand tuples. (This last restriction + * could be relaxed by using two different sets of operators with the hash + * table, but there is no obvious usefulness to that at present.) */ foreach(l, slink->operOids) { @@ -679,24 +673,24 @@ convert_IN_to_join(PlannerInfo *root, SubLink *sublink) return NULL; /* - * The sub-select must not refer to any Vars of the parent query. - * (Vars of higher levels should be okay, though.) + * The sub-select must not refer to any Vars of the parent query. (Vars of + * higher levels should be okay, though.) */ if (contain_vars_of_level((Node *) subselect, 1)) return NULL; /* - * The left-hand expressions must contain some Vars of the current - * query, else it's not gonna be a join. + * The left-hand expressions must contain some Vars of the current query, + * else it's not gonna be a join. */ left_varnos = pull_varnos((Node *) sublink->lefthand); if (bms_is_empty(left_varnos)) return NULL; /* - * The left-hand expressions mustn't be volatile. (Perhaps we should - * test the combining operators, too? We'd only need to point the - * function directly at the sublink ...) + * The left-hand expressions mustn't be volatile. (Perhaps we should test + * the combining operators, too? We'd only need to point the function + * directly at the sublink ...) */ if (contain_volatile_functions((Node *) sublink->lefthand)) return NULL; @@ -704,10 +698,10 @@ convert_IN_to_join(PlannerInfo *root, SubLink *sublink) /* * Okay, pull up the sub-select into top range table and jointree. * - * We rely here on the assumption that the outer query has no references - * to the inner (necessarily true, other than the Vars that we build - * below). Therefore this is a lot easier than what - * pull_up_subqueries has to go through. + * We rely here on the assumption that the outer query has no references to + * the inner (necessarily true, other than the Vars that we build below). 
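The size test referenced above multiplies the expected row count by an aligned per-row footprint and compares the result with work_mem, which is tracked in kilobytes. The sketch below assumes 8-byte alignment and an invented 24-byte header size; the real code uses MAXALIGN and sizeof(HeapTupleData).

#include <stdbool.h>
#include <stdio.h>

/* Round up to an 8-byte boundary, like MAXALIGN with 8-byte alignment. */
#define TOY_ALIGN(x)        (((x) + 7) & ~(size_t) 7)

#define TOY_TUPLE_HEADER    24  /* invented stand-in for sizeof(HeapTupleData) */

/*
 * Would the whole subquery result plausibly fit in work_mem?
 * plan_rows and plan_width come from the subplan's cost estimates;
 * work_mem_kb is the GUC value in kilobytes.
 */
static bool
toy_result_fits_in_work_mem(double plan_rows, int plan_width, int work_mem_kb)
{
    double subquery_size;

    subquery_size = plan_rows *
        (TOY_ALIGN((size_t) plan_width) + TOY_ALIGN(TOY_TUPLE_HEADER));
    return subquery_size <= work_mem_kb * 1024.0;
}

int
main(void)
{
    printf("fits: %d\n", toy_result_fits_in_work_mem(10000.0, 64, 1024));
    return 0;
}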
+ * Therefore this is a lot easier than what pull_up_subqueries has to go + * through. */ rte = addRangeTableEntryForSubquery(NULL, subselect, @@ -729,8 +723,8 @@ convert_IN_to_join(PlannerInfo *root, SubLink *sublink) /* * Build the result qual expressions. As a side effect, - * ininfo->sub_targetlist is filled with a list of Vars representing - * the subselect outputs. + * ininfo->sub_targetlist is filled with a list of Vars representing the + * subselect outputs. */ exprs = convert_sublink_opers(sublink->lefthand, sublink->operOids, @@ -811,8 +805,7 @@ process_sublinks_mutator(Node *node, bool *isTopQual) List *lefthand; /* - * First, recursively process the lefthand-side expressions, if - * any. + * First, recursively process the lefthand-side expressions, if any. */ locTopQual = false; lefthand = (List *) @@ -825,22 +818,22 @@ process_sublinks_mutator(Node *node, bool *isTopQual) } /* - * We should never see a SubPlan expression in the input (since this - * is the very routine that creates 'em to begin with). We shouldn't - * find ourselves invoked directly on a Query, either. + * We should never see a SubPlan expression in the input (since this is + * the very routine that creates 'em to begin with). We shouldn't find + * ourselves invoked directly on a Query, either. */ Assert(!is_subplan(node)); Assert(!IsA(node, Query)); /* * Because make_subplan() could return an AND or OR clause, we have to - * take steps to preserve AND/OR flatness of a qual. We assume the - * input has been AND/OR flattened and so we need no recursion here. + * take steps to preserve AND/OR flatness of a qual. We assume the input + * has been AND/OR flattened and so we need no recursion here. * * If we recurse down through anything other than an AND node, we are - * definitely not at top qual level anymore. (Due to the coding here, - * we will not get called on the List subnodes of an AND, so no check - * is needed for List.) + * definitely not at top qual level anymore. (Due to the coding here, we + * will not get called on the List subnodes of an AND, so no check is + * needed for List.) */ if (and_clause(node)) { @@ -909,8 +902,8 @@ SS_finalize_plan(Plan *plan, List *rtable) /* * First, scan the param list to discover the sets of params that are - * available from outer query levels and my own query level. We do - * this once to save time in the per-plan recursion steps. + * available from outer query levels and my own query level. We do this + * once to save time in the per-plan recursion steps. */ paramid = 0; foreach(l, PlannerParamList) @@ -942,13 +935,12 @@ SS_finalize_plan(Plan *plan, List *rtable) bms_free(valid_params); /* - * Finally, attach any initPlans to the topmost plan node, - * and add their extParams to the topmost node's, too. + * Finally, attach any initPlans to the topmost plan node, and add their + * extParams to the topmost node's, too. * - * We also add the total_cost of each initPlan to the startup cost of - * the top node. This is a conservative overestimate, since in - * fact each initPlan might be executed later than plan startup, - * or even not at all. + * We also add the total_cost of each initPlan to the startup cost of the top + * node. This is a conservative overestimate, since in fact each initPlan + * might be executed later than plan startup, or even not at all. 
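The three vetoes spelled out in the preceding hunks (the sub-select must not see the parent's Vars, the lefthand must contain some current-level Vars, and the lefthand must not be volatile) gate the whole IN-to-join conversion. A compressed restatement follows, with every boolean a stand-in for the corresponding real check (contain_vars_of_level, pull_varnos, contain_volatile_functions).

#include <stdbool.h>

typedef struct
{
    bool subselect_uses_parent_vars;   /* contain_vars_of_level(subselect, 1) */
    bool lefthand_has_current_vars;    /* !bms_is_empty(pull_varnos(lefthand)) */
    bool lefthand_is_volatile;         /* contain_volatile_functions(lefthand) */
} ToyInTest;

/* true means "go ahead and pull the sub-select up into the join tree" */
static bool
toy_can_convert_IN_to_join(const ToyInTest *t)
{
    if (t->subselect_uses_parent_vars)
        return false;               /* would not be an uncorrelated join */
    if (!t->lefthand_has_current_vars)
        return false;               /* nothing to join against */
    if (t->lefthand_is_volatile)
        return false;               /* evaluation count would change */
    return true;
}

int
main(void)
{
    ToyInTest t = {false, true, false};

    return toy_can_convert_IN_to_join(&t) ? 0 : 1;
}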
*/ plan->initPlan = PlannerInitPlan; PlannerInitPlan = NIL; /* make sure they're not attached twice */ @@ -988,10 +980,10 @@ finalize_plan(Plan *plan, List *rtable, context.outer_params = outer_params; /* - * When we call finalize_primnode, context.paramids sets are - * automatically merged together. But when recursing to self, we have - * to do it the hard way. We want the paramids set to include params - * in subplans as well as at this level. + * When we call finalize_primnode, context.paramids sets are automatically + * merged together. But when recursing to self, we have to do it the hard + * way. We want the paramids set to include params in subplans as well as + * at this level. */ /* Find params in targetlist and qual */ @@ -1011,17 +1003,18 @@ finalize_plan(Plan *plan, List *rtable, &context); /* - * we need not look at indexqualorig, since it will have the - * same param references as indexqual. + * we need not look at indexqualorig, since it will have the same + * param references as indexqual. */ break; case T_BitmapIndexScan: finalize_primnode((Node *) ((BitmapIndexScan *) plan)->indexqual, &context); + /* - * we need not look at indexqualorig, since it will have the - * same param references as indexqual. + * we need not look at indexqualorig, since it will have the same + * param references as indexqual. */ break; @@ -1038,14 +1031,14 @@ finalize_plan(Plan *plan, List *rtable, case T_SubqueryScan: /* - * In a SubqueryScan, SS_finalize_plan has already been run on - * the subplan by the inner invocation of subquery_planner, so - * there's no need to do it again. Instead, just pull out the - * subplan's extParams list, which represents the params it - * needs from my level and higher levels. + * In a SubqueryScan, SS_finalize_plan has already been run on the + * subplan by the inner invocation of subquery_planner, so there's + * no need to do it again. Instead, just pull out the subplan's + * extParams list, which represents the params it needs from my + * level and higher levels. */ context.paramids = bms_add_members(context.paramids, - ((SubqueryScan *) plan)->subplan->extParam); + ((SubqueryScan *) plan)->subplan->extParam); break; case T_FunctionScan: @@ -1170,8 +1163,8 @@ finalize_plan(Plan *plan, List *rtable, plan->allParam = context.paramids; /* - * For speed at execution time, make sure extParam/allParam are - * actually NULL if they are empty sets. + * For speed at execution time, make sure extParam/allParam are actually + * NULL if they are empty sets. */ if (bms_is_empty(plan->extParam)) { @@ -1212,8 +1205,8 @@ finalize_primnode(Node *node, finalize_primnode_context *context) /* Add outer-level params needed by the subplan to paramids */ context->paramids = bms_join(context->paramids, - bms_intersect(subplan->plan->extParam, - context->outer_params)); + bms_intersect(subplan->plan->extParam, + context->outer_params)); /* fall through to recurse into subplan args */ } return expression_tree_walker(node, finalize_primnode, @@ -1241,7 +1234,7 @@ SS_make_initplan_from_plan(PlannerInfo *root, Plan *plan, int paramid; /* - * Set up for a new level of subquery. This is just to keep + * Set up for a new level of subquery. This is just to keep * SS_finalize_plan from becoming confused. 
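The cost bookkeeping described just above charges every attached initPlan's total cost against the top node's startup cost, a deliberately conservative overestimate. A toy version with invented plan and cost fields (the real code also folds the initplans' extParams into the top node's param sets):

#include <stdio.h>

typedef struct
{
    double startup_cost;
    double total_cost;
} ToyPlan;

/*
 * Charge each initplan's total_cost to the top node's startup cost,
 * even though an initplan may run later than startup, or never.
 */
static void
toy_attach_initplans(ToyPlan *top, const ToyPlan *initplans, int n)
{
    double initplan_cost = 0.0;

    for (int i = 0; i < n; i++)
        initplan_cost += initplans[i].total_cost;

    top->startup_cost += initplan_cost;
}

int
main(void)
{
    ToyPlan top = {10.0, 100.0};
    ToyPlan inits[] = {{0.0, 25.0}, {0.0, 5.0}};

    toy_attach_initplans(&top, inits, 2);
    printf("startup cost is now %.1f\n", top.startup_cost);
    return 0;
}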
*/ PlannerQueryLevel++; @@ -1262,16 +1255,15 @@ SS_make_initplan_from_plan(PlannerInfo *root, Plan *plan, node = makeNode(SubPlan); node->subLinkType = EXPR_SUBLINK; node->plan = plan; - node->plan_id = PlannerPlanId++; /* Assign unique ID to this - * SubPlan */ + node->plan_id = PlannerPlanId++; /* Assign unique ID to this SubPlan */ node->rtable = root->parse->rtable; PlannerInitPlan = lappend(PlannerInitPlan, node); /* - * Make parParam list of params that current query level will pass to - * this child plan. (In current usage there probably aren't any.) + * Make parParam list of params that current query level will pass to this + * child plan. (In current usage there probably aren't any.) */ tmpset = bms_copy(plan->extParam); while ((paramid = bms_first_member(tmpset)) >= 0) |
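Taken together, SS_make_initplan_from_plan wraps an already-built plan as an EXPR-type SubPlan, registers it as an initplan, and hands back a Param through which the outer plan will see its single result. A stand-in outline of that sequence (all types and the parameter counter are invented for the example; the real function also bumps PlannerQueryLevel around SS_finalize_plan):

#include <stdio.h>

typedef struct { double total_cost; } ToyPlan;

typedef struct
{
    ToyPlan *plan;
    int      plan_id;
} ToySubPlan;

typedef struct { int paramid; int paramtype; } ToyParam;

static ToySubPlan toy_init_plans[16];
static int        toy_n_init_plans = 0;
static int        toy_next_plan_id = 0;
static int        toy_next_param_id = 0;

/*
 * Register 'plan' as an initplan and return the Param through which
 * the outer query will fetch its one result value.
 */
static ToyParam
toy_make_initplan(ToyPlan *plan, int resulttype)
{
    ToySubPlan *node = &toy_init_plans[toy_n_init_plans++];
    ToyParam    prm;

    node->plan = plan;
    node->plan_id = toy_next_plan_id++;     /* unique ID for this SubPlan */

    prm.paramid = toy_next_param_id++;      /* slot the executor will fill */
    prm.paramtype = resulttype;
    return prm;
}

int
main(void)
{
    ToyPlan  p = {42.0};
    ToyParam prm = toy_make_initplan(&p, 23 /* int4, say */);

    printf("initplan result available as $%d\n", prm.paramid);
    return 0;
}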