author     Tom Lane <tgl@sss.pgh.pa.us>    2016-06-10 16:03:37 -0400
committer  Tom Lane <tgl@sss.pgh.pa.us>    2016-06-10 16:03:46 -0400
commit     2f153ddfdd318b211590dd5585f65f357d85c2de (patch)
tree       503d44c77d942b3e2796429d7930e308f8fddc39 /src
parent     13761bccb177022c8c0dabc08f3e9acb491b1c96 (diff)
download   postgresql-2f153ddfdd318b211590dd5585f65f357d85c2de.tar.gz
           postgresql-2f153ddfdd318b211590dd5585f65f357d85c2de.zip
Refactor to reduce code duplication for function property checking.
As noted by Andres Freund, we'd accumulated quite a few similar functions in clauses.c that examine all functions in an expression tree to see if they satisfy some boolean test. Reduce the duplication by inventing a function check_functions_in_node() that applies a simple callback function to each SQL function OID appearing in a given expression node. This also fixes some arguable oversights; for example, contain_mutable_functions() did not check aggregate or window functions for mutability. I doubt that that represents a live bug at the moment, because we don't really consider mutability for aggregates; but it might someday be one.

I chose to put check_functions_in_node() in nodeFuncs.c because it seemed like other modules might wish to use it in future. That in turn forced moving set_opfuncid() et al into nodeFuncs.c, as the alternative was for nodeFuncs.c to depend on optimizer/setrefs.c, which didn't seem very clean.

In passing, teach contain_leaked_vars_walker() about a few more expression node types it can safely look through, and improve the rather messy and undercommented code in has_parallel_hazard_walker().

Discussion: <20160527185853.ziol2os2zskahl7v@alap3.anarazel.de>
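[Editor's illustration, not part of the patch] The sketch below shows how a hypothetical new caller, outside clauses.c, might use the check_functions_in_node() API this commit introduces. It follows the same pattern as the rewritten walkers in the diff: a small checker callback plus a walker that keeps control of the recursion. The names contain_user_defined_functions* and the FirstNormalObjectId test are invented for this example only.

    /*
     * Hypothetical example: report whether an expression tree calls any
     * user-defined (non-built-in) function, using check_functions_in_node().
     */
    #include "postgres.h"

    #include "access/transam.h"        /* for FirstNormalObjectId */
    #include "nodes/nodeFuncs.h"

    static bool contain_user_defined_functions_walker(Node *node, void *context);

    /* Callback: true if this function OID belongs to a user-defined function */
    static bool
    contain_user_defined_functions_checker(Oid func_id, void *context)
    {
        return (func_id >= FirstNormalObjectId);
    }

    /* Entry point: does the given expression call any user-defined function? */
    bool
    contain_user_defined_functions(Node *clause)
    {
        return contain_user_defined_functions_walker(clause, NULL);
    }

    static bool
    contain_user_defined_functions_walker(Node *node, void *context)
    {
        if (node == NULL)
            return false;
        /* Check function OIDs appearing in this node itself */
        if (check_functions_in_node(node,
                                    contain_user_defined_functions_checker,
                                    context))
            return true;
        /* Recurse into subselects */
        if (IsA(node, Query))
            return query_tree_walker((Query *) node,
                                     contain_user_defined_functions_walker,
                                     context, 0);
        /* Recurse to check arguments */
        return expression_tree_walker(node,
                                      contain_user_defined_functions_walker,
                                      context);
    }

The design choice is visible here: check_functions_in_node() inspects only the single node it is handed, so each caller decides how to recurse (query_tree_walker() for subselects, expression_tree_walker() otherwise) and can layer its own node-specific rules on top, as the clauses.c walkers in the diff below do.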
Diffstat (limited to 'src')
-rw-r--r--  src/backend/nodes/nodeFuncs.c          178
-rw-r--r--  src/backend/optimizer/plan/setrefs.c    63
-rw-r--r--  src/backend/optimizer/util/clauses.c   688
-rw-r--r--  src/backend/optimizer/util/predtest.c    2
-rw-r--r--  src/backend/utils/cache/relcache.c       2
-rw-r--r--  src/include/nodes/nodeFuncs.h           10
-rw-r--r--  src/include/optimizer/planmain.h         3
7 files changed, 371 insertions, 575 deletions
diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c
index 5facd439cac..af2a4cb8973 100644
--- a/src/backend/nodes/nodeFuncs.c
+++ b/src/backend/nodes/nodeFuncs.c
@@ -27,6 +27,7 @@
static bool expression_returns_set_walker(Node *node, void *context);
static int leftmostLoc(int loc1, int loc2);
+static bool fix_opfuncids_walker(Node *node, void *context);
static bool planstate_walk_subplans(List *plans, bool (*walker) (),
void *context);
static bool planstate_walk_members(List *plans, PlanState **planstates,
@@ -1560,6 +1561,183 @@ leftmostLoc(int loc1, int loc2)
/*
+ * fix_opfuncids
+ * Calculate opfuncid field from opno for each OpExpr node in given tree.
+ * The given tree can be anything expression_tree_walker handles.
+ *
+ * The argument is modified in-place. (This is OK since we'd want the
+ * same change for any node, even if it gets visited more than once due to
+ * shared structure.)
+ */
+void
+fix_opfuncids(Node *node)
+{
+ /* This tree walk requires no special setup, so away we go... */
+ fix_opfuncids_walker(node, NULL);
+}
+
+static bool
+fix_opfuncids_walker(Node *node, void *context)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, OpExpr))
+ set_opfuncid((OpExpr *) node);
+ else if (IsA(node, DistinctExpr))
+ set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
+ else if (IsA(node, NullIfExpr))
+ set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
+ else if (IsA(node, ScalarArrayOpExpr))
+ set_sa_opfuncid((ScalarArrayOpExpr *) node);
+ return expression_tree_walker(node, fix_opfuncids_walker, context);
+}
+
+/*
+ * set_opfuncid
+ * Set the opfuncid (procedure OID) in an OpExpr node,
+ * if it hasn't been set already.
+ *
+ * Because of struct equivalence, this can also be used for
+ * DistinctExpr and NullIfExpr nodes.
+ */
+void
+set_opfuncid(OpExpr *opexpr)
+{
+ if (opexpr->opfuncid == InvalidOid)
+ opexpr->opfuncid = get_opcode(opexpr->opno);
+}
+
+/*
+ * set_sa_opfuncid
+ * As above, for ScalarArrayOpExpr nodes.
+ */
+void
+set_sa_opfuncid(ScalarArrayOpExpr *opexpr)
+{
+ if (opexpr->opfuncid == InvalidOid)
+ opexpr->opfuncid = get_opcode(opexpr->opno);
+}
+
+
+/*
+ * check_functions_in_node -
+ * apply checker() to each function OID contained in given expression node
+ *
+ * Returns TRUE if the checker() function does; for nodes representing more
+ * than one function call, returns TRUE if the checker() function does so
+ * for any of those functions. Returns FALSE if node does not invoke any
+ * SQL-visible function. Caller must not pass node == NULL.
+ *
+ * This function examines only the given node; it does not recurse into any
+ * sub-expressions. Callers typically prefer to keep control of the recursion
+ * for themselves, in case additional checks should be made, or because they
+ * have special rules about which parts of the tree need to be visited.
+ *
+ * Note: we ignore MinMaxExpr, XmlExpr, and CoerceToDomain nodes, because they
+ * do not contain SQL function OIDs. However, they can invoke SQL-visible
+ * functions, so callers should take thought about how to treat them.
+ */
+bool
+check_functions_in_node(Node *node, check_function_callback checker,
+ void *context)
+{
+ switch (nodeTag(node))
+ {
+ case T_Aggref:
+ {
+ Aggref *expr = (Aggref *) node;
+
+ if (checker(expr->aggfnoid, context))
+ return true;
+ }
+ break;
+ case T_WindowFunc:
+ {
+ WindowFunc *expr = (WindowFunc *) node;
+
+ if (checker(expr->winfnoid, context))
+ return true;
+ }
+ break;
+ case T_FuncExpr:
+ {
+ FuncExpr *expr = (FuncExpr *) node;
+
+ if (checker(expr->funcid, context))
+ return true;
+ }
+ break;
+ case T_OpExpr:
+ case T_DistinctExpr: /* struct-equivalent to OpExpr */
+ case T_NullIfExpr: /* struct-equivalent to OpExpr */
+ {
+ OpExpr *expr = (OpExpr *) node;
+
+ /* Set opfuncid if it wasn't set already */
+ set_opfuncid(expr);
+ if (checker(expr->opfuncid, context))
+ return true;
+ }
+ break;
+ case T_ScalarArrayOpExpr:
+ {
+ ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
+
+ set_sa_opfuncid(expr);
+ if (checker(expr->opfuncid, context))
+ return true;
+ }
+ break;
+ case T_CoerceViaIO:
+ {
+ CoerceViaIO *expr = (CoerceViaIO *) node;
+ Oid iofunc;
+ Oid typioparam;
+ bool typisvarlena;
+
+ /* check the result type's input function */
+ getTypeInputInfo(expr->resulttype,
+ &iofunc, &typioparam);
+ if (checker(iofunc, context))
+ return true;
+ /* check the input type's output function */
+ getTypeOutputInfo(exprType((Node *) expr->arg),
+ &iofunc, &typisvarlena);
+ if (checker(iofunc, context))
+ return true;
+ }
+ break;
+ case T_ArrayCoerceExpr:
+ {
+ ArrayCoerceExpr *expr = (ArrayCoerceExpr *) node;
+
+ if (OidIsValid(expr->elemfuncid) &&
+ checker(expr->elemfuncid, context))
+ return true;
+ }
+ break;
+ case T_RowCompareExpr:
+ {
+ RowCompareExpr *rcexpr = (RowCompareExpr *) node;
+ ListCell *opid;
+
+ foreach(opid, rcexpr->opnos)
+ {
+ Oid opfuncid = get_opcode(lfirst_oid(opid));
+
+ if (checker(opfuncid, context))
+ return true;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ return false;
+}
+
+
+/*
* Standard expression-tree walking support
*
* We used to have near-duplicate code in many different routines that
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index 9b690cf66e9..f7f0746ab3e 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -147,7 +147,6 @@ static List *set_returning_clause_references(PlannerInfo *root,
Plan *topplan,
Index resultRelation,
int rtoffset);
-static bool fix_opfuncids_walker(Node *node, void *context);
static bool extract_query_dependencies_walker(Node *node,
PlannerInfo *context);
@@ -2557,68 +2556,6 @@ set_returning_clause_references(PlannerInfo *root,
/*****************************************************************************
- * OPERATOR REGPROC LOOKUP
- *****************************************************************************/
-
-/*
- * fix_opfuncids
- * Calculate opfuncid field from opno for each OpExpr node in given tree.
- * The given tree can be anything expression_tree_walker handles.
- *
- * The argument is modified in-place. (This is OK since we'd want the
- * same change for any node, even if it gets visited more than once due to
- * shared structure.)
- */
-void
-fix_opfuncids(Node *node)
-{
- /* This tree walk requires no special setup, so away we go... */
- fix_opfuncids_walker(node, NULL);
-}
-
-static bool
-fix_opfuncids_walker(Node *node, void *context)
-{
- if (node == NULL)
- return false;
- if (IsA(node, OpExpr))
- set_opfuncid((OpExpr *) node);
- else if (IsA(node, DistinctExpr))
- set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
- else if (IsA(node, NullIfExpr))
- set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
- else if (IsA(node, ScalarArrayOpExpr))
- set_sa_opfuncid((ScalarArrayOpExpr *) node);
- return expression_tree_walker(node, fix_opfuncids_walker, context);
-}
-
-/*
- * set_opfuncid
- * Set the opfuncid (procedure OID) in an OpExpr node,
- * if it hasn't been set already.
- *
- * Because of struct equivalence, this can also be used for
- * DistinctExpr and NullIfExpr nodes.
- */
-void
-set_opfuncid(OpExpr *opexpr)
-{
- if (opexpr->opfuncid == InvalidOid)
- opexpr->opfuncid = get_opcode(opexpr->opno);
-}
-
-/*
- * set_sa_opfuncid
- * As above, for ScalarArrayOpExpr nodes.
- */
-void
-set_sa_opfuncid(ScalarArrayOpExpr *opexpr)
-{
- if (opexpr->opfuncid == InvalidOid)
- opexpr->opfuncid = get_opcode(opexpr->opno);
-}
-
-/*****************************************************************************
* QUERY DEPENDENCY MANAGEMENT
*****************************************************************************/
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index dc814a2b3ac..8d31204b621 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -113,8 +113,6 @@ static bool contain_volatile_functions_walker(Node *node, void *context);
static bool contain_volatile_functions_not_nextval_walker(Node *node, void *context);
static bool has_parallel_hazard_walker(Node *node,
has_parallel_hazard_arg *context);
-static bool parallel_too_dangerous(char proparallel,
- has_parallel_hazard_arg *context);
static bool contain_nonstrict_functions_walker(Node *node, void *context);
static bool contain_leaked_vars_walker(Node *node, void *context);
static Relids find_nonnullable_rels_walker(Node *node, bool top_level);
@@ -994,95 +992,33 @@ contain_mutable_functions(Node *clause)
}
static bool
+contain_mutable_functions_checker(Oid func_id, void *context)
+{
+ return (func_volatile(func_id) != PROVOLATILE_IMMUTABLE);
+}
+
+static bool
contain_mutable_functions_walker(Node *node, void *context)
{
if (node == NULL)
return false;
- if (IsA(node, FuncExpr))
- {
- FuncExpr *expr = (FuncExpr *) node;
-
- if (func_volatile(expr->funcid) != PROVOLATILE_IMMUTABLE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, OpExpr))
- {
- OpExpr *expr = (OpExpr *) node;
-
- set_opfuncid(expr);
- if (func_volatile(expr->opfuncid) != PROVOLATILE_IMMUTABLE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, DistinctExpr))
- {
- DistinctExpr *expr = (DistinctExpr *) node;
-
- set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
- if (func_volatile(expr->opfuncid) != PROVOLATILE_IMMUTABLE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, NullIfExpr))
- {
- NullIfExpr *expr = (NullIfExpr *) node;
-
- set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
- if (func_volatile(expr->opfuncid) != PROVOLATILE_IMMUTABLE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, ScalarArrayOpExpr))
- {
- ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
-
- set_sa_opfuncid(expr);
- if (func_volatile(expr->opfuncid) != PROVOLATILE_IMMUTABLE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, CoerceViaIO))
- {
- CoerceViaIO *expr = (CoerceViaIO *) node;
- Oid iofunc;
- Oid typioparam;
- bool typisvarlena;
-
- /* check the result type's input function */
- getTypeInputInfo(expr->resulttype,
- &iofunc, &typioparam);
- if (func_volatile(iofunc) != PROVOLATILE_IMMUTABLE)
- return true;
- /* check the input type's output function */
- getTypeOutputInfo(exprType((Node *) expr->arg),
- &iofunc, &typisvarlena);
- if (func_volatile(iofunc) != PROVOLATILE_IMMUTABLE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, ArrayCoerceExpr))
- {
- ArrayCoerceExpr *expr = (ArrayCoerceExpr *) node;
+ /* Check for mutable functions in node itself */
+ if (check_functions_in_node(node, contain_mutable_functions_checker,
+ context))
+ return true;
- if (OidIsValid(expr->elemfuncid) &&
- func_volatile(expr->elemfuncid) != PROVOLATILE_IMMUTABLE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, RowCompareExpr))
- {
- RowCompareExpr *rcexpr = (RowCompareExpr *) node;
- ListCell *opid;
+ /*
+ * It should be safe to treat MinMaxExpr as immutable, because it will
+ * depend on a non-cross-type btree comparison function, and those should
+ * always be immutable. Treating XmlExpr as immutable is more dubious,
+ * and treating CoerceToDomain as immutable is outright dangerous. But we
+ * have done so historically, and changing this would probably cause more
+ * problems than it would fix. In practice, if you have a non-immutable
+ * domain constraint you are in for pain anyhow.
+ */
- foreach(opid, rcexpr->opnos)
- {
- if (op_volatile(lfirst_oid(opid)) != PROVOLATILE_IMMUTABLE)
- return true;
- }
- /* else fall through to check args */
- }
- else if (IsA(node, Query))
+ /* Recurse to check arguments */
+ if (IsA(node, Query))
{
/* Recurse into subselects */
return query_tree_walker((Query *) node,
@@ -1122,110 +1058,29 @@ contain_volatile_functions(Node *clause)
return contain_volatile_functions_walker(clause, NULL);
}
-bool
-contain_volatile_functions_not_nextval(Node *clause)
+static bool
+contain_volatile_functions_checker(Oid func_id, void *context)
{
- return contain_volatile_functions_not_nextval_walker(clause, NULL);
+ return (func_volatile(func_id) == PROVOLATILE_VOLATILE);
}
-/*
- * General purpose code for checking expression volatility.
- *
- * Special purpose code for use in COPY is almost identical to this,
- * so any changes here may also be needed in other contain_volatile...
- * functions.
- */
static bool
contain_volatile_functions_walker(Node *node, void *context)
{
if (node == NULL)
return false;
- if (IsA(node, FuncExpr))
- {
- FuncExpr *expr = (FuncExpr *) node;
-
- if (func_volatile(expr->funcid) == PROVOLATILE_VOLATILE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, OpExpr))
- {
- OpExpr *expr = (OpExpr *) node;
-
- set_opfuncid(expr);
- if (func_volatile(expr->opfuncid) == PROVOLATILE_VOLATILE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, DistinctExpr))
- {
- DistinctExpr *expr = (DistinctExpr *) node;
-
- set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
- if (func_volatile(expr->opfuncid) == PROVOLATILE_VOLATILE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, NullIfExpr))
- {
- NullIfExpr *expr = (NullIfExpr *) node;
-
- set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
- if (func_volatile(expr->opfuncid) == PROVOLATILE_VOLATILE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, ScalarArrayOpExpr))
- {
- ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
-
- set_sa_opfuncid(expr);
- if (func_volatile(expr->opfuncid) == PROVOLATILE_VOLATILE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, CoerceViaIO))
- {
- CoerceViaIO *expr = (CoerceViaIO *) node;
- Oid iofunc;
- Oid typioparam;
- bool typisvarlena;
-
- /* check the result type's input function */
- getTypeInputInfo(expr->resulttype,
- &iofunc, &typioparam);
- if (func_volatile(iofunc) == PROVOLATILE_VOLATILE)
- return true;
- /* check the input type's output function */
- getTypeOutputInfo(exprType((Node *) expr->arg),
- &iofunc, &typisvarlena);
- if (func_volatile(iofunc) == PROVOLATILE_VOLATILE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, ArrayCoerceExpr))
- {
- ArrayCoerceExpr *expr = (ArrayCoerceExpr *) node;
+ /* Check for volatile functions in node itself */
+ if (check_functions_in_node(node, contain_volatile_functions_checker,
+ context))
+ return true;
- if (OidIsValid(expr->elemfuncid) &&
- func_volatile(expr->elemfuncid) == PROVOLATILE_VOLATILE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, RowCompareExpr))
- {
- /* RowCompare probably can't have volatile ops, but check anyway */
- RowCompareExpr *rcexpr = (RowCompareExpr *) node;
- ListCell *opid;
+ /*
+ * See notes in contain_mutable_functions_walker about why we treat
+ * MinMaxExpr, XmlExpr, and CoerceToDomain as immutable.
+ */
- foreach(opid, rcexpr->opnos)
- {
- if (op_volatile(lfirst_oid(opid)) == PROVOLATILE_VOLATILE)
- return true;
- }
- /* else fall through to check args */
- }
- else if (IsA(node, Query))
+ /* Recurse to check arguments */
+ if (IsA(node, Query))
{
/* Recurse into subselects */
return query_tree_walker((Query *) node,
@@ -1237,104 +1092,48 @@ contain_volatile_functions_walker(Node *node, void *context)
}
/*
- * Special purpose version of contain_volatile_functions for use in COPY
+ * Special purpose version of contain_volatile_functions() for use in COPY:
+ * ignore nextval(), but treat all other functions normally.
*/
+bool
+contain_volatile_functions_not_nextval(Node *clause)
+{
+ return contain_volatile_functions_not_nextval_walker(clause, NULL);
+}
+
+static bool
+contain_volatile_functions_not_nextval_checker(Oid func_id, void *context)
+{
+ return (func_id != F_NEXTVAL_OID &&
+ func_volatile(func_id) == PROVOLATILE_VOLATILE);
+}
+
static bool
contain_volatile_functions_not_nextval_walker(Node *node, void *context)
{
if (node == NULL)
return false;
- if (IsA(node, FuncExpr))
- {
- FuncExpr *expr = (FuncExpr *) node;
-
- /*
- * For this case only, we want to ignore the volatility of the
- * nextval() function, since some callers want this.
- */
- if (expr->funcid != F_NEXTVAL_OID &&
- func_volatile(expr->funcid) == PROVOLATILE_VOLATILE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, OpExpr))
- {
- OpExpr *expr = (OpExpr *) node;
-
- set_opfuncid(expr);
- if (func_volatile(expr->opfuncid) == PROVOLATILE_VOLATILE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, DistinctExpr))
- {
- DistinctExpr *expr = (DistinctExpr *) node;
-
- set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
- if (func_volatile(expr->opfuncid) == PROVOLATILE_VOLATILE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, NullIfExpr))
- {
- NullIfExpr *expr = (NullIfExpr *) node;
-
- set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
- if (func_volatile(expr->opfuncid) == PROVOLATILE_VOLATILE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, ScalarArrayOpExpr))
- {
- ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
+ /* Check for volatile functions in node itself */
+ if (check_functions_in_node(node,
+ contain_volatile_functions_not_nextval_checker,
+ context))
+ return true;
- set_sa_opfuncid(expr);
- if (func_volatile(expr->opfuncid) == PROVOLATILE_VOLATILE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, CoerceViaIO))
- {
- CoerceViaIO *expr = (CoerceViaIO *) node;
- Oid iofunc;
- Oid typioparam;
- bool typisvarlena;
-
- /* check the result type's input function */
- getTypeInputInfo(expr->resulttype,
- &iofunc, &typioparam);
- if (func_volatile(iofunc) == PROVOLATILE_VOLATILE)
- return true;
- /* check the input type's output function */
- getTypeOutputInfo(exprType((Node *) expr->arg),
- &iofunc, &typisvarlena);
- if (func_volatile(iofunc) == PROVOLATILE_VOLATILE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, ArrayCoerceExpr))
- {
- ArrayCoerceExpr *expr = (ArrayCoerceExpr *) node;
+ /*
+ * See notes in contain_mutable_functions_walker about why we treat
+ * MinMaxExpr, XmlExpr, and CoerceToDomain as immutable.
+ */
- if (OidIsValid(expr->elemfuncid) &&
- func_volatile(expr->elemfuncid) == PROVOLATILE_VOLATILE)
- return true;
- /* else fall through to check args */
- }
- else if (IsA(node, RowCompareExpr))
+ /* Recurse to check arguments */
+ if (IsA(node, Query))
{
- /* RowCompare probably can't have volatile ops, but check anyway */
- RowCompareExpr *rcexpr = (RowCompareExpr *) node;
- ListCell *opid;
-
- foreach(opid, rcexpr->opnos)
- {
- if (op_volatile(lfirst_oid(opid)) == PROVOLATILE_VOLATILE)
- return true;
- }
- /* else fall through to check args */
+ /* Recurse into subselects */
+ return query_tree_walker((Query *) node,
+ contain_volatile_functions_not_nextval_walker,
+ context, 0);
}
- return expression_tree_walker(node, contain_volatile_functions_not_nextval_walker,
+ return expression_tree_walker(node,
+ contain_volatile_functions_not_nextval_walker,
context);
}
@@ -1343,12 +1142,12 @@ contain_volatile_functions_not_nextval_walker(Node *node, void *context)
*****************************************************************************/
/*
- * Check whether a node tree contains parallel hazards. This is used both
- * on the entire query tree, to see whether the query can be parallelized at
- * all, and also to evaluate whether a particular expression is safe to
- * run in a parallel worker. We could separate these concerns into two
- * different functions, but there's enough overlap that it doesn't seem
- * worthwhile.
+ * Check whether a node tree contains parallel hazards. This is used both on
+ * the entire query tree, to see whether the query can be parallelized at all
+ * (with allow_restricted = true), and also to evaluate whether a particular
+ * expression is safe to run within a parallel worker (with allow_restricted =
+ * false). We could separate these concerns into two different functions, but
+ * there's enough overlap that it doesn't seem worthwhile.
*/
bool
has_parallel_hazard(Node *node, bool allow_restricted)
@@ -1360,48 +1159,47 @@ has_parallel_hazard(Node *node, bool allow_restricted)
}
static bool
+has_parallel_hazard_checker(Oid func_id, void *context)
+{
+ char proparallel = func_parallel(func_id);
+
+ if (((has_parallel_hazard_arg *) context)->allow_restricted)
+ return (proparallel == PROPARALLEL_UNSAFE);
+ else
+ return (proparallel != PROPARALLEL_SAFE);
+}
+
+static bool
has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
{
if (node == NULL)
return false;
+ /* Check for hazardous functions in node itself */
+ if (check_functions_in_node(node, has_parallel_hazard_checker,
+ context))
+ return true;
+
/*
- * When we're first invoked on a completely unplanned tree, we must
- * recurse through Query objects to as to locate parallel-unsafe
- * constructs anywhere in the tree.
- *
- * Later, we'll be called again for specific quals, possibly after some
- * planning has been done, we may encounter SubPlan, SubLink, or
- * AlternativeSubLink nodes. Currently, there's no need to recurse
- * through these; they can't be unsafe, since we've already cleared the
- * entire query of unsafe operations, and they're definitely
- * parallel-restricted.
+ * It should be OK to treat MinMaxExpr as parallel-safe, since btree
+ * opclass support functions are generally parallel-safe. XmlExpr is a
+ * bit more dubious but we can probably get away with it. We err on the
+ * side of caution by treating CoerceToDomain as parallel-restricted.
+ * (Note: in principle that's wrong because a domain constraint could
+ * contain a parallel-unsafe function; but useful constraints probably
+ * never would have such, and assuming they do would cripple use of
+ * parallel query in the presence of domain types.)
*/
- if (IsA(node, Query))
+ if (IsA(node, CoerceToDomain))
{
- Query *query = (Query *) node;
-
- if (query->rowMarks != NULL)
- return true;
-
- /* Recurse into subselects */
- return query_tree_walker(query,
- has_parallel_hazard_walker,
- context, 0);
- }
- else if (IsA(node, SubPlan) ||IsA(node, SubLink) ||
- IsA(node, AlternativeSubPlan) ||IsA(node, Param))
- {
- /*
- * Since we don't have the ability to push subplans down to workers at
- * present, we treat subplan references as parallel-restricted.
- */
if (!context->allow_restricted)
return true;
}
- /* This is just a notational convenience for callers. */
- if (IsA(node, RestrictInfo))
+ /*
+ * As a notational convenience for callers, look through RestrictInfo.
+ */
+ else if (IsA(node, RestrictInfo))
{
RestrictInfo *rinfo = (RestrictInfo *) node;
@@ -1409,111 +1207,56 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
}
/*
- * For each node that might potentially call a function, we need to
- * examine the pg_proc.proparallel marking for that function to see
- * whether it's safe enough for the current value of allow_restricted.
+ * Since we don't have the ability to push subplans down to workers at
+ * present, we treat subplan references as parallel-restricted. We need
+ * not worry about examining their contents; if they are unsafe, we would
+ * have found that out while examining the whole tree before reduction of
+ * sublinks to subplans. (Really we should not see SubLink during a
+ * not-allow_restricted scan, but if we do, return true.)
*/
- if (IsA(node, FuncExpr))
+ else if (IsA(node, SubLink) ||
+ IsA(node, SubPlan) ||
+ IsA(node, AlternativeSubPlan))
{
- FuncExpr *expr = (FuncExpr *) node;
-
- if (parallel_too_dangerous(func_parallel(expr->funcid), context))
- return true;
- }
- else if (IsA(node, Aggref))
- {
- Aggref *aggref = (Aggref *) node;
-
- if (parallel_too_dangerous(func_parallel(aggref->aggfnoid), context))
- return true;
- }
- else if (IsA(node, OpExpr))
- {
- OpExpr *expr = (OpExpr *) node;
-
- set_opfuncid(expr);
- if (parallel_too_dangerous(func_parallel(expr->opfuncid), context))
+ if (!context->allow_restricted)
return true;
}
- else if (IsA(node, DistinctExpr))
- {
- DistinctExpr *expr = (DistinctExpr *) node;
- set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
- if (parallel_too_dangerous(func_parallel(expr->opfuncid), context))
- return true;
- }
- else if (IsA(node, NullIfExpr))
+ /*
+ * We can't pass Params to workers at the moment either, so they are also
+ * parallel-restricted.
+ */
+ else if (IsA(node, Param))
{
- NullIfExpr *expr = (NullIfExpr *) node;
-
- set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
- if (parallel_too_dangerous(func_parallel(expr->opfuncid), context))
+ if (!context->allow_restricted)
return true;
}
- else if (IsA(node, ScalarArrayOpExpr))
- {
- ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
- set_sa_opfuncid(expr);
- if (parallel_too_dangerous(func_parallel(expr->opfuncid), context))
- return true;
- }
- else if (IsA(node, CoerceViaIO))
- {
- CoerceViaIO *expr = (CoerceViaIO *) node;
- Oid iofunc;
- Oid typioparam;
- bool typisvarlena;
-
- /* check the result type's input function */
- getTypeInputInfo(expr->resulttype,
- &iofunc, &typioparam);
- if (parallel_too_dangerous(func_parallel(iofunc), context))
- return true;
- /* check the input type's output function */
- getTypeOutputInfo(exprType((Node *) expr->arg),
- &iofunc, &typisvarlena);
- if (parallel_too_dangerous(func_parallel(iofunc), context))
- return true;
- }
- else if (IsA(node, ArrayCoerceExpr))
+ /*
+ * When we're first invoked on a completely unplanned tree, we must
+ * recurse into subqueries so to as to locate parallel-unsafe constructs
+ * anywhere in the tree.
+ */
+ else if (IsA(node, Query))
{
- ArrayCoerceExpr *expr = (ArrayCoerceExpr *) node;
+ Query *query = (Query *) node;
- if (OidIsValid(expr->elemfuncid) &&
- parallel_too_dangerous(func_parallel(expr->elemfuncid), context))
+ /* SELECT FOR UPDATE/SHARE must be treated as unsafe */
+ if (query->rowMarks != NULL)
return true;
- }
- else if (IsA(node, RowCompareExpr))
- {
- RowCompareExpr *rcexpr = (RowCompareExpr *) node;
- ListCell *opid;
- foreach(opid, rcexpr->opnos)
- {
- Oid opfuncid = get_opcode(lfirst_oid(opid));
-
- if (parallel_too_dangerous(func_parallel(opfuncid), context))
- return true;
- }
+ /* Recurse into subselects */
+ return query_tree_walker(query,
+ has_parallel_hazard_walker,
+ context, 0);
}
- /* ... and recurse to check substructure */
+ /* Recurse to check arguments */
return expression_tree_walker(node,
has_parallel_hazard_walker,
context);
}
-static bool
-parallel_too_dangerous(char proparallel, has_parallel_hazard_arg *context)
-{
- if (context->allow_restricted)
- return proparallel == PROPARALLEL_UNSAFE;
- else
- return proparallel != PROPARALLEL_SAFE;
-}
-
/*****************************************************************************
* Check clauses for nonstrict functions
*****************************************************************************/
@@ -1537,6 +1280,12 @@ contain_nonstrict_functions(Node *clause)
}
static bool
+contain_nonstrict_functions_checker(Oid func_id, void *context)
+{
+ return !func_strict(func_id);
+}
+
+static bool
contain_nonstrict_functions_walker(Node *node, void *context)
{
if (node == NULL)
@@ -1546,6 +1295,14 @@ contain_nonstrict_functions_walker(Node *node, void *context)
/* an aggregate could return non-null with null input */
return true;
}
+ if (IsA(node, GroupingFunc))
+ {
+ /*
+ * A GroupingFunc doesn't evaluate its arguments, and therefore must
+ * be treated as nonstrict.
+ */
+ return true;
+ }
if (IsA(node, WindowFunc))
{
/* a window function could return non-null with null input */
@@ -1558,37 +1315,15 @@ contain_nonstrict_functions_walker(Node *node, void *context)
return true;
/* else fall through to check args */
}
- if (IsA(node, FuncExpr))
- {
- FuncExpr *expr = (FuncExpr *) node;
-
- if (!func_strict(expr->funcid))
- return true;
- /* else fall through to check args */
- }
- if (IsA(node, OpExpr))
- {
- OpExpr *expr = (OpExpr *) node;
-
- set_opfuncid(expr);
- if (!func_strict(expr->opfuncid))
- return true;
- /* else fall through to check args */
- }
if (IsA(node, DistinctExpr))
{
/* IS DISTINCT FROM is inherently non-strict */
return true;
}
if (IsA(node, NullIfExpr))
- return true;
- if (IsA(node, ScalarArrayOpExpr))
{
- ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
-
- if (!is_strict_saop(expr, false))
- return true;
- /* else fall through to check args */
+ /* NULLIF is inherently non-strict */
+ return true;
}
if (IsA(node, BoolExpr))
{
@@ -1613,7 +1348,6 @@ contain_nonstrict_functions_walker(Node *node, void *context)
return true;
if (IsA(node, AlternativeSubPlan))
return true;
- /* ArrayCoerceExpr is strict at the array level, regardless of elemfunc */
if (IsA(node, FieldStore))
return true;
if (IsA(node, CaseExpr))
@@ -1634,6 +1368,15 @@ contain_nonstrict_functions_walker(Node *node, void *context)
return true;
if (IsA(node, BooleanTest))
return true;
+
+ /*
+ * Check other function-containing nodes; but ArrayCoerceExpr is strict at
+ * the array level, regardless of elemfunc.
+ */
+ if (!IsA(node, ArrayCoerceExpr) &&
+ check_functions_in_node(node, contain_nonstrict_functions_checker,
+ context))
+ return true;
return expression_tree_walker(node, contain_nonstrict_functions_walker,
context);
}
@@ -1662,6 +1405,12 @@ contain_leaked_vars(Node *clause)
}
static bool
+contain_leaked_vars_checker(Oid func_id, void *context)
+{
+ return !get_func_leakproof(func_id);
+}
+
+static bool
contain_leaked_vars_walker(Node *node, void *context)
{
if (node == NULL)
@@ -1672,10 +1421,14 @@ contain_leaked_vars_walker(Node *node, void *context)
case T_Var:
case T_Const:
case T_Param:
+ case T_ArrayRef:
case T_ArrayExpr:
+ case T_FieldSelect:
+ case T_FieldStore:
case T_NamedArgExpr:
case T_BoolExpr:
case T_RelabelType:
+ case T_CollateExpr:
case T_CaseExpr:
case T_CaseTestExpr:
case T_RowExpr:
@@ -1691,114 +1444,35 @@ contain_leaked_vars_walker(Node *node, void *context)
break;
case T_FuncExpr:
- {
- FuncExpr *expr = (FuncExpr *) node;
-
- if (!get_func_leakproof(expr->funcid) &&
- contain_var_clause((Node *) expr->args))
- return true;
- }
- break;
-
case T_OpExpr:
- case T_DistinctExpr: /* struct-equivalent to OpExpr */
- case T_NullIfExpr: /* struct-equivalent to OpExpr */
- {
- OpExpr *expr = (OpExpr *) node;
-
- set_opfuncid(expr);
- if (!get_func_leakproof(expr->opfuncid) &&
- contain_var_clause((Node *) expr->args))
- return true;
- }
- break;
-
+ case T_DistinctExpr:
+ case T_NullIfExpr:
case T_ScalarArrayOpExpr:
- {
- ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
-
- set_sa_opfuncid(expr);
- if (!get_func_leakproof(expr->opfuncid) &&
- contain_var_clause((Node *) expr->args))
- return true;
- }
- break;
-
case T_CoerceViaIO:
- {
- CoerceViaIO *expr = (CoerceViaIO *) node;
- Oid funcid;
- Oid ioparam;
- bool leakproof;
- bool varlena;
-
- /*
- * Data may be leaked if either the input or the output
- * function is leaky.
- */
- getTypeInputInfo(exprType((Node *) expr->arg),
- &funcid, &ioparam);
- leakproof = get_func_leakproof(funcid);
-
- /*
- * If the input function is leakproof, then check the output
- * function.
- */
- if (leakproof)
- {
- getTypeOutputInfo(expr->resulttype, &funcid, &varlena);
- leakproof = get_func_leakproof(funcid);
- }
-
- if (!leakproof &&
- contain_var_clause((Node *) expr->arg))
- return true;
- }
- break;
-
case T_ArrayCoerceExpr:
- {
- ArrayCoerceExpr *expr = (ArrayCoerceExpr *) node;
- Oid funcid;
- Oid ioparam;
- bool leakproof;
- bool varlena;
-
- /*
- * Data may be leaked if either the input or the output
- * function is leaky.
- */
- getTypeInputInfo(exprType((Node *) expr->arg),
- &funcid, &ioparam);
- leakproof = get_func_leakproof(funcid);
- /*
- * If the input function is leakproof, then check the output
- * function.
- */
- if (leakproof)
- {
- getTypeOutputInfo(expr->resulttype, &funcid, &varlena);
- leakproof = get_func_leakproof(funcid);
- }
-
- if (!leakproof &&
- contain_var_clause((Node *) expr->arg))
- return true;
- }
+ /*
+ * If node contains a leaky function call, and there's any Var
+ * underneath it, reject.
+ */
+ if (check_functions_in_node(node, contain_leaked_vars_checker,
+ context) &&
+ contain_var_clause(node))
+ return true;
break;
case T_RowCompareExpr:
{
+ /*
+ * It's worth special-casing this because a leaky comparison
+ * function only compromises one pair of row elements, which
+ * might not contain Vars while others do.
+ */
RowCompareExpr *rcexpr = (RowCompareExpr *) node;
ListCell *opid;
ListCell *larg;
ListCell *rarg;
- /*
- * Check the comparison function and arguments passed to it
- * for each pair of row elements.
- */
forthree(opid, rcexpr->opnos,
larg, rcexpr->largs,
rarg, rcexpr->rargs)
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
index 1ed196ea72a..2c2efb1576b 100644
--- a/src/backend/optimizer/util/predtest.c
+++ b/src/backend/optimizer/util/predtest.c
@@ -19,8 +19,8 @@
#include "catalog/pg_type.h"
#include "executor/executor.h"
#include "miscadmin.h"
+#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
-#include "optimizer/planmain.h"
#include "optimizer/predtest.h"
#include "utils/array.h"
#include "utils/inval.h"
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 70a651a8fc3..c958758df62 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -60,8 +60,8 @@
#include "commands/policy.h"
#include "commands/trigger.h"
#include "miscadmin.h"
+#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
-#include "optimizer/planmain.h"
#include "optimizer/prep.h"
#include "optimizer/var.h"
#include "rewrite/rewriteDefine.h"
diff --git a/src/include/nodes/nodeFuncs.h b/src/include/nodes/nodeFuncs.h
index 1dacc996e80..97af1429296 100644
--- a/src/include/nodes/nodeFuncs.h
+++ b/src/include/nodes/nodeFuncs.h
@@ -25,6 +25,9 @@
#define QTW_EXAMINE_RTES 0x10 /* examine RTEs */
#define QTW_DONT_COPY_QUERY 0x20 /* do not copy top Query */
+/* callback function for check_functions_in_node */
+typedef bool (*check_function_callback) (Oid func_id, void *context);
+
extern Oid exprType(const Node *expr);
extern int32 exprTypmod(const Node *expr);
@@ -40,6 +43,13 @@ extern void exprSetInputCollation(Node *expr, Oid inputcollation);
extern int exprLocation(const Node *expr);
+extern void fix_opfuncids(Node *node);
+extern void set_opfuncid(OpExpr *opexpr);
+extern void set_sa_opfuncid(ScalarArrayOpExpr *opexpr);
+
+extern bool check_functions_in_node(Node *node, check_function_callback checker,
+ void *context);
+
extern bool expression_tree_walker(Node *node, bool (*walker) (),
void *context);
extern Node *expression_tree_mutator(Node *node, Node *(*mutator) (),
diff --git a/src/include/optimizer/planmain.h b/src/include/optimizer/planmain.h
index da9c6404775..a48400b1573 100644
--- a/src/include/optimizer/planmain.h
+++ b/src/include/optimizer/planmain.h
@@ -107,9 +107,6 @@ extern bool query_is_distinct_for(Query *query, List *colnos, List *opids);
* prototypes for plan/setrefs.c
*/
extern Plan *set_plan_references(PlannerInfo *root, Plan *plan);
-extern void fix_opfuncids(Node *node);
-extern void set_opfuncid(OpExpr *opexpr);
-extern void set_sa_opfuncid(ScalarArrayOpExpr *opexpr);
extern void record_plan_function_dependency(PlannerInfo *root, Oid funcid);
extern void extract_query_dependencies(Node *query,
List **relationOids,