From f3d3118532175541a9a96ed78881a3b04a057128 Mon Sep 17 00:00:00 2001
From: Andres Freund
Date: Sat, 16 May 2015 03:40:59 +0200
Subject: Support GROUPING SETS, CUBE and ROLLUP.

This SQL-standard functionality allows data to be aggregated by several
different GROUP BY clauses at once. Each grouping set returns rows in
which the columns grouped only in other sets are set to NULL.

This could previously be achieved only by computing each grouping as a
separate query and combining the results with UNION ALL. Besides being
considerably more concise, grouping sets will in many cases be faster,
requiring only one scan over the underlying data.

The current implementation of grouping sets only supports sort-based
processing of the input. Individual sets that share a sort order are
computed in one pass. If there are sets that don't share a sort order,
additional sort and aggregation steps are performed. These additional
passes are sourced by the previous sort step, thus avoiding repeated
scans of the source data.

The code is structured so that support for purely hash-based
aggregation, or for a mix of hashing and sorting, can be added later.
Sorting was chosen to be supported first because it is the most generic
method of implementation.

Instead of representing the chain of sort and aggregation steps as
full-blown planner and executor nodes, as earlier versions of the patch
did, all but the first sort are performed inside the aggregation node
itself. This avoids the unusual gymnastics that would otherwise be
needed to return both aggregated and non-aggregated tuples from
underlying nodes, as well as having to shut down underlying nodes early
to limit memory usage. The optimizer still builds Sort/Agg nodes to
describe each phase, but they are not part of the plan tree; instead
they are carried as additional data by the aggregation node, being a
convenient and preexisting way to describe aggregation and sorting. The
first (and possibly only) sort step is still performed as a separate
execution step. That retains similarity with existing GROUP BY plans,
makes rescans fairly simple, avoids very deep plans (which lead to slow
EXPLAIN output), and makes it easy to skip the sorting step when the
underlying data is already sorted by other means.

A somewhat ugly aspect of this patch is a grammar ambiguity between the
new CUBE keyword and the cube extension's functions named cube (and
rollup). To avoid breaking existing deployments of the cube extension,
the extension has not been renamed, nor has cube been made a reserved
keyword. Instead, precedence hacking is used so that GROUP BY cube(..)
refers to the CUBE grouping-sets feature and not to the function
cube(). To actually group by a function named cube(), unlikely as that
might be, the function name has to be quoted.

Needs a catversion bump because stored rules may change.
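For illustration only (the table and column names sales, brand, size and
quantity are hypothetical and not part of this patch), a grouping-sets
query and the UNION ALL formulation it replaces could look like this:

    -- hypothetical example table: sales(brand text, size text, quantity int)
    SELECT brand, size, sum(quantity)
      FROM sales
     GROUP BY GROUPING SETS ((brand), (size), ());

    -- roughly equivalent UNION ALL formulation; needs three scans of
    -- sales instead of one, and cannot use GROUPING() to distinguish a
    -- "grouped-out" NULL from a NULL data value
    SELECT brand, NULL AS size, sum(quantity) FROM sales GROUP BY brand
    UNION ALL
    SELECT NULL,  size,         sum(quantity) FROM sales GROUP BY size
    UNION ALL
    SELECT NULL,  NULL,         sum(quantity) FROM sales;

CUBE and ROLLUP are shorthands that expand to particular lists of
grouping sets: all subsets, respectively all prefixes, of the listed
columns.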
Author: Andrew Gierth and Atri Sharma, with contributions from Andres Freund
Reviewed-By: Andres Freund, Noah Misch, Tom Lane, Svenne Krap, Tomas Vondra, Erik Rijkers, Marti Raudsepp, Pavel Stehule
Discussion: CAOeZVidmVRe2jU6aMk_5qkxnB7dfmPROzM7Ur8JPW5j8Y5X-Lw@mail.gmail.com
---
 src/backend/nodes/nodeFuncs.c | 51 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 51 insertions(+)
(limited to 'src/backend/nodes/nodeFuncs.c')

diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c
index 42d62d32d93..41763931339 100644
--- a/src/backend/nodes/nodeFuncs.c
+++ b/src/backend/nodes/nodeFuncs.c
@@ -54,6 +54,9 @@ exprType(const Node *expr)
 		case T_Aggref:
 			type = ((const Aggref *) expr)->aggtype;
 			break;
+		case T_GroupingFunc:
+			type = INT4OID;
+			break;
 		case T_WindowFunc:
 			type = ((const WindowFunc *) expr)->wintype;
 			break;
@@ -750,6 +753,9 @@ exprCollation(const Node *expr)
 		case T_Aggref:
 			coll = ((const Aggref *) expr)->aggcollid;
 			break;
+		case T_GroupingFunc:
+			coll = InvalidOid;
+			break;
 		case T_WindowFunc:
 			coll = ((const WindowFunc *) expr)->wincollid;
 			break;
@@ -986,6 +992,9 @@ exprSetCollation(Node *expr, Oid collation)
 		case T_Aggref:
 			((Aggref *) expr)->aggcollid = collation;
 			break;
+		case T_GroupingFunc:
+			Assert(!OidIsValid(collation));
+			break;
 		case T_WindowFunc:
 			((WindowFunc *) expr)->wincollid = collation;
 			break;
@@ -1202,6 +1211,9 @@ exprLocation(const Node *expr)
 			/* function name should always be the first thing */
 			loc = ((const Aggref *) expr)->location;
 			break;
+		case T_GroupingFunc:
+			loc = ((const GroupingFunc *) expr)->location;
+			break;
 		case T_WindowFunc:
 			/* function name should always be the first thing */
 			loc = ((const WindowFunc *) expr)->location;
@@ -1491,6 +1503,9 @@ exprLocation(const Node *expr)
 			/* XMLSERIALIZE keyword should always be the first thing */
 			loc = ((const XmlSerialize *) expr)->location;
 			break;
+		case T_GroupingSet:
+			loc = ((const GroupingSet *) expr)->location;
+			break;
 		case T_WithClause:
 			loc = ((const WithClause *) expr)->location;
 			break;
@@ -1685,6 +1700,15 @@ expression_tree_walker(Node *node,
 					return true;
 			}
 			break;
+		case T_GroupingFunc:
+			{
+				GroupingFunc *grouping = (GroupingFunc *) node;
+
+				if (expression_tree_walker((Node *) grouping->args,
+										   walker, context))
+					return true;
+			}
+			break;
 		case T_WindowFunc:
 			{
 				WindowFunc *expr = (WindowFunc *) node;
@@ -2243,6 +2267,29 @@ expression_tree_mutator(Node *node,
 				return (Node *) newnode;
 			}
 			break;
+		case T_GroupingFunc:
+			{
+				GroupingFunc *grouping = (GroupingFunc *) node;
+				GroupingFunc *newnode;
+
+				FLATCOPY(newnode, grouping, GroupingFunc);
+				MUTATE(newnode->args, grouping->args, List *);
+
+				/*
+				 * We assume here that mutating the arguments does not change
+				 * the semantics, i.e. that the arguments are not mutated in a
+				 * way that makes them semantically different from their
+				 * previously matching expressions in the GROUP BY clause.
+				 *
+				 * If a mutator somehow wanted to do this, it would have to
+				 * handle the refs and cols lists itself as appropriate.
+				 */
+				newnode->refs = list_copy(grouping->refs);
+				newnode->cols = list_copy(grouping->cols);
+
+				return (Node *) newnode;
+			}
+			break;
 		case T_WindowFunc:
 			{
 				WindowFunc *wfunc = (WindowFunc *) node;
@@ -2962,6 +3009,8 @@ raw_expression_tree_walker(Node *node,
 			break;
 		case T_RangeVar:
 			return walker(((RangeVar *) node)->alias, context);
+		case T_GroupingFunc:
+			return walker(((GroupingFunc *) node)->args, context);
 		case T_SubLink:
 			{
 				SubLink *sublink = (SubLink *) node;
@@ -3287,6 +3336,8 @@ raw_expression_tree_walker(Node *node,
 				/* for now, constraints are ignored */
 			}
 			break;
+		case T_GroupingSet:
+			return walker(((GroupingSet *) node)->content, context);
 		case T_LockingClause:
 			return walker(((LockingClause *) node)->lockedRels, context);
 		case T_XmlSerialize:
-- 
cgit v1.2.3