author    Tom Lane <tgl@sss.pgh.pa.us>  2016-07-15 17:22:56 -0400
committer Tom Lane <tgl@sss.pgh.pa.us>  2016-07-15 17:23:02 -0400
commit    45639a0525a58a2700cf46d4c934d6de78349dac (patch)
tree      a4333eca63b541394555355bdfbea0fb95fbd35e /src/backend/optimizer/plan/planner.c
parent    533e9c6b0628f6557ddcaf3e5177081878ea7cb6 (diff)
Avoid invalidating all foreign-join cached plans when user mappings change.
We must not push down a foreign join when the foreign tables involved should be accessed under different user mappings. Previously we tried to enforce that rule literally during planning, but that meant that the resulting plans were dependent on the current contents of the pg_user_mapping catalog, and we had to blow away all cached plans containing any remote join when anything at all changed in pg_user_mapping. This could have been improved somewhat, but the fact that a syscache inval callback has very limited info about what changed made it hard to do better within that design.

Instead, let's change the planner to not consider user mappings per se, but to allow a foreign join if both RTEs have the same checkAsUser value. If they do, then they necessarily will use the same user mapping at runtime, and we don't need to know specifically which one that is. Post-plan-time changes in pg_user_mapping no longer require any plan invalidation.

This rule does give up some optimization ability, to wit where two foreign table references come from views with different owners or one's from a view and one's directly in the query, but nonetheless the same user mapping would have applied. We'll sacrifice the first case, but to not regress more than we have to in the second case, allow a foreign join involving both zero and nonzero checkAsUser values if the nonzero one is the same as the prevailing effective userID. In that case, mark the plan as only runnable by that userID.

The plancache code already had a notion of plans being userID-specific, in order to support RLS. It was a little confused though, in particular lacking clarity of thought as to whether it was the rewritten query or just the finished plan that's dependent on the userID. Rearrange that code so that it's clearer what depends on which, and so that the same logic applies to both RLS-injected role dependency and foreign-join-injected role dependency.

Note that this patch doesn't remove the other issue mentioned in the original complaint, which is that while we'll reliably stop using a foreign join if it's disallowed in a new context, we might fail to start using a foreign join if it's now allowed, but we previously created a generic cached plan that didn't use one. It was agreed that the chance of winning that way was not high enough to justify the much larger number of plan invalidations that would have to occur if we tried to cause it to happen.

In passing, clean up randomly-varying spelling of EXPLAIN commands in postgres_fdw.sql, and fix a COSTS ON example that had been allowed to leak into the committed tests.

This reverts most of commits fbe5a3fb7 and 5d4171d1c, which were the previous attempt at ensuring we wouldn't push down foreign joins that span permissions contexts.

Etsuro Fujita and Tom Lane

Discussion: <d49c1e5b-f059-20f4-c132-e9752ee0113e@lab.ntt.co.jp>
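To make the pushdown rule concrete, here is a minimal standalone C sketch of the decision the commit message describes, not the actual PostgreSQL implementation. The struct and function names (RelJoinInfo, join_is_pushable) are hypothetical; only serverid, userid, and useridiscurrent mirror field names visible in the diff below.

/*
 * Illustrative sketch only: decide whether a join of two foreign rels may be
 * handed to the FDW, using checkAsUser comparison instead of user mappings.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int Oid;
#define InvalidOid ((Oid) 0)

typedef struct RelJoinInfo
{
	Oid		serverid;			/* foreign server, or InvalidOid */
	Oid		userid;				/* RTE checkAsUser, or InvalidOid */
	bool	useridiscurrent;	/* plan valid only for the current user? */
} RelJoinInfo;

static bool
join_is_pushable(const RelJoinInfo *rel1, const RelJoinInfo *rel2,
				 Oid current_user_id, RelJoinInfo *joinrel)
{
	/* Both sides must come from the same foreign server. */
	if (rel1->serverid == InvalidOid || rel1->serverid != rel2->serverid)
		return false;

	joinrel->serverid = rel1->serverid;

	if (rel1->userid == rel2->userid)
	{
		/* Same checkAsUser on both sides: same user mapping at runtime. */
		joinrel->userid = rel1->userid;
		joinrel->useridiscurrent = rel1->useridiscurrent || rel2->useridiscurrent;
		return true;
	}

	/*
	 * One side has checkAsUser = 0 (check as current user) and the other
	 * names the prevailing effective user: allow the join, but mark the
	 * plan as runnable only by that user (this is what ultimately sets the
	 * planner's dependsOnRole flag).
	 */
	if ((rel1->userid == InvalidOid && rel2->userid == current_user_id) ||
		(rel2->userid == InvalidOid && rel1->userid == current_user_id))
	{
		joinrel->userid = current_user_id;
		joinrel->useridiscurrent = true;
		return true;
	}

	return false;				/* different permissions contexts: no pushdown */
}

int
main(void)
{
	RelJoinInfo a = {10, 0, false};		/* referenced directly in the query */
	RelJoinInfo b = {10, 42, false};	/* referenced via a view owned by user 42 */
	RelJoinInfo join;

	printf("pushdown as user 42: %d\n", join_is_pushable(&a, &b, 42, &join));
	printf("pushdown as user 77: %d\n", join_is_pushable(&a, &b, 77, &join));
	return 0;
}

Running the sketch, the join is pushable only when the effective user matches the view owner's checkAsUser (42); in that case a real plan would carry the role dependency and be reusable only by that role.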
Diffstat (limited to 'src/backend/optimizer/plan/planner.c')
-rw-r--r--  src/backend/optimizer/plan/planner.c  25
1 file changed, 13 insertions, 12 deletions
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index f484fb91c11..b2656283251 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -219,8 +219,7 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
glob->lastRowMarkId = 0;
glob->lastPlanNodeId = 0;
glob->transientPlan = false;
- glob->hasRowSecurity = false;
- glob->hasForeignJoin = false;
+ glob->dependsOnRole = false;
/*
* Assess whether it's feasible to use parallel mode for this query. We
@@ -405,6 +404,8 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
result->hasModifyingCTE = parse->hasModifyingCTE;
result->canSetTag = parse->canSetTag;
result->transientPlan = glob->transientPlan;
+ result->dependsOnRole = glob->dependsOnRole;
+ result->parallelModeNeeded = glob->parallelModeNeeded;
result->planTree = top_plan;
result->rtable = glob->finalrtable;
result->resultRelations = glob->resultRelations;
@@ -415,9 +416,6 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
result->relationOids = glob->relationOids;
result->invalItems = glob->invalItems;
result->nParamExec = glob->nParamExec;
- result->hasRowSecurity = glob->hasRowSecurity;
- result->parallelModeNeeded = glob->parallelModeNeeded;
- result->hasForeignJoin = glob->hasForeignJoin;
return result;
}
@@ -1628,8 +1626,6 @@ grouping_planner(PlannerInfo *root, bool inheritance_update,
* This may add new security barrier subquery RTEs to the rangetable.
*/
expand_security_quals(root, tlist);
- if (parse->hasRowSecurity)
- root->glob->hasRowSecurity = true;
/*
* We are now done hacking up the query's targetlist. Most of the
@@ -1960,7 +1956,8 @@ grouping_planner(PlannerInfo *root, bool inheritance_update,
* If the current_rel belongs to a single FDW, so does the final_rel.
*/
final_rel->serverid = current_rel->serverid;
- final_rel->umid = current_rel->umid;
+ final_rel->userid = current_rel->userid;
+ final_rel->useridiscurrent = current_rel->useridiscurrent;
final_rel->fdwroutine = current_rel->fdwroutine;
/*
@@ -3337,7 +3334,8 @@ create_grouping_paths(PlannerInfo *root,
* If the input rel belongs to a single FDW, so does the grouped rel.
*/
grouped_rel->serverid = input_rel->serverid;
- grouped_rel->umid = input_rel->umid;
+ grouped_rel->userid = input_rel->userid;
+ grouped_rel->useridiscurrent = input_rel->useridiscurrent;
grouped_rel->fdwroutine = input_rel->fdwroutine;
/*
@@ -3891,7 +3889,8 @@ create_window_paths(PlannerInfo *root,
* If the input rel belongs to a single FDW, so does the window rel.
*/
window_rel->serverid = input_rel->serverid;
- window_rel->umid = input_rel->umid;
+ window_rel->userid = input_rel->userid;
+ window_rel->useridiscurrent = input_rel->useridiscurrent;
window_rel->fdwroutine = input_rel->fdwroutine;
/*
@@ -4071,7 +4070,8 @@ create_distinct_paths(PlannerInfo *root,
* If the input rel belongs to a single FDW, so does the distinct_rel.
*/
distinct_rel->serverid = input_rel->serverid;
- distinct_rel->umid = input_rel->umid;
+ distinct_rel->userid = input_rel->userid;
+ distinct_rel->useridiscurrent = input_rel->useridiscurrent;
distinct_rel->fdwroutine = input_rel->fdwroutine;
/* Estimate number of distinct rows there will be */
@@ -4279,7 +4279,8 @@ create_ordered_paths(PlannerInfo *root,
* If the input rel belongs to a single FDW, so does the ordered_rel.
*/
ordered_rel->serverid = input_rel->serverid;
- ordered_rel->umid = input_rel->umid;
+ ordered_rel->userid = input_rel->userid;
+ ordered_rel->useridiscurrent = input_rel->useridiscurrent;
ordered_rel->fdwroutine = input_rel->fdwroutine;
foreach(lc, input_rel->pathlist)