Diffstat (limited to 'src/backend/commands/explain.c')
-rw-r--r--  src/backend/commands/explain.c  144
1 file changed, 144 insertions, 0 deletions
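For orientation, a Result Cache node typically sits above the parameterized inner side of a nested loop, and with this patch it renders roughly like the following simplified, hand-written example (table and index names are made up; this is not output taken from the commit):

 Nested Loop
   ->  Seq Scan on t1
   ->  Result Cache
         Cache Key: t1.x
         ->  Index Scan using t2_x_idx on t2
               Index Cond: (x = t1.x)

Under EXPLAIN ANALYZE, the node additionally reports cache hits, misses, evictions, overflows, and peak memory usage, as implemented by show_resultcache_info() below.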
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 872aaa7aedc..ede8cec9472 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -108,6 +108,8 @@ static void show_sort_info(SortState *sortstate, ExplainState *es);
static void show_incremental_sort_info(IncrementalSortState *incrsortstate,
ExplainState *es);
static void show_hash_info(HashState *hashstate, ExplainState *es);
+static void show_resultcache_info(ResultCacheState *rcstate, List *ancestors,
+ ExplainState *es);
static void show_hashagg_info(AggState *hashstate, ExplainState *es);
static void show_tidbitmap_info(BitmapHeapScanState *planstate,
ExplainState *es);
@@ -1284,6 +1286,9 @@ ExplainNode(PlanState *planstate, List *ancestors,
case T_Material:
pname = sname = "Materialize";
break;
+ case T_ResultCache:
+ pname = sname = "Result Cache";
+ break;
case T_Sort:
pname = sname = "Sort";
break;
@@ -1996,6 +2001,10 @@ ExplainNode(PlanState *planstate, List *ancestors,
case T_Hash:
show_hash_info(castNode(HashState, planstate), es);
break;
+ case T_ResultCache:
+ show_resultcache_info(castNode(ResultCacheState, planstate),
+ ancestors, es);
+ break;
default:
break;
}
@@ -3064,6 +3073,141 @@ show_hash_info(HashState *hashstate, ExplainState *es)
}
/*
+ * Show information on result cache hits/misses/evictions and memory usage.
+ */
+static void
+show_resultcache_info(ResultCacheState *rcstate, List *ancestors,
+ ExplainState *es)
+{
+ Plan *plan = ((PlanState *) rcstate)->plan;
+ ListCell *lc;
+ List *context;
+ StringInfoData keystr;
+ char *separator = "";
+ bool useprefix;
+ int64 memPeakKb;
+
+ initStringInfo(&keystr);
+
+ /*
+ * It's hard to imagine having a result cache with fewer than 2 RTEs, but
+ * let's just keep the same useprefix logic as elsewhere in this file.
+ */
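+ /*
+ * (useprefix controls whether deparsed column references are qualified
+ * with their range-table alias, e.g. "t1.x" rather than just "x".)
+ */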
+ useprefix = list_length(es->rtable) > 1 || es->verbose;
+
+ /* Set up deparsing context */
+ context = set_deparse_context_plan(es->deparse_cxt,
+ plan,
+ ancestors);
+
+ foreach(lc, ((ResultCache *) plan)->param_exprs)
+ {
+ Node *expr = (Node *) lfirst(lc);
+
+ appendStringInfoString(&keystr, separator);
+
+ appendStringInfoString(&keystr, deparse_expression(expr, context,
+ useprefix, false));
+ separator = ", ";
+ }
+
+ if (es->format != EXPLAIN_FORMAT_TEXT)
+ {
+ ExplainPropertyText("Cache Key", keystr.data, es);
+ }
+ else
+ {
+ ExplainIndentText(es);
+ appendStringInfo(es->str, "Cache Key: %s\n", keystr.data);
+ }
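+
+ /*
+ * For example (illustrative values): if param_exprs deparses to
+ * "t1.x, t1.y", text format prints "Cache Key: t1.x, t1.y", while the
+ * non-text formats emit the same string as a "Cache Key" property.
+ */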
+
+ pfree(keystr.data);
+
+ if (!es->analyze)
+ return;
+
+ /*
+ * mem_peak is only set once we have freed memory, so we must use mem_used
+ * when mem_peak is 0.
+ */
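+ /*
+ * Note that "(bytes + 1023) / 1024" below rounds the byte count up to the
+ * nearest whole kilobyte, so any non-zero usage reports as at least 1kB.
+ */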
+ if (rcstate->stats.mem_peak > 0)
+ memPeakKb = (rcstate->stats.mem_peak + 1023) / 1024;
+ else
+ memPeakKb = (rcstate->mem_used + 1023) / 1024;
+
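+ /*
+ * In text format the block below emits a single line; with made-up numbers
+ * it might read:
+ * "Hits: 980 Misses: 20 Evictions: 0 Overflows: 0 Memory Usage: 4kB"
+ */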
+ if (rcstate->stats.cache_misses > 0)
+ {
+ if (es->format != EXPLAIN_FORMAT_TEXT)
+ {
+ ExplainPropertyInteger("Cache Hits", NULL, rcstate->stats.cache_hits, es);
+ ExplainPropertyInteger("Cache Misses", NULL, rcstate->stats.cache_misses, es);
+ ExplainPropertyInteger("Cache Evictions", NULL, rcstate->stats.cache_evictions, es);
+ ExplainPropertyInteger("Cache Overflows", NULL, rcstate->stats.cache_overflows, es);
+ ExplainPropertyInteger("Peak Memory Usage", "kB", memPeakKb, es);
+ }
+ else
+ {
+ ExplainIndentText(es);
+ appendStringInfo(es->str,
+ "Hits: " UINT64_FORMAT " Misses: " UINT64_FORMAT " Evictions: " UINT64_FORMAT " Overflows: " UINT64_FORMAT " Memory Usage: " INT64_FORMAT "kB\n",
+ rcstate->stats.cache_hits,
+ rcstate->stats.cache_misses,
+ rcstate->stats.cache_evictions,
+ rcstate->stats.cache_overflows,
+ memPeakKb);
+ }
+ }
+
+ if (rcstate->shared_info == NULL)
+ return;
+
+ /* Show details from parallel workers */
+ for (int n = 0; n < rcstate->shared_info->num_workers; n++)
+ {
+ ResultCacheInstrumentation *si;
+
+ si = &rcstate->shared_info->sinstrument[n];
+
+ if (es->workers_state)
+ ExplainOpenWorker(n, es);
+
+ /*
+ * Since the worker's ResultCacheState.mem_used field is unavailable
+ * to us, ExecEndResultCache will have set the
+ * ResultCacheInstrumentation.mem_peak field for us. No need to do
+ * the zero checks like we did for the serial case above.
+ */
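+ /*
+ * When per-worker output is shown, each worker's statistics appear under
+ * its own worker heading, using the same line format as for the leader
+ * above.
+ */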
+ memPeakKb = (si->mem_peak + 1023) / 1024;
+
+ if (es->format == EXPLAIN_FORMAT_TEXT)
+ {
+ ExplainIndentText(es);
+ appendStringInfo(es->str,
+ "Hits: " UINT64_FORMAT " Misses: " UINT64_FORMAT " Evictions: " UINT64_FORMAT " Overflows: " UINT64_FORMAT " Memory Usage: " INT64_FORMAT "kB\n",
+ si->cache_hits, si->cache_misses,
+ si->cache_evictions, si->cache_overflows,
+ memPeakKb);
+ }
+ else
+ {
+ ExplainPropertyInteger("Cache Hits", NULL,
+ si->cache_hits, es);
+ ExplainPropertyInteger("Cache Misses", NULL,
+ si->cache_misses, es);
+ ExplainPropertyInteger("Cache Evictions", NULL,
+ si->cache_evictions, es);
+ ExplainPropertyInteger("Cache Overflows", NULL,
+ si->cache_overflows, es);
+ ExplainPropertyInteger("Peak Memory Usage", "kB", memPeakKb,
+ es);
+ }
+
+ if (es->workers_state)
+ ExplainCloseWorker(n, es);
+ }
+}
+
+/*
* Show information on hash aggregate memory usage and batches.
*/
static void