Diffstat (limited to 'src/backend/commands/explain.c')
-rw-r--r--    src/backend/commands/explain.c    60
1 file changed, 44 insertions, 16 deletions
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 447f69d044e..7e4fbafc535 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -19,7 +19,7 @@
#include "commands/createas.h"
#include "commands/defrem.h"
#include "commands/prepare.h"
-#include "executor/hashjoin.h"
+#include "executor/nodeHash.h"
#include "foreign/fdwapi.h"
#include "nodes/extensible.h"
#include "nodes/nodeFuncs.h"
@@ -2379,34 +2379,62 @@ show_sort_info(SortState *sortstate, ExplainState *es)
static void
show_hash_info(HashState *hashstate, ExplainState *es)
{
-    HashJoinTable hashtable;
+    HashInstrumentation *hinstrument = NULL;

-    hashtable = hashstate->hashtable;
+    /*
+     * In a parallel query, the leader process may or may not have run the
+     * hash join, and even if it did it may not have built a hash table due to
+     * timing (if it started late it might have seen no tuples in the outer
+     * relation and skipped building the hash table). Therefore we have to be
+     * prepared to get instrumentation data from a worker if there is no hash
+     * table.
+     */
+    if (hashstate->hashtable)
+    {
+        hinstrument = (HashInstrumentation *)
+            palloc(sizeof(HashInstrumentation));
+        ExecHashGetInstrumentation(hinstrument, hashstate->hashtable);
+    }
+    else if (hashstate->shared_info)
+    {
+        SharedHashInfo *shared_info = hashstate->shared_info;
+        int         i;
+
+        /* Find the first worker that built a hash table. */
+        for (i = 0; i < shared_info->num_workers; ++i)
+        {
+            if (shared_info->hinstrument[i].nbatch > 0)
+            {
+                hinstrument = &shared_info->hinstrument[i];
+                break;
+            }
+        }
+    }

-    if (hashtable)
+    if (hinstrument)
    {
-        long        spacePeakKb = (hashtable->spacePeak + 1023) / 1024;
+        long        spacePeakKb = (hinstrument->space_peak + 1023) / 1024;

        if (es->format != EXPLAIN_FORMAT_TEXT)
        {
-            ExplainPropertyLong("Hash Buckets", hashtable->nbuckets, es);
+            ExplainPropertyLong("Hash Buckets", hinstrument->nbuckets, es);
            ExplainPropertyLong("Original Hash Buckets",
-                                hashtable->nbuckets_original, es);
-            ExplainPropertyLong("Hash Batches", hashtable->nbatch, es);
+                                hinstrument->nbuckets_original, es);
+            ExplainPropertyLong("Hash Batches", hinstrument->nbatch, es);
            ExplainPropertyLong("Original Hash Batches",
-                                hashtable->nbatch_original, es);
+                                hinstrument->nbatch_original, es);
            ExplainPropertyLong("Peak Memory Usage", spacePeakKb, es);
        }
-        else if (hashtable->nbatch_original != hashtable->nbatch ||
-                 hashtable->nbuckets_original != hashtable->nbuckets)
+        else if (hinstrument->nbatch_original != hinstrument->nbatch ||
+                 hinstrument->nbuckets_original != hinstrument->nbuckets)
        {
            appendStringInfoSpaces(es->str, es->indent * 2);
            appendStringInfo(es->str,
                             "Buckets: %d (originally %d)  Batches: %d (originally %d)  Memory Usage: %ldkB\n",
-                             hashtable->nbuckets,
-                             hashtable->nbuckets_original,
-                             hashtable->nbatch,
-                             hashtable->nbatch_original,
+                             hinstrument->nbuckets,
+                             hinstrument->nbuckets_original,
+                             hinstrument->nbatch,
+                             hinstrument->nbatch_original,
                             spacePeakKb);
        }
        else
@@ -2414,7 +2442,7 @@ show_hash_info(HashState *hashstate, ExplainState *es)
            appendStringInfoSpaces(es->str, es->indent * 2);
            appendStringInfo(es->str,
                             "Buckets: %d  Batches: %d  Memory Usage: %ldkB\n",
-                             hashtable->nbuckets, hashtable->nbatch,
+                             hinstrument->nbuckets, hinstrument->nbatch,
                             spacePeakKb);
        }
    }
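
For readers without the PostgreSQL headers at hand, here is a standalone sketch of the leader-or-worker fallback the patched show_hash_info() implements: report the leader's own hash table statistics when it built one, otherwise take the first worker slot whose nbatch is nonzero. The struct fields mirror exactly what the diff dereferences (nbuckets, nbuckets_original, nbatch, nbatch_original, space_peak, num_workers); the real HashInstrumentation and SharedHashInfo declarations live in PostgreSQL's executor headers and may differ in detail (the worker array there is sized per query, not fixed), and the type and helper names below are invented for illustration.

/* Minimal sketch, not PostgreSQL code: pick which instrumentation to report. */
#include <stddef.h>
#include <stdio.h>

typedef struct HashStatsSketch
{
    int     nbuckets;           /* buckets at end of execution */
    int     nbuckets_original;  /* buckets when the table was first built */
    int     nbatch;             /* batches at end of execution */
    int     nbatch_original;    /* batches when the table was first built */
    size_t  space_peak;         /* peak memory usage in bytes */
} HashStatsSketch;

typedef struct SharedHashStatsSketch
{
    int             num_workers;
    HashStatsSketch hinstrument[8];     /* fixed size only for this sketch */
} SharedHashStatsSketch;

/* Return the stats to report, or NULL if nobody built a hash table. */
static HashStatsSketch *
pick_hash_stats(HashStatsSketch *leader, SharedHashStatsSketch *shared)
{
    int     i;

    if (leader != NULL)
        return leader;          /* leader participated and built the table */
    if (shared != NULL)
    {
        /* Find the first worker that built a hash table. */
        for (i = 0; i < shared->num_workers; ++i)
        {
            if (shared->hinstrument[i].nbatch > 0)
                return &shared->hinstrument[i];
        }
    }
    return NULL;                /* EXPLAIN shows no hash details in this case */
}

int
main(void)
{
    /* Leader never built a table; worker 1 built a single-batch table. */
    SharedHashStatsSketch shared = {
        .num_workers = 2,
        .hinstrument = {
            {0},
            {.nbuckets = 1024, .nbuckets_original = 1024,
             .nbatch = 1, .nbatch_original = 1, .space_peak = 9 * 1024},
        },
    };
    HashStatsSketch *h = pick_hash_stats(NULL, &shared);

    if (h != NULL)
        printf("Buckets: %d  Batches: %d  Memory Usage: %ldkB\n",
               h->nbuckets, h->nbatch,
               (long) ((h->space_peak + 1023) / 1024));
    return 0;
}

Compiled as a plain C program this prints "Buckets: 1024  Batches: 1  Memory Usage: 9kB", the same text-format line the unchanged else branch above emits: the reported numbers come from whichever participant actually built the hash table.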