about summary refs log tree commit diff
path: root/src/backend/executor/nodeIndexscan.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/backend/executor/nodeIndexscan.c')
-rw-r--r--  src/backend/executor/nodeIndexscan.c  14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index 37fca4c666c..0ece6c6702f 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.87 2003/11/29 19:51:48 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.88 2003/12/30 20:05:05 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -953,22 +953,28 @@ static void
create_duphash(IndexScanState *node)
{
HASHCTL hash_ctl;
+ long nbuckets;
+ node->iss_MaxHash = (SortMem * 1024L) /
+ (MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(sizeof(DupHashTabEntry)));
MemSet(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = SizeOfIptrData;
hash_ctl.entrysize = sizeof(DupHashTabEntry);
hash_ctl.hash = tag_hash;
hash_ctl.hcxt = CurrentMemoryContext;
+ nbuckets = (long) ceil(node->ss.ps.plan->plan_rows);
+ if (nbuckets < 1)
+ nbuckets = 1;
+ if (nbuckets > node->iss_MaxHash)
+ nbuckets = node->iss_MaxHash;
node->iss_DupHash = hash_create("DupHashTable",
- (long) ceil(node->ss.ps.plan->plan_rows),
+ nbuckets,
&hash_ctl,
HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
if (node->iss_DupHash == NULL)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
- node->iss_MaxHash = (SortMem * 1024L) /
- (MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(sizeof(DupHashTabEntry)));
}
int