path: root/src/backend/optimizer/geqo
Diffstat (limited to 'src/backend/optimizer/geqo')
-rw-r--r--  src/backend/optimizer/geqo/geqo_erx.c             45
-rw-r--r--  src/backend/optimizer/geqo/geqo_eval.c            91
-rw-r--r--  src/backend/optimizer/geqo/geqo_main.c             9
-rw-r--r--  src/backend/optimizer/geqo/geqo_misc.c            10
-rw-r--r--  src/backend/optimizer/geqo/geqo_pool.c            20
-rw-r--r--  src/backend/optimizer/geqo/geqo_recombination.c   10
-rw-r--r--  src/backend/optimizer/geqo/geqo_selection.c       15
7 files changed, 97 insertions, 103 deletions
diff --git a/src/backend/optimizer/geqo/geqo_erx.c b/src/backend/optimizer/geqo/geqo_erx.c
index 05d7602fefe..9c7a3425858 100644
--- a/src/backend/optimizer/geqo/geqo_erx.c
+++ b/src/backend/optimizer/geqo/geqo_erx.c
@@ -3,7 +3,7 @@
* geqo_erx.c
* edge recombination crossover [ER]
*
-* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_erx.c,v 1.19 2003/11/29 22:39:49 pgsql Exp $
+* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_erx.c,v 1.20 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,8 +55,8 @@ alloc_edge_table(int num_gene)
Edge *edge_table;
/*
- * palloc one extra location so that nodes numbered 1..n can be
- * indexed directly; 0 will not be used
+ * palloc one extra location so that nodes numbered 1..n can be indexed
+ * directly; 0 will not be used
*/
edge_table = (Edge *) palloc((num_gene + 1) * sizeof(Edge));
@@ -94,8 +94,7 @@ gimme_edge_table(Gene *tour1, Gene *tour2, int num_gene, Edge *edge_table)
int i,
index1,
index2;
- int edge_total; /* total number of unique edges in two
- * genes */
+ int edge_total; /* total number of unique edges in two genes */
/* at first clear the edge table's old data */
for (i = 1; i <= num_gene; i++)
@@ -111,15 +110,15 @@ gimme_edge_table(Gene *tour1, Gene *tour2, int num_gene, Edge *edge_table)
for (index1 = 0; index1 < num_gene; index1++)
{
/*
- * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this
- * operaton maps n back to 1
+ * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this operaton
+ * maps n back to 1
*/
index2 = (index1 + 1) % num_gene;
/*
- * edges are bidirectional, i.e. 1->2 is same as 2->1 call
- * gimme_edge twice per edge
+ * edges are bidirectional, i.e. 1->2 is same as 2->1 call gimme_edge
+ * twice per edge
*/
edge_total += gimme_edge(tour1[index1], tour1[index2], edge_table);
@@ -320,10 +319,10 @@ gimme_gene(Edge edge, Edge *edge_table)
*/
/*
- * The test for minimum_count can probably be removed at some
- * point but comments should probably indicate exactly why it is
- * guaranteed that the test will always succeed the first time
- * around. If it can fail then the code is in error
+ * The test for minimum_count can probably be removed at some point
+ * but comments should probably indicate exactly why it is guaranteed
+ * that the test will always succeed the first time around. If it can
+ * fail then the code is in error
*/
@@ -379,8 +378,8 @@ edge_failure(Gene *gene, int index, Edge *edge_table, int num_gene)
/*
- * how many edges remain? how many gene with four total (initial)
- * edges remain?
+ * how many edges remain? how many gene with four total (initial) edges
+ * remain?
*/
for (i = 1; i <= num_gene; i++)
@@ -395,8 +394,8 @@ edge_failure(Gene *gene, int index, Edge *edge_table, int num_gene)
}
/*
- * random decision of the gene with remaining edges and whose
- * total_edges == 4
+ * random decision of the gene with remaining edges and whose total_edges
+ * == 4
*/
if (four_count != 0)
@@ -444,15 +443,15 @@ edge_failure(Gene *gene, int index, Edge *edge_table, int num_gene)
}
/*
- * edge table seems to be empty; this happens sometimes on the last
- * point due to the fact that the first point is removed from the
- * table even though only one of its edges has been determined
+ * edge table seems to be empty; this happens sometimes on the last point
+ * due to the fact that the first point is removed from the table even
+ * though only one of its edges has been determined
*/
else
- { /* occurs only at the last point in the
- * tour; simply look for the point which
- * is not yet used */
+ { /* occurs only at the last point in the tour;
+ * simply look for the point which is not yet
+ * used */
for (i = 1; i <= num_gene; i++)
if (edge_table[i].unused_edges >= 0)
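
The comments rewrapped in this file all lean on the same edge-table convention: the table gets one spare slot so genes numbered 1..n index it directly (slot 0 unused), the tour is treated as circular, and every edge is recorded in both directions. A minimal standalone sketch of that convention, not the real geqo_erx.c code; SketchEdge, n_edges, add_sketch_edge and build_sketch_table are illustrative names, and calloc stands in for palloc:

#include <stdlib.h>

typedef int Gene;

typedef struct
{
    Gene    neighbours[4];      /* a node in one circular tour has two neighbours;
                                 * two parent tours give at most four */
    int     n_edges;            /* how many neighbours are recorded so far */
} SketchEdge;

/* allocate num_gene + 1 slots so genes 1..num_gene index the table
 * directly and slot 0 stays unused */
static SketchEdge *
alloc_sketch_table(int num_gene)
{
    return calloc(num_gene + 1, sizeof(SketchEdge));
}

/* edges are bidirectional, so record a->b and b->a, mirroring the
 * "call gimme_edge twice per edge" convention */
static void
add_sketch_edge(SketchEdge *table, Gene a, Gene b)
{
    table[a].neighbours[table[a].n_edges++] = b;
    table[b].neighbours[table[b].n_edges++] = a;
}

/* presume the tour is circular: the last gene links back to the first */
static void
build_sketch_table(const Gene *tour, int num_gene, SketchEdge *table)
{
    for (int i = 0; i < num_gene; i++)
        add_sketch_edge(table, tour[i], tour[(i + 1) % num_gene]);
}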
diff --git a/src/backend/optimizer/geqo/geqo_eval.c b/src/backend/optimizer/geqo/geqo_eval.c
index d1bb3059fc0..0a2dee08dc8 100644
--- a/src/backend/optimizer/geqo/geqo_eval.c
+++ b/src/backend/optimizer/geqo/geqo_eval.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.76 2005/06/09 04:18:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.77 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -52,15 +52,15 @@ geqo_eval(Gene *tour, int num_gene, GeqoEvalData *evaldata)
struct HTAB *savehash;
/*
- * Because gimme_tree considers both left- and right-sided trees,
- * there is no difference between a tour (a,b,c,d,...) and a tour
- * (b,a,c,d,...) --- the same join orders will be considered. To avoid
- * redundant cost calculations, we simply reject tours where tour[0] >
- * tour[1], assigning them an artificially bad fitness.
+ * Because gimme_tree considers both left- and right-sided trees, there is
+ * no difference between a tour (a,b,c,d,...) and a tour (b,a,c,d,...) ---
+ * the same join orders will be considered. To avoid redundant cost
+ * calculations, we simply reject tours where tour[0] > tour[1], assigning
+ * them an artificially bad fitness.
*
* init_tour() is aware of this rule and so we should never reject a tour
- * during the initial filling of the pool. It seems difficult to
- * persuade the recombination logic never to break the rule, however.
+ * during the initial filling of the pool. It seems difficult to persuade
+ * the recombination logic never to break the rule, however.
*/
if (num_gene >= 2 && tour[0] > tour[1])
return DBL_MAX;
@@ -69,10 +69,10 @@ geqo_eval(Gene *tour, int num_gene, GeqoEvalData *evaldata)
* Create a private memory context that will hold all temp storage
* allocated inside gimme_tree().
*
- * Since geqo_eval() will be called many times, we can't afford to let
- * all that memory go unreclaimed until end of statement. Note we
- * make the temp context a child of the planner's normal context, so
- * that it will be freed even if we abort via ereport(ERROR).
+ * Since geqo_eval() will be called many times, we can't afford to let all
+ * that memory go unreclaimed until end of statement. Note we make the
+ * temp context a child of the planner's normal context, so that it will
+ * be freed even if we abort via ereport(ERROR).
*/
mycontext = AllocSetContextCreate(CurrentMemoryContext,
"GEQO",
@@ -84,15 +84,15 @@ geqo_eval(Gene *tour, int num_gene, GeqoEvalData *evaldata)
/*
* gimme_tree will add entries to root->join_rel_list, which may or may
* not already contain some entries. The newly added entries will be
- * recycled by the MemoryContextDelete below, so we must ensure that
- * the list is restored to its former state before exiting. We can
- * do this by truncating the list to its original length. NOTE this
- * assumes that any added entries are appended at the end!
+ * recycled by the MemoryContextDelete below, so we must ensure that the
+ * list is restored to its former state before exiting. We can do this by
+ * truncating the list to its original length. NOTE this assumes that any
+ * added entries are appended at the end!
*
- * We also must take care not to mess up the outer join_rel_hash,
- * if there is one. We can do this by just temporarily setting the
- * link to NULL. (If we are dealing with enough join rels, which we
- * very likely are, a new hash table will get built and used locally.)
+ * We also must take care not to mess up the outer join_rel_hash, if there is
+ * one. We can do this by just temporarily setting the link to NULL. (If
+ * we are dealing with enough join rels, which we very likely are, a new
+ * hash table will get built and used locally.)
*/
savelength = list_length(evaldata->root->join_rel_list);
savehash = evaldata->root->join_rel_hash;
@@ -170,23 +170,22 @@ gimme_tree(Gene *tour, int num_gene, GeqoEvalData *evaldata)
* Push each relation onto the stack in the specified order. After
* pushing each relation, see whether the top two stack entries are
* joinable according to the desirable_join() heuristics. If so, join
- * them into one stack entry, and try again to combine with the next
- * stack entry down (if any). When the stack top is no longer
- * joinable, continue to the next input relation. After we have
- * pushed the last input relation, the heuristics are disabled and we
- * force joining all the remaining stack entries.
+ * them into one stack entry, and try again to combine with the next stack
+ * entry down (if any). When the stack top is no longer joinable,
+ * continue to the next input relation. After we have pushed the last
+ * input relation, the heuristics are disabled and we force joining all
+ * the remaining stack entries.
*
* If desirable_join() always returns true, this produces a straight
- * left-to-right join just like the old code. Otherwise we may
- * produce a bushy plan or a left/right-sided plan that really
- * corresponds to some tour other than the one given. To the extent
- * that the heuristics are helpful, however, this will be a better
- * plan than the raw tour.
+ * left-to-right join just like the old code. Otherwise we may produce a
+ * bushy plan or a left/right-sided plan that really corresponds to some
+ * tour other than the one given. To the extent that the heuristics are
+ * helpful, however, this will be a better plan than the raw tour.
*
- * Also, when a join attempt fails (because of IN-clause constraints), we
- * may be able to recover and produce a workable plan, where the old
- * code just had to give up. This case acts the same as a false
- * result from desirable_join().
+ * Also, when a join attempt fails (because of IN-clause constraints), we may
+ * be able to recover and produce a workable plan, where the old code just
+ * had to give up. This case acts the same as a false result from
+ * desirable_join().
*/
for (rel_count = 0; rel_count < num_gene; rel_count++)
{
@@ -199,8 +198,8 @@ gimme_tree(Gene *tour, int num_gene, GeqoEvalData *evaldata)
stack_depth++;
/*
- * While it's feasible, pop the top two stack entries and replace
- * with their join.
+ * While it's feasible, pop the top two stack entries and replace with
+ * their join.
*/
while (stack_depth >= 2)
{
@@ -208,20 +207,18 @@ gimme_tree(Gene *tour, int num_gene, GeqoEvalData *evaldata)
RelOptInfo *inner_rel = stack[stack_depth - 1];
/*
- * Don't pop if heuristics say not to join now. However, once
- * we have exhausted the input, the heuristics can't prevent
- * popping.
+ * Don't pop if heuristics say not to join now. However, once we
+ * have exhausted the input, the heuristics can't prevent popping.
*/
if (rel_count < num_gene - 1 &&
!desirable_join(evaldata->root, outer_rel, inner_rel))
break;
/*
- * Construct a RelOptInfo representing the join of these two
- * input relations. These are always inner joins. Note that
- * we expect the joinrel not to exist in root->join_rel_list
- * yet, and so the paths constructed for it will only include
- * the ones we want.
+ * Construct a RelOptInfo representing the join of these two input
+ * relations. These are always inner joins. Note that we expect
+ * the joinrel not to exist in root->join_rel_list yet, and so the
+ * paths constructed for it will only include the ones we want.
*/
joinrel = make_join_rel(evaldata->root, outer_rel, inner_rel,
JOIN_INNER);
@@ -266,9 +263,9 @@ desirable_join(PlannerInfo *root,
return true;
/*
- * Join if the rels are members of the same IN sub-select. This is
- * needed to improve the odds that we will find a valid solution in a
- * case where an IN sub-select has a clauseless join.
+ * Join if the rels are members of the same IN sub-select. This is needed
+ * to improve the odds that we will find a valid solution in a case where
+ * an IN sub-select has a clauseless join.
*/
foreach(l, root->in_info_list)
{
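
The long comment reflowed above is the core of gimme_tree(): relations are pushed onto a stack in tour order, the top two entries are merged while a heuristic approves, and once the last relation has been pushed the heuristic can no longer block merging. A toy standalone sketch of that control flow only, with int labels standing in for RelOptInfo; combine() and want_to_join() are illustrative stand-ins for make_join_rel() and desirable_join(), not PostgreSQL APIs, and the failure-recovery path is omitted:

#include <stdbool.h>

/* stand-in for make_join_rel(): just fold the two labels together */
static int
combine(int outer, int inner)
{
    return outer * 1000 + inner;
}

/* stand-in for desirable_join(): any yes/no heuristic works here */
static bool
want_to_join(int outer, int inner)
{
    return (outer + inner) % 2 == 0;
}

/* push each tour element, merging the top two stack entries while the
 * heuristic approves; after the last push, force-merge everything.
 * Assumes 1 <= num_gene <= 64 for the fixed-size sketch stack. */
static int
fold_tour(const int *tour, int num_gene)
{
    int     stack[64];
    int     depth = 0;

    for (int i = 0; i < num_gene; i++)
    {
        stack[depth++] = tour[i];

        while (depth >= 2)
        {
            int     outer = stack[depth - 2];
            int     inner = stack[depth - 1];

            /* once the input is exhausted, the heuristic cannot block merging */
            if (i < num_gene - 1 && !want_to_join(outer, inner))
                break;

            depth -= 2;
            stack[depth++] = combine(outer, inner);
        }
    }
    return stack[0];            /* one entry left: the fully folded "join" */
}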
diff --git a/src/backend/optimizer/geqo/geqo_main.c b/src/backend/optimizer/geqo/geqo_main.c
index c027f4370c3..d7618c5d67d 100644
--- a/src/backend/optimizer/geqo/geqo_main.c
+++ b/src/backend/optimizer/geqo/geqo_main.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_main.c,v 1.50 2005/06/08 23:02:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_main.c,v 1.51 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -106,10 +106,9 @@ geqo(PlannerInfo *root, int number_of_rels, List *initial_rels)
random_init_pool(pool, &evaldata);
/* sort the pool according to cheapest path as fitness */
- sort_pool(pool); /* we have to do it only one time, since
- * all kids replace the worst individuals
- * in future (-> geqo_pool.c:spread_chromo
- * ) */
+ sort_pool(pool); /* we have to do it only one time, since all
+ * kids replace the worst individuals in
+ * future (-> geqo_pool.c:spread_chromo ) */
#ifdef GEQO_DEBUG
elog(DEBUG1, "GEQO selected %d pool entries, best %.2f, worst %.2f",
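
The comment rewrapped here notes that the pool only needs to be sorted once, because later offspring displace the worst entries in place. A minimal sketch of sorting a pool by fitness with the standard qsort; SketchChromo and its worth field are illustrative, not the actual Chromosome layout or the sort_pool() implementation:

#include <stdlib.h>

typedef struct
{
    double  worth;              /* fitness: lower (cheaper plan) is better */
} SketchChromo;

static int
compare_worth(const void *a, const void *b)
{
    double  wa = ((const SketchChromo *) a)->worth;
    double  wb = ((const SketchChromo *) b)->worth;

    if (wa < wb)
        return -1;
    if (wa > wb)
        return 1;
    return 0;
}

/* sort ascending so data[0] is the best individual, data[size-1] the worst */
static void
sort_sketch_pool(SketchChromo *data, int size)
{
    qsort(data, size, sizeof(SketchChromo), compare_worth);
}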
diff --git a/src/backend/optimizer/geqo/geqo_misc.c b/src/backend/optimizer/geqo/geqo_misc.c
index 5afdcd7b8f5..ff5bd07e6ad 100644
--- a/src/backend/optimizer/geqo/geqo_misc.c
+++ b/src/backend/optimizer/geqo/geqo_misc.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_misc.c,v 1.42 2004/12/31 21:59:58 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_misc.c,v 1.43 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,10 +41,10 @@ avg_pool(Pool *pool)
elog(ERROR, "pool_size is zero");
/*
- * Since the pool may contain multiple occurrences of DBL_MAX, divide
- * by pool->size before summing, not after, to avoid overflow. This
- * loses a little in speed and accuracy, but this routine is only used
- * for debug printouts, so we don't care that much.
+ * Since the pool may contain multiple occurrences of DBL_MAX, divide by
+ * pool->size before summing, not after, to avoid overflow. This loses a
+ * little in speed and accuracy, but this routine is only used for debug
+ * printouts, so we don't care that much.
*/
for (i = 0; i < pool->size; i++)
cumulative += pool->data[i].worth / pool->size;
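
The point of the rewrapped comment is easy to see in isolation: with several DBL_MAX entries in the pool, summing first would overflow to infinity, while dividing each term by the pool size first keeps the running total finite (at the cost of a little accuracy). A small self-contained illustration, not the avg_pool() code itself:

#include <float.h>
#include <stdio.h>

int
main(void)
{
    double  worth[] = {DBL_MAX, DBL_MAX, 100.0, 200.0};
    int     size = 4;
    double  cumulative = 0.0;

    /* divide before summing: each term is at most DBL_MAX / size, so the
     * running total stays finite instead of overflowing to infinity */
    for (int i = 0; i < size; i++)
        cumulative += worth[i] / size;

    printf("average worth: %g\n", cumulative);
    return 0;
}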
diff --git a/src/backend/optimizer/geqo/geqo_pool.c b/src/backend/optimizer/geqo/geqo_pool.c
index f6881c0f5ff..83927facae5 100644
--- a/src/backend/optimizer/geqo/geqo_pool.c
+++ b/src/backend/optimizer/geqo/geqo_pool.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_pool.c,v 1.26 2004/12/31 21:59:58 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_pool.c,v 1.27 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -96,13 +96,12 @@ random_init_pool(Pool *pool, GeqoEvalData *evaldata)
int bad = 0;
/*
- * We immediately discard any invalid individuals (those that
- * geqo_eval returns DBL_MAX for), thereby not wasting pool space on
- * them.
+ * We immediately discard any invalid individuals (those that geqo_eval
+ * returns DBL_MAX for), thereby not wasting pool space on them.
*
- * If we fail to make any valid individuals after 10000 tries, give up;
- * this probably means something is broken, and we shouldn't just let
- * ourselves get stuck in an infinite loop.
+ * If we fail to make any valid individuals after 10000 tries, give up; this
+ * probably means something is broken, and we shouldn't just let ourselves
+ * get stuck in an infinite loop.
*/
i = 0;
while (i < pool->size)
@@ -223,8 +222,8 @@ spread_chromo(Chromosome *chromo, Pool *pool)
/*
- * these 2 cases move the search indices since a new location has
- * not yet been found.
+ * these 2 cases move the search indices since a new location has not
+ * yet been found.
*/
else if (chromo->worth < pool->data[mid].worth)
@@ -242,8 +241,7 @@ spread_chromo(Chromosome *chromo, Pool *pool)
/* now we have index for chromo */
/*
- * move every gene from index on down one position to make room for
- * chromo
+ * move every gene from index on down one position to make room for chromo
*/
/*
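
The first comment rewrapped in this file describes a guard worth seeing on its own: keep generating random individuals, discard the ones the evaluator marks invalid (DBL_MAX), and give up if a large number of tries produces no valid individual at all. A hedged sketch of that pattern; fill_pool_sketch and random_candidate_worth are illustrative stand-ins, not the random_init_pool()/geqo_eval() functions:

#include <float.h>
#include <stdbool.h>
#include <stdlib.h>

/* stand-in for init_tour() + geqo_eval(): most draws come back "invalid" */
static double
random_candidate_worth(void)
{
    return (rand() % 10 == 0) ? (double) rand() : DBL_MAX;
}

/* fill worth[0..size-1] with valid fitness values, discarding DBL_MAX
 * candidates; give up if 10000 tries yield no valid individual at all,
 * which almost certainly means something upstream is broken */
static bool
fill_pool_sketch(double *worth, int size)
{
    int     filled = 0;
    int     bad = 0;

    while (filled < size)
    {
        double  w = random_candidate_worth();

        if (w < DBL_MAX)
            worth[filled++] = w;
        else if (filled == 0 && ++bad >= 10000)
            return false;       /* bail out instead of spinning forever */
    }
    return true;
}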
diff --git a/src/backend/optimizer/geqo/geqo_recombination.c b/src/backend/optimizer/geqo/geqo_recombination.c
index d2ebee17653..c73e5b2a79e 100644
--- a/src/backend/optimizer/geqo/geqo_recombination.c
+++ b/src/backend/optimizer/geqo/geqo_recombination.c
@@ -3,7 +3,7 @@
* geqo_recombination.c
* misc recombination procedures
*
-* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_recombination.c,v 1.14 2004/08/29 05:06:43 momjian Exp $
+* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_recombination.c,v 1.15 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,8 +62,8 @@ init_tour(Gene *tour, int num_gene)
}
/*
- * Since geqo_eval() will reject tours where tour[0] > tour[1], we may
- * as well switch the two to make it a valid tour.
+ * Since geqo_eval() will reject tours where tour[0] > tour[1], we may as
+ * well switch the two to make it a valid tour.
*/
if (num_gene >= 2 && tour[0] > tour[1])
{
@@ -86,8 +86,8 @@ alloc_city_table(int num_gene)
City *city_table;
/*
- * palloc one extra location so that nodes numbered 1..n can be
- * indexed directly; 0 will not be used
+ * palloc one extra location so that nodes numbered 1..n can be indexed
+ * directly; 0 will not be used
*/
city_table = (City *) palloc((num_gene + 1) * sizeof(City));
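
The init_tour() hunk above ties back to the geqo_eval() rule quoted earlier: freshly generated tours are normalised so that tour[0] < tour[1], and therefore never get rejected during pool initialisation. A minimal sketch of that normalisation applied to a shuffled permutation; make_sketch_tour and its Fisher-Yates shuffle are illustrative and not how init_tour() actually builds its permutation, and rand() stands in for geqo_rand():

#include <stdlib.h>

typedef int Gene;

/* build a random permutation of genes 1..num_gene, then normalise it so
 * geqo_eval() will accept it */
static void
make_sketch_tour(Gene *tour, int num_gene)
{
    for (int i = 0; i < num_gene; i++)
        tour[i] = (Gene) (i + 1);

    /* illustrative Fisher-Yates shuffle */
    for (int i = num_gene - 1; i > 0; i--)
    {
        int     j = rand() % (i + 1);
        Gene    tmp = tour[i];

        tour[i] = tour[j];
        tour[j] = tmp;
    }

    /* geqo_eval() rejects tours with tour[0] > tour[1], so swap the first
     * two genes to keep every fresh tour valid */
    if (num_gene >= 2 && tour[0] > tour[1])
    {
        Gene    tmp = tour[0];

        tour[0] = tour[1];
        tour[1] = tmp;
    }
}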
diff --git a/src/backend/optimizer/geqo/geqo_selection.c b/src/backend/optimizer/geqo/geqo_selection.c
index 92b735cb282..32a3e83ae03 100644
--- a/src/backend/optimizer/geqo/geqo_selection.c
+++ b/src/backend/optimizer/geqo/geqo_selection.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_selection.c,v 1.19 2005/06/14 14:21:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_selection.c,v 1.20 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -86,13 +86,14 @@ linear(int pool_size, double bias) /* bias is y-intercept of linear
/*
* If geqo_rand() returns exactly 1.0 then we will get exactly max from
- * this equation, whereas we need 0 <= index < max. Also it seems possible
- * that roundoff error might deliver values slightly outside the range;
- * in particular avoid passing a value slightly less than 0 to sqrt().
- * If we get a bad value just try again.
+ * this equation, whereas we need 0 <= index < max. Also it seems
+ * possible that roundoff error might deliver values slightly outside the
+ * range; in particular avoid passing a value slightly less than 0 to
+ * sqrt(). If we get a bad value just try again.
*/
- do {
- double sqrtval;
+ do
+ {
+ double sqrtval;
sqrtval = (bias * bias) - 4.0 * (bias - 1.0) * geqo_rand();
if (sqrtval > 0.0)
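
The hunk is cut off above, but the comment it reformats describes a retry loop around a biased draw: compute the discriminant, only take sqrt() of it when it is positive, and try again whenever roundoff pushes the resulting index outside [0, pool_size). A standalone sketch of that guarded linear-bias draw under those assumptions; linear_sketch is an illustrative name, drand48() stands in for geqo_rand(), and the index formula is the quadratic form implied by the sqrtval computation shown above:

#include <math.h>
#include <stdlib.h>

/* draw an index in [0, pool_size) biased towards the front of a pool
 * sorted best-first; assumes bias > 1.0 so the divisor is nonzero */
static int
linear_sketch(int pool_size, double bias)
{
    double  max = (double) pool_size;
    double  index;

    do
    {
        double  sqrtval = (bias * bias) - 4.0 * (bias - 1.0) * drand48();

        if (sqrtval > 0.0)
            sqrtval = sqrt(sqrtval);
        index = max * (bias - sqrtval) / 2.0 / (bias - 1.0);
    } while (index < 0.0 || index >= max);  /* bad value: just try again */

    return (int) index;
}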