path: root/src/backend/executor
author     Bruce Momjian <bruce@momjian.us>  2004-08-29 05:07:03 +0000
committer  Bruce Momjian <bruce@momjian.us>  2004-08-29 05:07:03 +0000
commit     b6b71b85bc45b49005b5aec87cba2c33fc8baf49 (patch)
tree       c23dbd1dbc43972a8e48327c8a771baf36952f3d /src/backend/executor
parent     90cb9c305140684b2b00c739b724f67915e11404 (diff)
Pgindent run for 8.0.
Diffstat (limited to 'src/backend/executor')
-rw-r--r--  src/backend/executor/execAmi.c         44
-rw-r--r--  src/backend/executor/execGrouping.c    22
-rw-r--r--  src/backend/executor/execJunk.c        10
-rw-r--r--  src/backend/executor/execMain.c        18
-rw-r--r--  src/backend/executor/execQual.c       192
-rw-r--r--  src/backend/executor/execScan.c         6
-rw-r--r--  src/backend/executor/execTuples.c      26
-rw-r--r--  src/backend/executor/execUtils.c        9
-rw-r--r--  src/backend/executor/functions.c       37
-rw-r--r--  src/backend/executor/nodeAgg.c         45
-rw-r--r--  src/backend/executor/nodeHashjoin.c    14
-rw-r--r--  src/backend/executor/nodeIndexscan.c   85
-rw-r--r--  src/backend/executor/nodeMergejoin.c   10
-rw-r--r--  src/backend/executor/nodeSeqscan.c     12
-rw-r--r--  src/backend/executor/nodeSubplan.c      4
-rw-r--r--  src/backend/executor/nodeUnique.c       7
-rw-r--r--  src/backend/executor/spi.c             49
17 files changed, 305 insertions, 285 deletions
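
The hunks below are whitespace-only: pgindent normalizes indentation, reflows block comments to the preferred line width (with two spaces after sentence-ending periods), and aligns the names in local variable declarations. As a rough illustration of that layout, here is a minimal, self-contained C fragment (hypothetical code, not taken from this patch) formatted the way pgindent leaves it:

	#include <stdio.h>

	static int
	count_nonzero(const int *items, int nitems)
	{
		int			i;
		int			total = 0;

		/*
		 * Count the non-zero entries.  A longer comment here would be
		 * reflowed by pgindent so that no line runs past the preferred
		 * width, exactly as in the comment changes in the hunks below.
		 */
		for (i = 0; i < nitems; i++)
		{
			if (items[i] != 0)
				total++;
		}
		return total;
	}

	int
	main(void)
	{
		int			values[] = {1, 0, 2, 0, 3};

		printf("%d\n", count_nonzero(values, 5));
		return 0;
	}
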
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c
index ad1b5817c4e..6cbad491eda 100644
--- a/src/backend/executor/execAmi.c
+++ b/src/backend/executor/execAmi.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.80 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.81 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,7 +51,7 @@
* needs access to variables of the current outer tuple. (The handling of
* this parameter is currently pretty inconsistent: some callers pass NULL
* and some pass down their parent's value; so don't rely on it in other
- * situations. It'd probably be better to remove the whole thing and use
+ * situations. It'd probably be better to remove the whole thing and use
* the generalized parameter mechanism instead.)
*/
void
@@ -64,7 +64,7 @@ ExecReScan(PlanState *node, ExprContext *exprCtxt)
/* If we have changed parameters, propagate that info */
if (node->chgParam != NULL)
{
- ListCell *l;
+ ListCell *l;
foreach(l, node->initPlan)
{
@@ -365,19 +365,19 @@ ExecMayReturnRawTuples(PlanState *node)
{
/*
* At a table scan node, we check whether ExecAssignScanProjectionInfo
- * decided to do projection or not. Most non-scan nodes always project
- * and so we can return "false" immediately. For nodes that don't
- * project but just pass up input tuples, we have to recursively
+ * decided to do projection or not. Most non-scan nodes always
+ * project and so we can return "false" immediately. For nodes that
+ * don't project but just pass up input tuples, we have to recursively
* examine the input plan node.
*
- * Note: Hash and Material are listed here because they sometimes
- * return an original input tuple, not a copy. But Sort and SetOp
- * never return an original tuple, so they can be treated like
- * projecting nodes.
+ * Note: Hash and Material are listed here because they sometimes return
+ * an original input tuple, not a copy. But Sort and SetOp never
+ * return an original tuple, so they can be treated like projecting
+ * nodes.
*/
switch (nodeTag(node))
{
- /* Table scan nodes */
+ /* Table scan nodes */
case T_SeqScanState:
case T_IndexScanState:
case T_TidScanState:
@@ -387,7 +387,7 @@ ExecMayReturnRawTuples(PlanState *node)
return true;
break;
- /* Non-projecting nodes */
+ /* Non-projecting nodes */
case T_HashState:
case T_MaterialState:
case T_UniqueState:
@@ -395,19 +395,19 @@ ExecMayReturnRawTuples(PlanState *node)
return ExecMayReturnRawTuples(node->lefttree);
case T_AppendState:
- {
- AppendState *appendstate = (AppendState *) node;
- int j;
-
- for (j = 0; j < appendstate->as_nplans; j++)
{
- if (ExecMayReturnRawTuples(appendstate->appendplans[j]))
- return true;
+ AppendState *appendstate = (AppendState *) node;
+ int j;
+
+ for (j = 0; j < appendstate->as_nplans; j++)
+ {
+ if (ExecMayReturnRawTuples(appendstate->appendplans[j]))
+ return true;
+ }
+ break;
}
- break;
- }
- /* All projecting node types come here */
+ /* All projecting node types come here */
default:
break;
}
diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index 44157fc6869..e31dc7dfcbe 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.10 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.11 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -26,8 +26,8 @@
static TupleHashTable CurTupleHashTable = NULL;
static uint32 TupleHashTableHash(const void *key, Size keysize);
-static int TupleHashTableMatch(const void *key1, const void *key2,
- Size keysize);
+static int TupleHashTableMatch(const void *key1, const void *key2,
+ Size keysize);
/*****************************************************************************
@@ -303,7 +303,7 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx,
Assert(entrysize >= sizeof(TupleHashEntryData));
hashtable = (TupleHashTable) MemoryContextAlloc(tablecxt,
- sizeof(TupleHashTableData));
+ sizeof(TupleHashTableData));
hashtable->numCols = numCols;
hashtable->keyColIdx = keyColIdx;
@@ -321,7 +321,7 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx,
hash_ctl.hcxt = tablecxt;
hashtable->hashtab = hash_create("TupleHashTable", (long) nbuckets,
&hash_ctl,
- HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
+ HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
if (hashtable->hashtab == NULL)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
@@ -359,8 +359,8 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
/*
* Set up data needed by hash and match functions
*
- * We save and restore CurTupleHashTable just in case someone manages
- * to invoke this code re-entrantly.
+ * We save and restore CurTupleHashTable just in case someone manages to
+ * invoke this code re-entrantly.
*/
hashtable->tupdesc = tupdesc;
saveCurHT = CurTupleHashTable;
@@ -389,8 +389,8 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
/*
* Zero any caller-requested space in the entry. (This zaps
- * the "key data" dynahash.c copied into the new entry, but
- * we don't care since we're about to overwrite it anyway.)
+ * the "key data" dynahash.c copied into the new entry, but we
+ * don't care since we're about to overwrite it anyway.)
*/
MemSet(entry, 0, hashtable->entrysize);
@@ -414,13 +414,13 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
*
* The passed-in key is a pointer to a HeapTuple pointer -- this is either
* the firstTuple field of a TupleHashEntry struct, or the key value passed
- * to hash_search. We ignore the keysize.
+ * to hash_search. We ignore the keysize.
*
* CurTupleHashTable must be set before calling this, since dynahash.c
* doesn't provide any API that would let us get at the hashtable otherwise.
*
* Also, the caller must select an appropriate memory context for running
- * the hash functions. (dynahash.c doesn't change CurrentMemoryContext.)
+ * the hash functions. (dynahash.c doesn't change CurrentMemoryContext.)
*/
static uint32
TupleHashTableHash(const void *key, Size keysize)
diff --git a/src/backend/executor/execJunk.c b/src/backend/executor/execJunk.c
index 43f58e036ea..c797c343d35 100644
--- a/src/backend/executor/execJunk.c
+++ b/src/backend/executor/execJunk.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.42 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.43 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -274,9 +274,9 @@ ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot)
* dealing with a small number of attributes. for large tuples we just
* use palloc.
*
- * Note: we could use just one set of arrays if we were willing to
- * assume that the resno mapping is monotonic... I think it is, but
- * won't take the risk of breaking things right now.
+ * Note: we could use just one set of arrays if we were willing to assume
+ * that the resno mapping is monotonic... I think it is, but won't
+ * take the risk of breaking things right now.
*/
if (cleanLength > 64)
{
@@ -309,7 +309,7 @@ ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot)
*/
for (i = 0; i < cleanLength; i++)
{
- int j = cleanMap[i] - 1;
+ int j = cleanMap[i] - 1;
values[i] = old_values[j];
nulls[i] = old_nulls[j];
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 6e386d25292..d77bc7054a3 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -26,7 +26,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.235 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.236 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -521,8 +521,8 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
* Multiple result relations (due to inheritance)
* parseTree->resultRelations identifies them all
*/
- ResultRelInfo *resultRelInfo;
- ListCell *l;
+ ResultRelInfo *resultRelInfo;
+ ListCell *l;
numResultRelations = list_length(resultRelations);
resultRelInfos = (ResultRelInfo *)
@@ -644,10 +644,10 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
/*
* Initialize the junk filter if needed. SELECT and INSERT queries
* need a filter if there are any junk attrs in the tlist. INSERT and
- * SELECT INTO also need a filter if the plan may return raw disk tuples
- * (else heap_insert will be scribbling on the source relation!).
- * UPDATE and DELETE always need a filter, since there's always a junk
- * 'ctid' attribute present --- no need to look first.
+ * SELECT INTO also need a filter if the plan may return raw disk
+ * tuples (else heap_insert will be scribbling on the source
+ * relation!). UPDATE and DELETE always need a filter, since there's
+ * always a junk 'ctid' attribute present --- no need to look first.
*/
{
bool junk_filter_needed = false;
@@ -1460,7 +1460,7 @@ ldelete:;
&ctid,
estate->es_snapshot->curcid,
estate->es_crosscheck_snapshot,
- true /* wait for commit */);
+ true /* wait for commit */ );
switch (result)
{
case HeapTupleSelfUpdated:
@@ -1596,7 +1596,7 @@ lreplace:;
&ctid,
estate->es_snapshot->curcid,
estate->es_crosscheck_snapshot,
- true /* wait for commit */);
+ true /* wait for commit */ );
switch (result)
{
case HeapTupleSelfUpdated:
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index 65721e0863d..2ad0423810b 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.167 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.168 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,51 +59,51 @@ static Datum ExecEvalArrayRef(ArrayRefExprState *astate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalAggref(AggrefExprState *aggref,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalConst(ExprState *exprstate, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalParam(ExprState *exprstate, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static ExprDoneCond ExecEvalFuncArgs(FunctionCallInfo fcinfo,
List *argList, ExprContext *econtext);
static Datum ExecMakeFunctionResultNoSets(FuncExprState *fcache,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalFunc(FuncExprState *fcache, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalOper(FuncExprState *fcache, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalDistinct(FuncExprState *fcache, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalNot(BoolExprState *notclause, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCaseTestExpr(ExprState *exprstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalArray(ArrayExprState *astate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalRow(RowExprState *rstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCoalesce(CoalesceExprState *coalesceExpr,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalNullIf(FuncExprState *nullIfExpr,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalNullTest(GenericExprState *nstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
@@ -114,14 +114,14 @@ static Datum ExecEvalCoerceToDomain(CoerceToDomainState *cstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCoerceToDomainValue(ExprState *exprstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalFieldSelect(FieldSelectState *fstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalFieldStore(FieldStoreState *fstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalRelabelType(GenericExprState *exprstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
@@ -145,7 +145,7 @@ static Datum ExecEvalRelabelType(GenericExprState *exprstate,
*
* Note: for notational simplicity we declare these functions as taking the
* specific type of ExprState that they work on. This requires casting when
- * assigning the function pointer in ExecInitExpr. Be careful that the
+ * assigning the function pointer in ExecInitExpr. Be careful that the
* function signature is declared correctly, because the cast suppresses
* automatic checking!
*
@@ -236,13 +236,13 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
isDone));
/*
- * If refexpr yields NULL, and it's a fetch, then result is NULL.
- * In the assignment case, we'll cons up something below.
+ * If refexpr yields NULL, and it's a fetch, then result is NULL. In
+ * the assignment case, we'll cons up something below.
*/
if (*isNull)
{
if (isDone && *isDone == ExprEndResult)
- return (Datum) NULL; /* end of set result */
+ return (Datum) NULL; /* end of set result */
if (!isAssignment)
return (Datum) NULL;
}
@@ -321,10 +321,11 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
*
* XXX At some point we'll need to look into making the old value of
* the array element available via CaseTestExpr, as is done by
- * ExecEvalFieldStore. This is not needed now but will be needed
- * to support arrays of composite types; in an assignment to a field
- * of an array member, the parser would generate a FieldStore that
- * expects to fetch its input tuple via CaseTestExpr.
+ * ExecEvalFieldStore. This is not needed now but will be needed
+ * to support arrays of composite types; in an assignment to a
+ * field of an array member, the parser would generate a
+ * FieldStore that expects to fetch its input tuple via
+ * CaseTestExpr.
*/
sourceData = ExecEvalExpr(astate->refassgnexpr,
econtext,
@@ -339,15 +340,16 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
return PointerGetDatum(array_source);
/*
- * For an assignment, if all the subscripts and the input expression
- * are non-null but the original array is null, then substitute an
- * empty (zero-dimensional) array and proceed with the assignment.
- * This only works for varlena arrays, though; for fixed-length
- * array types we punt and return the null input array.
+ * For an assignment, if all the subscripts and the input
+ * expression are non-null but the original array is null, then
+ * substitute an empty (zero-dimensional) array and proceed with
+ * the assignment. This only works for varlena arrays, though; for
+ * fixed-length array types we punt and return the null input
+ * array.
*/
if (*isNull)
{
- if (astate->refattrlength > 0) /* fixed-length array? */
+ if (astate->refattrlength > 0) /* fixed-length array? */
return PointerGetDatum(array_source);
array_source = construct_md_array(NULL, 0, NULL, NULL,
@@ -444,10 +446,10 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
/*
* Get the slot and attribute number we want
*
- * The asserts check that references to system attributes only appear
- * at the level of a relation scan; at higher levels, system attributes
- * must be treated as ordinary variables (since we no longer have access
- * to the original tuple).
+ * The asserts check that references to system attributes only appear at
+ * the level of a relation scan; at higher levels, system attributes
+ * must be treated as ordinary variables (since we no longer have
+ * access to the original tuple).
*/
attnum = variable->varattno;
@@ -476,8 +478,8 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
tuple_type = slot->ttc_tupleDescriptor;
/*
- * Some checks that are only applied for user attribute numbers
- * (bogus system attnums will be caught inside heap_getattr).
+ * Some checks that are only applied for user attribute numbers (bogus
+ * system attnums will be caught inside heap_getattr).
*/
if (attnum > 0)
{
@@ -488,9 +490,10 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
tuple_type->attrs[attnum - 1] != NULL);
/*
- * If the attribute's column has been dropped, we force a NULL result.
- * This case should not happen in normal use, but it could happen if
- * we are executing a plan cached before the column was dropped.
+ * If the attribute's column has been dropped, we force a NULL
+ * result. This case should not happen in normal use, but it could
+ * happen if we are executing a plan cached before the column was
+ * dropped.
*/
if (tuple_type->attrs[attnum - 1]->attisdropped)
{
@@ -499,13 +502,14 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
}
/*
- * This assert checks that the datatype the plan expects to get (as
- * told by our "variable" argument) is in fact the datatype of the
- * attribute being fetched (as seen in the current context, identified
- * by our "econtext" argument). Otherwise crashes are likely.
+ * This assert checks that the datatype the plan expects to get
+ * (as told by our "variable" argument) is in fact the datatype of
+ * the attribute being fetched (as seen in the current context,
+ * identified by our "econtext" argument). Otherwise crashes are
+ * likely.
*
- * Note that we can't check dropped columns, since their atttypid
- * has been zeroed.
+ * Note that we can't check dropped columns, since their atttypid has
+ * been zeroed.
*/
Assert(variable->vartype == tuple_type->attrs[attnum - 1]->atttypid);
}
@@ -590,7 +594,8 @@ ExecEvalParam(ExprState *exprstate, ExprContext *econtext,
else
{
/*
- * All other parameter types must be sought in ecxt_param_list_info.
+ * All other parameter types must be sought in
+ * ecxt_param_list_info.
*/
ParamListInfo paramInfo;
@@ -964,7 +969,7 @@ ExecMakeFunctionResult(FuncExprState *fcache,
{
RegisterExprContextCallback(econtext,
ShutdownFuncExpr,
- PointerGetDatum(fcache));
+ PointerGetDatum(fcache));
fcache->shutdown_reg = true;
}
}
@@ -1006,8 +1011,8 @@ ExecMakeFunctionResult(FuncExprState *fcache,
*
* We change the ExprState function pointer to use the simpler
* ExecMakeFunctionResultNoSets on subsequent calls. This amounts
- * to assuming that no argument can return a set if it didn't do so
- * the first time.
+ * to assuming that no argument can return a set if it didn't do
+ * so the first time.
*/
fcache->xprstate.evalfunc = (ExprStateEvalFunc) ExecMakeFunctionResultNoSets;
@@ -1098,7 +1103,7 @@ ExecMakeFunctionResultNoSets(FuncExprState *fcache,
}
}
}
- /* fcinfo.isnull = false; */ /* handled by MemSet */
+ /* fcinfo.isnull = false; */ /* handled by MemSet */
result = FunctionCallInvoke(&fcinfo);
*isNull = fcinfo.isnull;
@@ -1273,9 +1278,9 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
break;
/*
- * Can't do anything useful with NULL rowtype values. Currently
- * we raise an error, but another alternative is to just ignore
- * the result and "continue" to get another row.
+ * Can't do anything useful with NULL rowtype values.
+ * Currently we raise an error, but another alternative is to
+ * just ignore the result and "continue" to get another row.
*/
if (returnsTuple && fcinfo.isnull)
ereport(ERROR,
@@ -1293,13 +1298,14 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
{
/*
* Use the type info embedded in the rowtype Datum to
- * look up the needed tupdesc. Make a copy for the query.
+ * look up the needed tupdesc. Make a copy for the
+ * query.
*/
- HeapTupleHeader td;
+ HeapTupleHeader td;
td = DatumGetHeapTupleHeader(result);
tupdesc = lookup_rowtype_tupdesc(HeapTupleHeaderGetTypeId(td),
- HeapTupleHeaderGetTypMod(td));
+ HeapTupleHeaderGetTypMod(td));
tupdesc = CreateTupleDescCopy(tupdesc);
}
else
@@ -1326,7 +1332,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
*/
if (returnsTuple)
{
- HeapTupleHeader td;
+ HeapTupleHeader td;
td = DatumGetHeapTupleHeader(result);
@@ -1826,10 +1832,10 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
*isDone = ExprSingleResult;
/*
- * If there's a test expression, we have to evaluate it and save
- * the value where the CaseTestExpr placeholders can find it.
- * We must save and restore prior setting of econtext's caseValue fields,
- * in case this node is itself within a larger CASE.
+ * If there's a test expression, we have to evaluate it and save the
+ * value where the CaseTestExpr placeholders can find it. We must save
+ * and restore prior setting of econtext's caseValue fields, in case
+ * this node is itself within a larger CASE.
*/
save_datum = econtext->caseValue_datum;
save_isNull = econtext->caseValue_isNull;
@@ -1838,7 +1844,7 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
{
econtext->caseValue_datum = ExecEvalExpr(caseExpr->arg,
econtext,
- &econtext->caseValue_isNull,
+ &econtext->caseValue_isNull,
NULL);
}
@@ -2009,7 +2015,7 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("cannot merge incompatible arrays"),
errdetail("Array with element type %s cannot be "
- "included in ARRAY construct with element type %s.",
+ "included in ARRAY construct with element type %s.",
format_type_be(ARR_ELEMTYPE(array)),
format_type_be(element_type))));
@@ -2021,8 +2027,8 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
if (ndims <= 0 || ndims > MAXDIM)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("number of array dimensions (%d) exceeds " \
- "the maximum allowed (%d)", ndims, MAXDIM)));
+ errmsg("number of array dimensions (%d) exceeds " \
+ "the maximum allowed (%d)", ndims, MAXDIM)));
elem_dims = (int *) palloc(elem_ndims * sizeof(int));
memcpy(elem_dims, ARR_DIMS(array), elem_ndims * sizeof(int));
@@ -2600,18 +2606,18 @@ ExecEvalFieldStore(FieldStoreState *fstate,
forboth(l1, fstate->newvals, l2, fstore->fieldnums)
{
- ExprState *newval = (ExprState *) lfirst(l1);
- AttrNumber fieldnum = lfirst_int(l2);
+ ExprState *newval = (ExprState *) lfirst(l1);
+ AttrNumber fieldnum = lfirst_int(l2);
bool eisnull;
Assert(fieldnum > 0 && fieldnum <= tupDesc->natts);
/*
- * Use the CaseTestExpr mechanism to pass down the old value of the
- * field being replaced; this is useful in case we have a nested field
- * update situation. It's safe to reuse the CASE mechanism because
- * there cannot be a CASE between here and where the value would be
- * needed.
+ * Use the CaseTestExpr mechanism to pass down the old value of
+ * the field being replaced; this is useful in case we have a
+ * nested field update situation. It's safe to reuse the CASE
+ * mechanism because there cannot be a CASE between here and where
+ * the value would be needed.
*/
econtext->caseValue_datum = values[fieldnum - 1];
econtext->caseValue_isNull = (nulls[fieldnum - 1] == 'n');
@@ -2981,7 +2987,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_RowExpr:
{
- RowExpr *rowexpr = (RowExpr *) node;
+ RowExpr *rowexpr = (RowExpr *) node;
RowExprState *rstate = makeNode(RowExprState);
Form_pg_attribute *attrs;
List *outlist = NIL;
@@ -3016,15 +3022,15 @@ ExecInitExpr(Expr *node, PlanState *parent)
/*
* Guard against ALTER COLUMN TYPE on rowtype
* since the RowExpr was created. XXX should we
- * check typmod too? Not sure we can be sure it'll
- * be the same.
+ * check typmod too? Not sure we can be sure
+ * it'll be the same.
*/
if (exprType((Node *) e) != attrs[i]->atttypid)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("ROW() column has type %s instead of type %s",
- format_type_be(exprType((Node *) e)),
- format_type_be(attrs[i]->atttypid))));
+ format_type_be(exprType((Node *) e)),
+ format_type_be(attrs[i]->atttypid))));
}
else
{
@@ -3111,7 +3117,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
TargetEntry *tle = (TargetEntry *) node;
GenericExprState *gstate = makeNode(GenericExprState);
- gstate->xprstate.evalfunc = NULL; /* not used */
+ gstate->xprstate.evalfunc = NULL; /* not used */
gstate->arg = ExecInitExpr(tle->expr, parent);
state = (ExprState *) gstate;
}
@@ -3546,8 +3552,8 @@ ExecProject(ProjectionInfo *projInfo, ExprDoneCond *isDone)
/*
* store the tuple in the projection slot and return the slot.
*/
- return ExecStoreTuple(newTuple, /* tuple to store */
- slot, /* slot to store in */
- InvalidBuffer, /* tuple has no buffer */
+ return ExecStoreTuple(newTuple, /* tuple to store */
+ slot, /* slot to store in */
+ InvalidBuffer, /* tuple has no buffer */
true);
}
diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c
index fd123bbd551..6adefdc2666 100644
--- a/src/backend/executor/execScan.c
+++ b/src/backend/executor/execScan.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.32 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.33 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -224,8 +224,8 @@ tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, TupleDesc tupdesc
return false; /* tlist too long */
/*
- * If the plan context requires a particular hasoid setting, then
- * that has to match, too.
+ * If the plan context requires a particular hasoid setting, then that
+ * has to match, too.
*/
if (ExecContextForcesOids(ps, &hasoid) &&
hasoid != tupdesc->tdhasoid)
diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c
index 98f4d503e8c..92d6cd43743 100644
--- a/src/backend/executor/execTuples.c
+++ b/src/backend/executor/execTuples.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.81 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.82 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -117,7 +117,7 @@
static TupleDesc ExecTypeFromTLInternal(List *targetList,
- bool hasoid, bool skipjunk);
+ bool hasoid, bool skipjunk);
/* ----------------------------------------------------------------
@@ -149,7 +149,7 @@ ExecCreateTupleTable(int initialSize) /* initial number of slots in
/*
* Now allocate our new table along with space for the pointers to the
- * tuples. Zero out the slots.
+ * tuples. Zero out the slots.
*/
newtable = (TupleTable) palloc(sizeof(TupleTableData));
@@ -568,10 +568,10 @@ ExecCleanTypeFromTL(List *targetList, bool hasoid)
static TupleDesc
ExecTypeFromTLInternal(List *targetList, bool hasoid, bool skipjunk)
{
- TupleDesc typeInfo;
- ListCell *l;
- int len;
- int cur_resno = 1;
+ TupleDesc typeInfo;
+ ListCell *l;
+ int len;
+ int cur_resno = 1;
if (skipjunk)
len = ExecCleanTargetListLength(targetList);
@@ -581,8 +581,8 @@ ExecTypeFromTLInternal(List *targetList, bool hasoid, bool skipjunk)
foreach(l, targetList)
{
- TargetEntry *tle = lfirst(l);
- Resdom *resdom = tle->resdom;
+ TargetEntry *tle = lfirst(l);
+ Resdom *resdom = tle->resdom;
if (skipjunk && resdom->resjunk)
continue;
@@ -605,16 +605,16 @@ ExecTypeFromTLInternal(List *targetList, bool hasoid, bool skipjunk)
TupleDesc
ExecTypeFromExprList(List *exprList)
{
- TupleDesc typeInfo;
- ListCell *l;
- int cur_resno = 1;
+ TupleDesc typeInfo;
+ ListCell *l;
+ int cur_resno = 1;
char fldname[NAMEDATALEN];
typeInfo = CreateTemplateTupleDesc(list_length(exprList), false);
foreach(l, exprList)
{
- Node *e = lfirst(l);
+ Node *e = lfirst(l);
sprintf(fldname, "f%d", cur_resno);
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 1e5694a9269..79ab787b07a 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.113 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.114 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -179,7 +179,7 @@ CreateExecutorState(void)
*/
estate->es_direction = ForwardScanDirection;
estate->es_snapshot = SnapshotNow;
- estate->es_crosscheck_snapshot = SnapshotAny; /* means no crosscheck */
+ estate->es_crosscheck_snapshot = SnapshotAny; /* means no crosscheck */
estate->es_range_table = NIL;
estate->es_result_relations = NULL;
@@ -248,7 +248,8 @@ FreeExecutorState(EState *estate)
*/
while (estate->es_exprcontexts)
{
- /* XXX: seems there ought to be a faster way to implement this
+ /*
+ * XXX: seems there ought to be a faster way to implement this
* than repeated list_delete(), no?
*/
FreeExprContext((ExprContext *) linitial(estate->es_exprcontexts));
@@ -364,7 +365,7 @@ FreeExprContext(ExprContext *econtext)
* ReScanExprContext
*
* Reset an expression context in preparation for a rescan of its
- * plan node. This requires calling any registered shutdown callbacks,
+ * plan node. This requires calling any registered shutdown callbacks,
* since any partially complete set-returning-functions must be canceled.
*
* Note we make no assumption about the caller's memory context.
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index f06fabb5fc8..ea3b12be5f6 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.85 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.86 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -58,7 +58,7 @@ typedef struct local_es
*/
typedef struct
{
- Oid *argtypes; /* resolved types of arguments */
+ Oid *argtypes; /* resolved types of arguments */
Oid rettype; /* actual return type */
int typlen; /* length of the return type */
bool typbyval; /* true if return type is pass by value */
@@ -94,7 +94,7 @@ init_execution_state(List *queryTree_list)
{
execution_state *firstes = NULL;
execution_state *preves = NULL;
- ListCell *qtl_item;
+ ListCell *qtl_item;
foreach(qtl_item, queryTree_list)
{
@@ -180,8 +180,8 @@ init_sql_fcache(FmgrInfo *finfo)
typeStruct = (Form_pg_type) GETSTRUCT(typeTuple);
/*
- * get the type length and by-value flag from the type tuple; also
- * do a preliminary check for returnsTuple (this may prove inaccurate,
+ * get the type length and by-value flag from the type tuple; also do
+ * a preliminary check for returnsTuple (this may prove inaccurate,
* see below).
*/
fcache->typlen = typeStruct->typlen;
@@ -190,8 +190,8 @@ init_sql_fcache(FmgrInfo *finfo)
rettype == RECORDOID);
/*
- * Parse and rewrite the queries. We need the argument type info to pass
- * to the parser.
+ * Parse and rewrite the queries. We need the argument type info to
+ * pass to the parser.
*/
nargs = procedureStruct->pronargs;
haspolyarg = false;
@@ -240,11 +240,11 @@ init_sql_fcache(FmgrInfo *finfo)
* If the function has any arguments declared as polymorphic types,
* then it wasn't type-checked at definition time; must do so now.
*
- * Also, force a type-check if the declared return type is a rowtype;
- * we need to find out whether we are actually returning the whole
- * tuple result, or just regurgitating a rowtype expression result.
- * In the latter case we clear returnsTuple because we need not act
- * different from the scalar result case.
+ * Also, force a type-check if the declared return type is a rowtype; we
+ * need to find out whether we are actually returning the whole tuple
+ * result, or just regurgitating a rowtype expression result. In the
+ * latter case we clear returnsTuple because we need not act different
+ * from the scalar result case.
*/
if (haspolyarg || fcache->returnsTuple)
fcache->returnsTuple = check_sql_fn_retval(rettype,
@@ -395,9 +395,9 @@ postquel_execute(execution_state *es,
* XXX do we need to remove junk attrs from the result tuple?
* Probably OK to leave them, as long as they are at the end.
*/
- HeapTupleHeader dtup;
- Oid dtuptype;
- int32 dtuptypmod;
+ HeapTupleHeader dtup;
+ Oid dtuptype;
+ int32 dtuptypmod;
dtup = (HeapTupleHeader) palloc(tup->t_len);
memcpy((char *) dtup, (char *) tup->t_data, tup->t_len);
@@ -433,8 +433,8 @@ postquel_execute(execution_state *es,
else
{
/*
- * Returning a scalar, which we have to extract from the
- * first column of the SELECT result, and then copy into current
+ * Returning a scalar, which we have to extract from the first
+ * column of the SELECT result, and then copy into current
* execution context if needed.
*/
value = heap_getattr(tup, 1, tupDesc, &(fcinfo->isnull));
@@ -635,7 +635,8 @@ sql_exec_error_callback(void *arg)
fn_name = NameStr(functup->proname);
/*
- * If there is a syntax error position, convert to internal syntax error
+ * If there is a syntax error position, convert to internal syntax
+ * error
*/
syntaxerrposition = geterrposition();
if (syntaxerrposition > 0)
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 99173a17a07..b31cd8b0e97 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -45,7 +45,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.124 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.125 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -252,11 +252,11 @@ initialize_aggregates(AggState *aggstate,
}
/*
- * If we are reinitializing after a group boundary, we have to free
- * any prior transValue to avoid memory leakage. We must check not
- * only the isnull flag but whether the pointer is NULL; since
- * pergroupstate is initialized with palloc0, the initial condition
- * has isnull = 0 and null pointer.
+ * If we are reinitializing after a group boundary, we have to
+ * free any prior transValue to avoid memory leakage. We must
+ * check not only the isnull flag but whether the pointer is NULL;
+ * since pergroupstate is initialized with palloc0, the initial
+ * condition has isnull = 0 and null pointer.
*/
if (!peraggstate->transtypeByVal &&
!pergroupstate->transValueIsNull &&
@@ -811,14 +811,14 @@ agg_retrieve_direct(AggState *aggstate)
/*
* If we have no first tuple (ie, the outerPlan didn't return
* anything), create a dummy all-nulls input tuple for use by
- * ExecQual/ExecProject. 99.44% of the time this is a waste of cycles,
- * because ordinarily the projected output tuple's targetlist
- * cannot contain any direct (non-aggregated) references to input
- * columns, so the dummy tuple will not be referenced. However
- * there are special cases where this isn't so --- in particular
- * an UPDATE involving an aggregate will have a targetlist
- * reference to ctid. We need to return a null for ctid in that
- * situation, not coredump.
+ * ExecQual/ExecProject. 99.44% of the time this is a waste of
+ * cycles, because ordinarily the projected output tuple's
+ * targetlist cannot contain any direct (non-aggregated)
+ * references to input columns, so the dummy tuple will not be
+ * referenced. However there are special cases where this isn't so
+ * --- in particular an UPDATE involving an aggregate will have a
+ * targetlist reference to ctid. We need to return a null for
+ * ctid in that situation, not coredump.
*
* The values returned for the aggregates will be the initial values
* of the transition functions.
@@ -865,9 +865,9 @@ agg_retrieve_direct(AggState *aggstate)
if (ExecQual(aggstate->ss.ps.qual, econtext, false))
{
/*
- * Form and return a projection tuple using the aggregate results
- * and the representative input tuple. Note we do not support
- * aggregates returning sets ...
+ * Form and return a projection tuple using the aggregate
+ * results and the representative input tuple. Note we do not
+ * support aggregates returning sets ...
*/
return ExecProject(projInfo, NULL);
}
@@ -1009,9 +1009,9 @@ agg_retrieve_hash_table(AggState *aggstate)
if (ExecQual(aggstate->ss.ps.qual, econtext, false))
{
/*
- * Form and return a projection tuple using the aggregate results
- * and the representative input tuple. Note we do not support
- * aggregates returning sets ...
+ * Form and return a projection tuple using the aggregate
+ * results and the representative input tuple. Note we do not
+ * support aggregates returning sets ...
*/
return ExecProject(projInfo, NULL);
}
@@ -1478,7 +1478,10 @@ ExecReScanAgg(AggState *node, ExprContext *exprCtxt)
}
else
{
- /* Reset the per-group state (in particular, mark transvalues null) */
+ /*
+ * Reset the per-group state (in particular, mark transvalues
+ * null)
+ */
MemSet(node->pergroup, 0,
sizeof(AggStatePerGroupData) * node->numaggs);
}
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index f941ec32890..fbc55655714 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.63 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.64 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -559,7 +559,7 @@ ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
if (nread != sizeof(HeapTupleData))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read from hash-join temporary file: %m")));
+ errmsg("could not read from hash-join temporary file: %m")));
heapTuple = palloc(HEAPTUPLESIZE + htup.t_len);
memcpy((char *) heapTuple, (char *) &htup, sizeof(HeapTupleData));
heapTuple->t_datamcxt = CurrentMemoryContext;
@@ -569,7 +569,7 @@ ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
if (nread != (size_t) htup.t_len)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read from hash-join temporary file: %m")));
+ errmsg("could not read from hash-join temporary file: %m")));
return ExecStoreTuple(heapTuple, tupleSlot, InvalidBuffer, true);
}
@@ -627,14 +627,14 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
if (BufFileSeek(hashtable->outerBatchFile[newbatch - 1], 0, 0L, SEEK_SET))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not rewind hash-join temporary file: %m")));
+ errmsg("could not rewind hash-join temporary file: %m")));
innerFile = hashtable->innerBatchFile[newbatch - 1];
if (BufFileSeek(innerFile, 0, 0L, SEEK_SET))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not rewind hash-join temporary file: %m")));
+ errmsg("could not rewind hash-join temporary file: %m")));
/*
* Reload the hash table with the new inner batch
@@ -685,12 +685,12 @@ ExecHashJoinSaveTuple(HeapTuple heapTuple,
if (written != sizeof(HeapTupleData))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write to hash-join temporary file: %m")));
+ errmsg("could not write to hash-join temporary file: %m")));
written = BufFileWrite(file, (void *) heapTuple->t_data, heapTuple->t_len);
if (written != (size_t) heapTuple->t_len)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write to hash-join temporary file: %m")));
+ errmsg("could not write to hash-join temporary file: %m")));
}
void
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index eb7b5720359..2ff0121baff 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.96 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.97 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -38,7 +38,7 @@
* In a multiple-index plan, we must take care to return any given tuple
* only once, even if it matches conditions of several index scans. Our
* preferred way to do this is to record already-returned tuples in a hash
- * table (using the TID as unique identifier). However, in a very large
+ * table (using the TID as unique identifier). However, in a very large
* scan this could conceivably run out of memory. We limit the hash table
* to no more than work_mem KB; if it grows past that, we fall back to the
* pre-7.4 technique: evaluate the prior-scan index quals again for each
@@ -129,11 +129,11 @@ IndexNext(IndexScanState *node)
scanrelid = ((IndexScan *) node->ss.ps.plan)->scan.scanrelid;
/*
- * Clear any reference to the previously returned tuple. The idea here
- * is to not have the tuple slot be the last holder of a pin on that
- * tuple's buffer; if it is, we'll need a separate visit to the bufmgr
- * to release the buffer. By clearing here, we get to have the release
- * done by ReleaseAndReadBuffer inside index_getnext.
+ * Clear any reference to the previously returned tuple. The idea
+ * here is to not have the tuple slot be the last holder of a pin on
+ * that tuple's buffer; if it is, we'll need a separate visit to the
+ * bufmgr to release the buffer. By clearing here, we get to have the
+ * release done by ReleaseAndReadBuffer inside index_getnext.
*/
ExecClearTuple(slot);
@@ -215,8 +215,9 @@ IndexNext(IndexScanState *node)
false); /* don't pfree */
/*
- * If any of the index operators involved in this scan are lossy,
- * recheck them by evaluating the original operator clauses.
+ * If any of the index operators involved in this scan are
+ * lossy, recheck them by evaluating the original operator
+ * clauses.
*/
if (lossyQual)
{
@@ -224,15 +225,19 @@ IndexNext(IndexScanState *node)
ResetExprContext(econtext);
if (!ExecQual(lossyQual, econtext, false))
{
- /* Fails lossy op, so drop it and loop back for another */
+ /*
+ * Fails lossy op, so drop it and loop back for
+ * another
+ */
ExecClearTuple(slot);
continue;
}
}
/*
- * If it's a multiple-index scan, make sure not to double-report
- * a tuple matched by more than one index. (See notes above.)
+ * If it's a multiple-index scan, make sure not to
+ * double-report a tuple matched by more than one index. (See
+ * notes above.)
*/
if (numIndices > 1)
{
@@ -240,7 +245,7 @@ IndexNext(IndexScanState *node)
if (node->iss_DupHash)
{
DupHashTabEntry *entry;
- bool found;
+ bool found;
entry = (DupHashTabEntry *)
hash_search(node->iss_DupHash,
@@ -248,7 +253,7 @@ IndexNext(IndexScanState *node)
HASH_ENTER,
&found);
if (entry == NULL ||
- node->iss_DupHash->hctl->nentries > node->iss_MaxHash)
+ node->iss_DupHash->hctl->nentries > node->iss_MaxHash)
{
/* out of memory (either hard or soft limit) */
/* release hash table and fall thru to old code */
@@ -679,10 +684,11 @@ ExecInitIndexScan(IndexScan *node, EState *estate)
* initialize child expressions
*
* Note: we don't initialize all of the indxqual expression, only the
- * sub-parts corresponding to runtime keys (see below). The indxqualorig
- * expression is always initialized even though it will only be used in
- * some uncommon cases --- would be nice to improve that. (Problem is
- * that any SubPlans present in the expression must be found now...)
+ * sub-parts corresponding to runtime keys (see below). The
+ * indxqualorig expression is always initialized even though it will
+ * only be used in some uncommon cases --- would be nice to improve
+ * that. (Problem is that any SubPlans present in the expression must
+ * be found now...)
*/
indexstate->ss.ps.targetlist = (List *)
ExecInitExpr((Expr *) node->scan.plan.targetlist,
@@ -788,14 +794,14 @@ ExecInitIndexScan(IndexScan *node, EState *estate)
lossyflag_cell = list_head(lossyflags);
for (j = 0; j < n_keys; j++)
{
- OpExpr *clause; /* one clause of index qual */
- Expr *leftop; /* expr on lhs of operator */
- Expr *rightop; /* expr on rhs ... */
+ OpExpr *clause; /* one clause of index qual */
+ Expr *leftop; /* expr on lhs of operator */
+ Expr *rightop; /* expr on rhs ... */
int flags = 0;
AttrNumber varattno; /* att number used in scan */
StrategyNumber strategy; /* op's strategy number */
- Oid subtype; /* op's strategy subtype */
- int lossy; /* op's recheck flag */
+ Oid subtype; /* op's strategy subtype */
+ int lossy; /* op's recheck flag */
RegProcedure opfuncid; /* operator proc id used in scan */
Datum scanvalue; /* value used in scan (if const) */
@@ -819,15 +825,16 @@ ExecInitIndexScan(IndexScan *node, EState *estate)
/*
* Here we figure out the contents of the index qual. The
* usual case is (var op const) which means we form a scan key
- * for the attribute listed in the var node and use the value of
- * the const as comparison data.
+ * for the attribute listed in the var node and use the value
+ * of the const as comparison data.
*
* If we don't have a const node, it means our scan key is a
- * function of information obtained during the execution of the
- * plan, in which case we need to recalculate the index scan key
- * at run time. Hence, we set have_runtime_keys to true and place
- * the appropriate subexpression in run_keys. The corresponding
- * scan key values are recomputed at run time.
+ * function of information obtained during the execution of
+ * the plan, in which case we need to recalculate the index
+ * scan key at run time. Hence, we set have_runtime_keys to
+ * true and place the appropriate subexpression in run_keys.
+ * The corresponding scan key values are recomputed at run
+ * time.
*/
run_keys[j] = NULL;
@@ -892,18 +899,18 @@ ExecInitIndexScan(IndexScan *node, EState *estate)
scanvalue); /* constant */
/*
- * If this operator is lossy, add its indxqualorig
- * expression to the list of quals to recheck. The
- * list_nth() calls here could be avoided by chasing the
- * lists in parallel to all the other lists, but since
- * lossy operators are very uncommon, it's probably a
- * waste of time to do so.
+ * If this operator is lossy, add its indxqualorig expression
+ * to the list of quals to recheck. The list_nth() calls here
+ * could be avoided by chasing the lists in parallel to all
+ * the other lists, but since lossy operators are very
+ * uncommon, it's probably a waste of time to do so.
*/
if (lossy)
{
- List *qualOrig = indexstate->indxqualorig;
+ List *qualOrig = indexstate->indxqualorig;
+
lossyQuals[i] = lappend(lossyQuals[i],
- list_nth((List *) list_nth(qualOrig, i), j));
+ list_nth((List *) list_nth(qualOrig, i), j));
}
}
@@ -1037,7 +1044,7 @@ create_duphash(IndexScanState *node)
node->iss_DupHash = hash_create("DupHashTable",
nbuckets,
&hash_ctl,
- HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+ HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
if (node->iss_DupHash == NULL)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index 585eee19fe0..e913757d2ee 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.67 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.68 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -104,10 +104,10 @@ static void
MJFormSkipQuals(List *qualList, List **ltQuals, List **gtQuals,
PlanState *parent)
{
- List *ltexprs,
- *gtexprs;
- ListCell *ltcdr,
- *gtcdr;
+ List *ltexprs,
+ *gtexprs;
+ ListCell *ltcdr,
+ *gtcdr;
/*
* Make modifiable copies of the qualList.
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index 7a4c0cc80bb..f3976c872a5 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.49 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.50 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,11 +62,11 @@ SeqNext(SeqScanState *node)
slot = node->ss_ScanTupleSlot;
/*
- * Clear any reference to the previously returned tuple. The idea here
- * is to not have the tuple slot be the last holder of a pin on that
- * tuple's buffer; if it is, we'll need a separate visit to the bufmgr
- * to release the buffer. By clearing here, we get to have the release
- * done by ReleaseAndReadBuffer inside heap_getnext.
+ * Clear any reference to the previously returned tuple. The idea
+ * here is to not have the tuple slot be the last holder of a pin on
+ * that tuple's buffer; if it is, we'll need a separate visit to the
+ * bufmgr to release the buffer. By clearing here, we get to have the
+ * release done by ReleaseAndReadBuffer inside heap_getnext.
*/
ExecClearTuple(slot);
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index 028640c4b91..0a35b111109 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.64 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.65 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -912,7 +912,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
SubLinkType subLinkType = subplan->subLinkType;
MemoryContext oldcontext;
TupleTableSlot *slot;
- ListCell *l;
+ ListCell *l;
bool found = false;
ArrayBuildState *astate = NULL;
diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c
index 183068a3198..3b71629ad50 100644
--- a/src/backend/executor/nodeUnique.c
+++ b/src/backend/executor/nodeUnique.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.43 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.44 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -109,8 +109,9 @@ ExecUnique(UniqueState *node)
* he next calls us.
*
* tgl 3/2004: the above concern is no longer valid; junkfilters used to
- * modify their input's return slot but don't anymore, and I don't think
- * anyplace else does either. Not worth changing this code though.
+ * modify their input's return slot but don't anymore, and I don't
+ * think anyplace else does either. Not worth changing this code
+ * though.
*/
if (node->priorTuple != NULL)
heap_freetuple(node->priorTuple);
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index e2f7800a0bb..4ffb27b0139 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.124 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.125 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,17 +29,17 @@ int SPI_result;
static _SPI_connection *_SPI_stack = NULL;
static _SPI_connection *_SPI_current = NULL;
-static int _SPI_stack_depth = 0; /* allocated size of _SPI_stack */
+static int _SPI_stack_depth = 0; /* allocated size of _SPI_stack */
static int _SPI_connected = -1;
static int _SPI_curid = -1;
static int _SPI_execute(const char *src, int tcount, _SPI_plan *plan);
-static int _SPI_pquery(QueryDesc *queryDesc, bool runit,
- bool useCurrentSnapshot, int tcount);
+static int _SPI_pquery(QueryDesc *queryDesc, bool runit,
+ bool useCurrentSnapshot, int tcount);
static int _SPI_execute_plan(_SPI_plan *plan,
- Datum *Values, const char *Nulls,
- bool useCurrentSnapshot, int tcount);
+ Datum *Values, const char *Nulls,
+ bool useCurrentSnapshot, int tcount);
static void _SPI_error_callback(void *arg);
@@ -60,7 +60,7 @@ static bool _SPI_checktuples(void);
int
SPI_connect(void)
{
- int newdepth;
+ int newdepth;
/*
* When procedure called by Executor _SPI_curid expected to be equal
@@ -107,9 +107,9 @@ SPI_connect(void)
/*
* Create memory contexts for this procedure
*
- * XXX it would be better to use PortalContext as the parent context,
- * but we may not be inside a portal (consider deferred-trigger
- * execution). Perhaps CurTransactionContext would do? For now it
+ * XXX it would be better to use PortalContext as the parent context, but
+ * we may not be inside a portal (consider deferred-trigger
+ * execution). Perhaps CurTransactionContext would do? For now it
* doesn't matter because we clean up explicitly in AtEOSubXact_SPI().
*/
_SPI_current->procCxt = AllocSetContextCreate(TopTransactionContext,
@@ -201,7 +201,7 @@ AtEOXact_SPI(bool isCommit)
void
AtEOSubXact_SPI(bool isCommit, TransactionId childXid)
{
- bool found = false;
+ bool found = false;
while (_SPI_connected >= 0)
{
@@ -213,10 +213,10 @@ AtEOSubXact_SPI(bool isCommit, TransactionId childXid)
found = true;
/*
- * Pop the stack entry and reset global variables. Unlike
+ * Pop the stack entry and reset global variables. Unlike
* SPI_finish(), we don't risk switching to memory contexts that
- * might be already gone, or deleting memory contexts that have been
- * or will be thrown away anyway.
+ * might be already gone, or deleting memory contexts that have
+ * been or will be thrown away anyway.
*/
_SPI_connected--;
_SPI_curid = _SPI_connected;
@@ -418,7 +418,7 @@ HeapTupleHeader
SPI_returntuple(HeapTuple tuple, TupleDesc tupdesc)
{
MemoryContext oldcxt = NULL;
- HeapTupleHeader dtup;
+ HeapTupleHeader dtup;
if (tuple == NULL || tupdesc == NULL)
{
@@ -936,7 +936,7 @@ SPI_cursor_close(Portal portal)
Oid
SPI_getargtypeid(void *plan, int argIndex)
{
- if (plan == NULL || argIndex < 0 || argIndex >= ((_SPI_plan*)plan)->nargs)
+ if (plan == NULL || argIndex < 0 || argIndex >= ((_SPI_plan *) plan)->nargs)
{
SPI_result = SPI_ERROR_ARGUMENT;
return InvalidOid;
@@ -965,13 +965,13 @@ SPI_getargcount(void *plan)
* if the command can be used with SPI_cursor_open
*
* Parameters
- * plan A plan previously prepared using SPI_prepare
+ * plan A plan previously prepared using SPI_prepare
*/
bool
SPI_is_cursor_plan(void *plan)
{
- _SPI_plan *spiplan = (_SPI_plan *) plan;
- List *qtlist;
+ _SPI_plan *spiplan = (_SPI_plan *) plan;
+ List *qtlist;
if (spiplan == NULL)
{
@@ -982,7 +982,7 @@ SPI_is_cursor_plan(void *plan)
qtlist = spiplan->qtlist;
if (list_length(spiplan->ptlist) == 1 && list_length(qtlist) == 1)
{
- Query *queryTree = (Query *) linitial((List *) linitial(qtlist));
+ Query *queryTree = (Query *) linitial((List *) linitial(qtlist));
if (queryTree->commandType == CMD_SELECT && queryTree->into == NULL)
return true;
@@ -993,7 +993,7 @@ SPI_is_cursor_plan(void *plan)
/*
* SPI_result_code_string --- convert any SPI return code to a string
*
- * This is often useful in error messages. Most callers will probably
+ * This is often useful in error messages. Most callers will probably
* only pass negative (error-case) codes, but for generality we recognize
* the success codes too.
*/
@@ -1483,8 +1483,8 @@ _SPI_error_callback(void *arg)
int syntaxerrposition;
/*
- * If there is a syntax error position, convert to internal syntax error;
- * otherwise treat the query as an item of context stack
+ * If there is a syntax error position, convert to internal syntax
+ * error; otherwise treat the query as an item of context stack
*/
syntaxerrposition = geterrposition();
if (syntaxerrposition > 0)
@@ -1632,7 +1632,8 @@ _SPI_copy_plan(_SPI_plan *plan, int location)
parentcxt = _SPI_current->procCxt;
else if (location == _SPI_CPLAN_TOPCXT)
parentcxt = TopMemoryContext;
- else /* (this case not currently used) */
+ else
+/* (this case not currently used) */
parentcxt = CurrentMemoryContext;
/*