Diffstat (limited to 'src/backend/commands')
-rw-r--r--  src/backend/commands/analyze.c      113
-rw-r--r--  src/backend/commands/async.c         10
-rw-r--r--  src/backend/commands/cluster.c       43
-rw-r--r--  src/backend/commands/command.c      661
-rw-r--r--  src/backend/commands/comment.c        4
-rw-r--r--  src/backend/commands/copy.c          84
-rw-r--r--  src/backend/commands/creatinh.c      59
-rw-r--r--  src/backend/commands/dbcommands.c    73
-rw-r--r--  src/backend/commands/define.c        63
-rw-r--r--  src/backend/commands/explain.c        4
-rw-r--r--  src/backend/commands/indexcmds.c     86
-rw-r--r--  src/backend/commands/proclang.c       2
-rw-r--r--  src/backend/commands/remove.c         6
-rw-r--r--  src/backend/commands/rename.c        19
-rw-r--r--  src/backend/commands/sequence.c      87
-rw-r--r--  src/backend/commands/trigger.c       78
-rw-r--r--  src/backend/commands/user.c          17
-rw-r--r--  src/backend/commands/vacuum.c       243
-rw-r--r--  src/backend/commands/variable.c     123
-rw-r--r--  src/backend/commands/view.c          26
20 files changed, 938 insertions(+), 863 deletions(-)
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 4db0068da82..f4e056bd0a7 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.14 2001/02/16 03:16:58 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.15 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -86,9 +86,10 @@ analyze_rel(Oid relid, List *anal_cols2, int MESSAGE_LEVEL)
CommitTransactionCommand();
return;
}
+
/*
- * We can VACUUM ANALYZE any table except pg_statistic.
- * see update_relstats
+ * We can VACUUM ANALYZE any table except pg_statistic. see
+ * update_relstats
*/
if (strcmp(NameStr(((Form_pg_class) GETSTRUCT(tuple))->relname),
StatisticRelationName) == 0)
@@ -104,10 +105,12 @@ analyze_rel(Oid relid, List *anal_cols2, int MESSAGE_LEVEL)
if (!pg_ownercheck(GetUserId(), RelationGetRelationName(onerel),
RELNAME))
{
- /* we already did an elog during vacuum
- elog(NOTICE, "Skipping \"%s\" --- only table owner can VACUUM it",
- RelationGetRelationName(onerel));
- */
+
+ /*
+ * we already did an elog during vacuum elog(NOTICE, "Skipping
+ * \"%s\" --- only table owner can VACUUM it",
+ * RelationGetRelationName(onerel));
+ */
heap_close(onerel, NoLock);
CommitTransactionCommand();
return;
@@ -136,7 +139,7 @@ analyze_rel(Oid relid, List *anal_cols2, int MESSAGE_LEVEL)
if (namestrcmp(&(attr[i]->attname), col) == 0)
break;
}
- if (i < attr_cnt) /* found */
+ if (i < attr_cnt) /* found */
attnums[tcnt++] = i;
else
{
@@ -295,15 +298,16 @@ attr_stats(Relation onerel, int attr_cnt, VacAttrStats *vacattrstats, HeapTuple
stats->nonnull_cnt++;
/*
- * If the value is toasted, detoast it to avoid repeated detoastings
- * and resultant memory leakage inside the comparison routines.
+ * If the value is toasted, detoast it to avoid repeated
+ * detoastings and resultant memory leakage inside the comparison
+ * routines.
*/
if (!stats->attr->attbyval && stats->attr->attlen == -1)
value = PointerGetDatum(PG_DETOAST_DATUM(origvalue));
else
value = origvalue;
- if (! stats->initialized)
+ if (!stats->initialized)
{
bucketcpy(stats->attr, value, &stats->best, &stats->best_len);
/* best_cnt gets incremented below */
@@ -433,7 +437,7 @@ bucketcpy(Form_pg_attribute attr, Datum value, Datum *bucket, int *bucket_len)
* Of course, this only works for fixed-size never-null columns, but
* dispersion is.
*
- * pg_statistic rows are just added normally. This means that
+ * pg_statistic rows are just added normally. This means that
* pg_statistic will probably contain some deleted rows at the
* completion of a vacuum cycle, unless it happens to get vacuumed last.
*
@@ -467,7 +471,7 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
VacAttrStats *stats;
attp = (Form_pg_attribute) GETSTRUCT(atup);
- if (attp->attnum <= 0) /* skip system attributes for now */
+ if (attp->attnum <= 0) /* skip system attributes for now */
continue;
for (i = 0; i < natts; i++)
@@ -476,47 +480,45 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
break;
}
if (i >= natts)
- continue; /* skip attr if no stats collected */
+ continue; /* skip attr if no stats collected */
stats = &(vacattrstats[i]);
if (VacAttrStatsEqValid(stats))
{
- float4 selratio; /* average ratio of rows selected
- * for a random constant */
+ float4 selratio; /* average ratio of rows selected
+ * for a random constant */
/* Compute dispersion */
if (stats->nonnull_cnt == 0 && stats->null_cnt == 0)
{
/*
- * empty relation, so put a dummy value in
- * attdispersion
+ * empty relation, so put a dummy value in attdispersion
*/
selratio = 0;
}
else if (stats->null_cnt <= 1 && stats->best_cnt == 1)
{
+
/*
- * looks like we have a unique-key attribute --- flag
- * this with special -1.0 flag value.
+ * looks like we have a unique-key attribute --- flag this
+ * with special -1.0 flag value.
*
- * The correct dispersion is 1.0/numberOfRows, but since
- * the relation row count can get updated without
- * recomputing dispersion, we want to store a
- * "symbolic" value and figure 1.0/numberOfRows on the
- * fly.
+ * The correct dispersion is 1.0/numberOfRows, but since the
+ * relation row count can get updated without recomputing
+ * dispersion, we want to store a "symbolic" value and
+ * figure 1.0/numberOfRows on the fly.
*/
selratio = -1;
}
else
{
if (VacAttrStatsLtGtValid(stats) &&
- stats->min_cnt + stats->max_cnt == stats->nonnull_cnt)
+ stats->min_cnt + stats->max_cnt == stats->nonnull_cnt)
{
/*
- * exact result when there are just 1 or 2
- * values...
+ * exact result when there are just 1 or 2 values...
*/
double min_cnt_d = stats->min_cnt,
max_cnt_d = stats->max_cnt,
@@ -552,12 +554,12 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
/*
* Create pg_statistic tuples for the relation, if we have
- * gathered the right data. del_stats() previously
- * deleted all the pg_statistic tuples for the rel, so we
- * just have to insert new ones here.
+ * gathered the right data. del_stats() previously deleted
+ * all the pg_statistic tuples for the rel, so we just have to
+ * insert new ones here.
*
- * Note analyze_rel() has seen to it that we won't come here
- * when vacuuming pg_statistic itself.
+ * Note analyze_rel() has seen to it that we won't come here when
+ * vacuuming pg_statistic itself.
*/
if (VacAttrStatsLtGtValid(stats) && stats->initialized)
{
@@ -567,7 +569,7 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
char *out_string;
double best_cnt_d = stats->best_cnt,
null_cnt_d = stats->null_cnt,
- nonnull_cnt_d = stats->nonnull_cnt; /* prevent overflow */
+ nonnull_cnt_d = stats->nonnull_cnt; /* prevent overflow */
Datum values[Natts_pg_statistic];
char nulls[Natts_pg_statistic];
Relation irelations[Num_pg_statistic_indices];
@@ -585,31 +587,31 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
* ----------------
*/
i = 0;
- values[i++] = ObjectIdGetDatum(relid); /* starelid */
- values[i++] = Int16GetDatum(attp->attnum); /* staattnum */
- values[i++] = ObjectIdGetDatum(stats->op_cmplt); /* staop */
- values[i++] = Float4GetDatum(nullratio); /* stanullfrac */
- values[i++] = Float4GetDatum(bestratio); /* stacommonfrac */
+ values[i++] = ObjectIdGetDatum(relid); /* starelid */
+ values[i++] = Int16GetDatum(attp->attnum); /* staattnum */
+ values[i++] = ObjectIdGetDatum(stats->op_cmplt); /* staop */
+ values[i++] = Float4GetDatum(nullratio); /* stanullfrac */
+ values[i++] = Float4GetDatum(bestratio); /* stacommonfrac */
out_string = DatumGetCString(FunctionCall3(&out_function,
- stats->best,
- ObjectIdGetDatum(stats->typelem),
- Int32GetDatum(stats->attr->atttypmod)));
- values[i++] = DirectFunctionCall1(textin, /* stacommonval */
- CStringGetDatum(out_string));
+ stats->best,
+ ObjectIdGetDatum(stats->typelem),
+ Int32GetDatum(stats->attr->atttypmod)));
+ values[i++] = DirectFunctionCall1(textin, /* stacommonval */
+ CStringGetDatum(out_string));
pfree(out_string);
out_string = DatumGetCString(FunctionCall3(&out_function,
- stats->min,
- ObjectIdGetDatum(stats->typelem),
- Int32GetDatum(stats->attr->atttypmod)));
- values[i++] = DirectFunctionCall1(textin, /* staloval */
- CStringGetDatum(out_string));
+ stats->min,
+ ObjectIdGetDatum(stats->typelem),
+ Int32GetDatum(stats->attr->atttypmod)));
+ values[i++] = DirectFunctionCall1(textin, /* staloval */
+ CStringGetDatum(out_string));
pfree(out_string);
out_string = DatumGetCString(FunctionCall3(&out_function,
- stats->max,
- ObjectIdGetDatum(stats->typelem),
- Int32GetDatum(stats->attr->atttypmod)));
- values[i++] = DirectFunctionCall1(textin, /* stahival */
- CStringGetDatum(out_string));
+ stats->max,
+ ObjectIdGetDatum(stats->typelem),
+ Int32GetDatum(stats->attr->atttypmod)));
+ values[i++] = DirectFunctionCall1(textin, /* stahival */
+ CStringGetDatum(out_string));
pfree(out_string);
stup = heap_formtuple(sd->rd_att, values, nulls);
@@ -682,6 +684,3 @@ del_stats(Oid relid, int attcnt, int *attnums)
*/
heap_close(pgstatistic, NoLock);
}
-
-
-
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index 134f3b7af0e..1eb29dcc99a 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.76 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.77 2001/03/22 03:59:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -130,7 +130,7 @@ static void NotifyMyFrontEnd(char *relname, int32 listenerPID);
static int AsyncExistsPendingNotify(char *relname);
static void ClearPendingNotifies(void);
-bool Trace_notify = false;
+bool Trace_notify = false;
/*
@@ -161,6 +161,7 @@ Async_Notify(char *relname)
/* no point in making duplicate entries in the list ... */
if (!AsyncExistsPendingNotify(relname))
{
+
/*
* We allocate list memory from the global malloc pool to ensure
* that it will live until we want to use it. This is probably
@@ -349,9 +350,7 @@ Async_UnlistenAll()
sRel = heap_beginscan(lRel, 0, SnapshotNow, 1, key);
while (HeapTupleIsValid(lTuple = heap_getnext(sRel, 0)))
- {
simple_heap_delete(lRel, &lTuple->t_self);
- }
heap_endscan(sRel);
heap_close(lRel, AccessExclusiveLock);
@@ -499,6 +498,7 @@ AtCommit_Notify()
*/
if (kill(listenerPID, SIGUSR2) < 0)
{
+
/*
* Get rid of pg_listener entry if it refers to a PID
* that no longer exists. Presumably, that backend
@@ -794,7 +794,7 @@ ProcessIncomingNotify(void)
if (Trace_notify)
elog(DEBUG, "ProcessIncomingNotify: received %s from %d",
- relname, (int) sourcePID);
+ relname, (int) sourcePID);
NotifyMyFrontEnd(relname, sourcePID);
/* Rewrite the tuple with 0 in notification column */
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 76f805ca86c..826407c8eb6 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.64 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.65 2001/03/22 03:59:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,7 +37,7 @@
#include "utils/temprel.h"
-static Oid copy_heap(Oid OIDOldHeap, char *NewName, bool istemp);
+static Oid copy_heap(Oid OIDOldHeap, char *NewName, bool istemp);
static void copy_index(Oid OIDOldIndex, Oid OIDNewHeap, char *NewIndexName);
static void rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex);
@@ -75,8 +75,8 @@ cluster(char *oldrelname, char *oldindexname)
StrNCpy(saveoldindexname, oldindexname, NAMEDATALEN);
/*
- * We grab exclusive access to the target rel and index for the duration
- * of the transaction.
+ * We grab exclusive access to the target rel and index for the
+ * duration of the transaction.
*/
OldHeap = heap_openr(saveoldrelname, AccessExclusiveLock);
OIDOldHeap = RelationGetRelid(OldHeap);
@@ -154,8 +154,8 @@ copy_heap(Oid OIDOldHeap, char *NewName, bool istemp)
OldHeapDesc = RelationGetDescr(OldHeap);
/*
- * Need to make a copy of the tuple descriptor,
- * since heap_create_with_catalog modifies it.
+ * Need to make a copy of the tuple descriptor, since
+ * heap_create_with_catalog modifies it.
*/
tupdesc = CreateTupleDescCopyConstr(OldHeapDesc);
@@ -164,16 +164,15 @@ copy_heap(Oid OIDOldHeap, char *NewName, bool istemp)
allowSystemTableMods);
/*
- * Advance command counter so that the newly-created
- * relation's catalog tuples will be visible to heap_open.
+ * Advance command counter so that the newly-created relation's
+ * catalog tuples will be visible to heap_open.
*/
CommandCounterIncrement();
/*
- * If necessary, create a TOAST table for the new relation.
- * Note that AlterTableCreateToastTable ends with
- * CommandCounterIncrement(), so that the TOAST table will
- * be visible for insertion.
+ * If necessary, create a TOAST table for the new relation. Note that
+ * AlterTableCreateToastTable ends with CommandCounterIncrement(), so
+ * that the TOAST table will be visible for insertion.
*/
AlterTableCreateToastTable(NewName, true);
@@ -198,12 +197,12 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap, char *NewIndexName)
/*
* Create a new index like the old one. To do this I get the info
- * from pg_index, and add a new index with a temporary name (that
- * will be changed later).
+ * from pg_index, and add a new index with a temporary name (that will
+ * be changed later).
*
- * NOTE: index_create will cause the new index to be a temp relation
- * if its parent table is, so we don't need to do anything special
- * for the temp-table case here.
+ * NOTE: index_create will cause the new index to be a temp relation if
+ * its parent table is, so we don't need to do anything special for
+ * the temp-table case here.
*/
Old_pg_index_Tuple = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(OIDOldIndex),
@@ -214,7 +213,7 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap, char *NewIndexName)
indexInfo = BuildIndexInfo(Old_pg_index_Tuple);
Old_pg_index_relation_Tuple = SearchSysCache(RELOID,
- ObjectIdGetDatum(OIDOldIndex),
+ ObjectIdGetDatum(OIDOldIndex),
0, 0, 0);
Assert(Old_pg_index_relation_Tuple);
Old_pg_index_relation_Form = (Form_pg_class) GETSTRUCT(Old_pg_index_relation_Tuple);
@@ -266,13 +265,15 @@ rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
LocalHeapTuple.t_datamcxt = NULL;
LocalHeapTuple.t_data = NULL;
heap_fetch(LocalOldHeap, SnapshotNow, &LocalHeapTuple, &LocalBuffer);
- if (LocalHeapTuple.t_data != NULL) {
+ if (LocalHeapTuple.t_data != NULL)
+ {
+
/*
* We must copy the tuple because heap_insert() will overwrite
* the commit-status fields of the tuple it's handed, and the
* retrieved tuple will actually be in a disk buffer! Thus,
- * the source relation would get trashed, which is bad news
- * if we abort later on. (This was a bug in releases thru 7.0)
+ * the source relation would get trashed, which is bad news if
+ * we abort later on. (This was a bug in releases thru 7.0)
*/
HeapTuple copiedTuple = heap_copytuple(&LocalHeapTuple);
diff --git a/src/backend/commands/command.c b/src/backend/commands/command.c
index 8a3be15a052..49d1edf4c4b 100644
--- a/src/backend/commands/command.c
+++ b/src/backend/commands/command.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.122 2001/02/27 22:07:34 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.123 2001/03/22 03:59:21 momjian Exp $
*
* NOTES
* The PerformAddAttribute() code, like most of the relation
@@ -173,29 +173,29 @@ PerformPortalFetch(char *name,
* at the end of the available tuples in that direction. If so, do
* nothing. (This check exists because not all plan node types are
* robust about being called again if they've already returned NULL
- * once.) If it's OK to do the fetch, call the executor. Then,
+ * once.) If it's OK to do the fetch, call the executor. Then,
* update the atStart/atEnd state depending on the number of tuples
* that were retrieved.
* ----------------
*/
if (forward)
{
- if (! portal->atEnd)
+ if (!portal->atEnd)
{
ExecutorRun(queryDesc, estate, EXEC_FOR, (long) count);
if (estate->es_processed > 0)
- portal->atStart = false; /* OK to back up now */
+ portal->atStart = false; /* OK to back up now */
if (count <= 0 || (int) estate->es_processed < count)
- portal->atEnd = true; /* we retrieved 'em all */
+ portal->atEnd = true; /* we retrieved 'em all */
}
}
else
{
- if (! portal->atStart)
+ if (!portal->atStart)
{
ExecutorRun(queryDesc, estate, EXEC_BACK, (long) count);
if (estate->es_processed > 0)
- portal->atEnd = false; /* OK to go forward now */
+ portal->atEnd = false; /* OK to go forward now */
if (count <= 0 || (int) estate->es_processed < count)
portal->atStart = true; /* we retrieved 'em all */
}
@@ -502,8 +502,8 @@ AlterTableAddColumn(const char *relationName,
heap_close(rel, NoLock);
/*
- * Automatically create the secondary relation for TOAST
- * if it formerly had no such but now has toastable attributes.
+ * Automatically create the secondary relation for TOAST if it
+ * formerly had no such but now has toastable attributes.
*/
CommandCounterIncrement();
AlterTableCreateToastTable(relationName, true);
@@ -842,7 +842,7 @@ RemoveColumnReferences(Oid reloid, int attnum, bool checkonly, HeapTuple reltup)
relcheck = (Form_pg_relcheck) GETSTRUCT(htup);
ccbin = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(&relcheck->rcbin)));
+ PointerGetDatum(&relcheck->rcbin)));
node = stringToNode(ccbin);
pfree(ccbin);
if (find_attribute_in_node(node, attnum))
@@ -890,7 +890,7 @@ RemoveColumnReferences(Oid reloid, int attnum, bool checkonly, HeapTuple reltup)
else
{
htup = SearchSysCache(RELOID,
- ObjectIdGetDatum(index->indexrelid),
+ ObjectIdGetDatum(index->indexrelid),
0, 0, 0);
RemoveIndex(NameStr(((Form_pg_class) GETSTRUCT(htup))->relname));
ReleaseSysCache(htup);
@@ -1106,339 +1106,361 @@ AlterTableAddConstraint(char *relationName,
#endif
/* Disallow ADD CONSTRAINT on views, indexes, sequences, etc */
- if (! is_relation(relationName))
+ if (!is_relation(relationName))
elog(ERROR, "ALTER TABLE ADD CONSTRAINT: %s is not a table",
relationName);
switch (nodeTag(newConstraint))
{
case T_Constraint:
- {
- Constraint *constr = (Constraint *) newConstraint;
-
- switch (constr->contype)
{
- case CONSTR_CHECK:
+ Constraint *constr = (Constraint *) newConstraint;
+
+ switch (constr->contype)
{
- ParseState *pstate;
- bool successful = true;
- HeapScanDesc scan;
- ExprContext *econtext;
- TupleTableSlot *slot;
- HeapTuple tuple;
- RangeTblEntry *rte;
- List *qual;
- List *constlist;
- Relation rel;
- Node *expr;
- char *name;
-
- if (constr->name)
- name = constr->name;
- else
- name = "<unnamed>";
-
- constlist = makeList1(constr);
-
- rel = heap_openr(relationName, AccessExclusiveLock);
-
- /* make sure it is not a view */
- if (rel->rd_rel->relkind == RELKIND_VIEW)
- elog(ERROR, "ALTER TABLE: cannot add constraint to a view");
-
- /*
- * Scan all of the rows, looking for a false match
- */
- scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL);
- AssertState(scan != NULL);
-
- /*
- * We need to make a parse state and range table to allow
- * us to transformExpr and fix_opids to get a version of
- * the expression we can pass to ExecQual
- */
- pstate = make_parsestate(NULL);
- rte = addRangeTableEntry(pstate, relationName, NULL,
- false, true);
- addRTEtoQuery(pstate, rte, true, true);
-
- /* Convert the A_EXPR in raw_expr into an EXPR */
- expr = transformExpr(pstate, constr->raw_expr,
- EXPR_COLUMN_FIRST);
-
- /*
- * Make sure it yields a boolean result.
- */
- if (exprType(expr) != BOOLOID)
- elog(ERROR, "CHECK '%s' does not yield boolean result",
- name);
-
- /*
- * Make sure no outside relations are referred to.
- */
- if (length(pstate->p_rtable) != 1)
- elog(ERROR, "Only relation '%s' can be referenced in CHECK",
- relationName);
-
- /*
- * Might as well try to reduce any constant expressions.
- */
- expr = eval_const_expressions(expr);
-
- /* And fix the opids */
- fix_opids(expr);
-
- qual = makeList1(expr);
-
- /* Make tuple slot to hold tuples */
- slot = MakeTupleTableSlot();
- ExecSetSlotDescriptor(slot, RelationGetDescr(rel), false);
- /* Make an expression context for ExecQual */
- econtext = MakeExprContext(slot, CurrentMemoryContext);
-
- /*
- * Scan through the rows now, checking the expression
- * at each row.
- */
- while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
- {
- ExecStoreTuple(tuple, slot, InvalidBuffer, false);
- if (!ExecQual(qual, econtext, true))
+ case CONSTR_CHECK:
{
- successful=false;
- break;
- }
- ResetExprContext(econtext);
- }
+ ParseState *pstate;
+ bool successful = true;
+ HeapScanDesc scan;
+ ExprContext *econtext;
+ TupleTableSlot *slot;
+ HeapTuple tuple;
+ RangeTblEntry *rte;
+ List *qual;
+ List *constlist;
+ Relation rel;
+ Node *expr;
+ char *name;
+
+ if (constr->name)
+ name = constr->name;
+ else
+ name = "<unnamed>";
+
+ constlist = makeList1(constr);
+
+ rel = heap_openr(relationName, AccessExclusiveLock);
+
+ /* make sure it is not a view */
+ if (rel->rd_rel->relkind == RELKIND_VIEW)
+ elog(ERROR, "ALTER TABLE: cannot add constraint to a view");
+
+ /*
+ * Scan all of the rows, looking for a false
+ * match
+ */
+ scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL);
+ AssertState(scan != NULL);
+
+ /*
+ * We need to make a parse state and range
+ * table to allow us to transformExpr and
+ * fix_opids to get a version of the
+ * expression we can pass to ExecQual
+ */
+ pstate = make_parsestate(NULL);
+ rte = addRangeTableEntry(pstate, relationName, NULL,
+ false, true);
+ addRTEtoQuery(pstate, rte, true, true);
+
+ /* Convert the A_EXPR in raw_expr into an EXPR */
+ expr = transformExpr(pstate, constr->raw_expr,
+ EXPR_COLUMN_FIRST);
+
+ /*
+ * Make sure it yields a boolean result.
+ */
+ if (exprType(expr) != BOOLOID)
+ elog(ERROR, "CHECK '%s' does not yield boolean result",
+ name);
+
+ /*
+ * Make sure no outside relations are referred
+ * to.
+ */
+ if (length(pstate->p_rtable) != 1)
+ elog(ERROR, "Only relation '%s' can be referenced in CHECK",
+ relationName);
+
+ /*
+ * Might as well try to reduce any constant
+ * expressions.
+ */
+ expr = eval_const_expressions(expr);
+
+ /* And fix the opids */
+ fix_opids(expr);
+
+ qual = makeList1(expr);
+
+ /* Make tuple slot to hold tuples */
+ slot = MakeTupleTableSlot();
+ ExecSetSlotDescriptor(slot, RelationGetDescr(rel), false);
+ /* Make an expression context for ExecQual */
+ econtext = MakeExprContext(slot, CurrentMemoryContext);
+
+ /*
+ * Scan through the rows now, checking the
+ * expression at each row.
+ */
+ while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
+ {
+ ExecStoreTuple(tuple, slot, InvalidBuffer, false);
+ if (!ExecQual(qual, econtext, true))
+ {
+ successful = false;
+ break;
+ }
+ ResetExprContext(econtext);
+ }
- FreeExprContext(econtext);
- pfree(slot);
+ FreeExprContext(econtext);
+ pfree(slot);
- heap_endscan(scan);
+ heap_endscan(scan);
- if (!successful)
- {
- heap_close(rel, NoLock);
- elog(ERROR, "AlterTableAddConstraint: rejected due to CHECK constraint %s", name);
- }
- /*
- * Call AddRelationRawConstraints to do the real adding --
- * It duplicates some of the above, but does not check the
- * validity of the constraint against tuples already in
- * the table.
- */
- AddRelationRawConstraints(rel, NIL, constlist);
- heap_close(rel, NoLock);
- pfree(constlist);
-
- break;
+ if (!successful)
+ {
+ heap_close(rel, NoLock);
+ elog(ERROR, "AlterTableAddConstraint: rejected due to CHECK constraint %s", name);
+ }
+
+ /*
+ * Call AddRelationRawConstraints to do the
+ * real adding -- It duplicates some of the
+ * above, but does not check the validity of
+ * the constraint against tuples already in
+ * the table.
+ */
+ AddRelationRawConstraints(rel, NIL, constlist);
+ heap_close(rel, NoLock);
+ pfree(constlist);
+
+ break;
+ }
+ default:
+ elog(ERROR, "ALTER TABLE / ADD CONSTRAINT is not implemented for that constraint type.");
}
- default:
- elog(ERROR, "ALTER TABLE / ADD CONSTRAINT is not implemented for that constraint type.");
+ break;
}
- break;
- }
case T_FkConstraint:
- {
- FkConstraint *fkconstraint = (FkConstraint *) newConstraint;
- Relation rel, pkrel;
- HeapScanDesc scan;
- HeapTuple tuple;
- Trigger trig;
- List *list;
- int count;
- List *indexoidlist,
- *indexoidscan;
- Form_pg_attribute *rel_attrs = NULL;
- int i;
- bool found = false;
-
- if (is_temp_rel_name(fkconstraint->pktable_name) &&
- !is_temp_rel_name(relationName))
- elog(ERROR, "ALTER TABLE / ADD CONSTRAINT: Unable to reference temporary table from permanent table constraint.");
-
- /*
- * Grab an exclusive lock on the pk table, so that someone
- * doesn't delete rows out from under us.
- */
-
- pkrel = heap_openr(fkconstraint->pktable_name, AccessExclusiveLock);
- if (pkrel->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "referenced table \"%s\" not a relation",
- fkconstraint->pktable_name);
-
- /*
- * Grab an exclusive lock on the fk table, and then scan
- * through each tuple, calling the RI_FKey_Match_Ins
- * (insert trigger) as if that tuple had just been
- * inserted. If any of those fail, it should elog(ERROR)
- * and that's that.
- */
- rel = heap_openr(relationName, AccessExclusiveLock);
- if (rel->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "referencing table \"%s\" not a relation",
- relationName);
-
- /* First we check for limited correctness of the constraint */
-
- rel_attrs = pkrel->rd_att->attrs;
- indexoidlist = RelationGetIndexList(pkrel);
-
- foreach(indexoidscan, indexoidlist)
{
- Oid indexoid = lfirsti(indexoidscan);
- HeapTuple indexTuple;
- Form_pg_index indexStruct;
-
- indexTuple = SearchSysCache(INDEXRELID,
- ObjectIdGetDatum(indexoid),
- 0, 0, 0);
- if (!HeapTupleIsValid(indexTuple))
- elog(ERROR, "transformFkeyGetPrimaryKey: index %u not found",
- indexoid);
- indexStruct = (Form_pg_index) GETSTRUCT(indexTuple);
-
- if (indexStruct->indisunique)
+ FkConstraint *fkconstraint = (FkConstraint *) newConstraint;
+ Relation rel,
+ pkrel;
+ HeapScanDesc scan;
+ HeapTuple tuple;
+ Trigger trig;
+ List *list;
+ int count;
+ List *indexoidlist,
+ *indexoidscan;
+ Form_pg_attribute *rel_attrs = NULL;
+ int i;
+ bool found = false;
+
+ if (is_temp_rel_name(fkconstraint->pktable_name) &&
+ !is_temp_rel_name(relationName))
+ elog(ERROR, "ALTER TABLE / ADD CONSTRAINT: Unable to reference temporary table from permanent table constraint.");
+
+ /*
+ * Grab an exclusive lock on the pk table, so that someone
+ * doesn't delete rows out from under us.
+ */
+
+ pkrel = heap_openr(fkconstraint->pktable_name, AccessExclusiveLock);
+ if (pkrel->rd_rel->relkind != RELKIND_RELATION)
+ elog(ERROR, "referenced table \"%s\" not a relation",
+ fkconstraint->pktable_name);
+
+ /*
+ * Grab an exclusive lock on the fk table, and then scan
+ * through each tuple, calling the RI_FKey_Match_Ins
+ * (insert trigger) as if that tuple had just been
+ * inserted. If any of those fail, it should elog(ERROR)
+ * and that's that.
+ */
+ rel = heap_openr(relationName, AccessExclusiveLock);
+ if (rel->rd_rel->relkind != RELKIND_RELATION)
+ elog(ERROR, "referencing table \"%s\" not a relation",
+ relationName);
+
+ /*
+ * First we check for limited correctness of the
+ * constraint
+ */
+
+ rel_attrs = pkrel->rd_att->attrs;
+ indexoidlist = RelationGetIndexList(pkrel);
+
+ foreach(indexoidscan, indexoidlist)
{
- List *attrl;
-
- /* Make sure this index has the same number of keys -- It obviously
- * won't match otherwise. */
- for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++);
- if (i!=length(fkconstraint->pk_attrs))
- found=false;
- else {
- /* go through the fkconstraint->pk_attrs list */
- foreach(attrl, fkconstraint->pk_attrs)
- {
- Ident *attr=lfirst(attrl);
+ Oid indexoid = lfirsti(indexoidscan);
+ HeapTuple indexTuple;
+ Form_pg_index indexStruct;
+
+ indexTuple = SearchSysCache(INDEXRELID,
+ ObjectIdGetDatum(indexoid),
+ 0, 0, 0);
+ if (!HeapTupleIsValid(indexTuple))
+ elog(ERROR, "transformFkeyGetPrimaryKey: index %u not found",
+ indexoid);
+ indexStruct = (Form_pg_index) GETSTRUCT(indexTuple);
+
+ if (indexStruct->indisunique)
+ {
+ List *attrl;
+
+ /*
+ * Make sure this index has the same number of
+ * keys -- It obviously won't match otherwise.
+ */
+ for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++);
+ if (i != length(fkconstraint->pk_attrs))
found = false;
- for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++)
+ else
+ {
+ /* go through the fkconstraint->pk_attrs list */
+ foreach(attrl, fkconstraint->pk_attrs)
{
- int pkattno = indexStruct->indkey[i];
- if (pkattno>0)
+ Ident *attr = lfirst(attrl);
+
+ found = false;
+ for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++)
{
- char *name = NameStr(rel_attrs[pkattno-1]->attname);
- if (strcmp(name, attr->name)==0)
+ int pkattno = indexStruct->indkey[i];
+
+ if (pkattno > 0)
{
- found = true;
- break;
+ char *name = NameStr(rel_attrs[pkattno - 1]->attname);
+
+ if (strcmp(name, attr->name) == 0)
+ {
+ found = true;
+ break;
+ }
}
}
+ if (!found)
+ break;
}
- if (!found)
- break;
}
}
+ ReleaseSysCache(indexTuple);
+ if (found)
+ break;
}
- ReleaseSysCache(indexTuple);
- if (found)
- break;
- }
- if (!found)
- elog(ERROR, "UNIQUE constraint matching given keys for referenced table \"%s\" not found",
- fkconstraint->pktable_name);
+ if (!found)
+ elog(ERROR, "UNIQUE constraint matching given keys for referenced table \"%s\" not found",
+ fkconstraint->pktable_name);
- freeList(indexoidlist);
- heap_close(pkrel, NoLock);
+ freeList(indexoidlist);
+ heap_close(pkrel, NoLock);
- rel_attrs = rel->rd_att->attrs;
- if (fkconstraint->fk_attrs!=NIL) {
- List *fkattrs;
- Ident *fkattr;
+ rel_attrs = rel->rd_att->attrs;
+ if (fkconstraint->fk_attrs != NIL)
+ {
+ List *fkattrs;
+ Ident *fkattr;
- found = false;
- foreach(fkattrs, fkconstraint->fk_attrs) {
- int count;
found = false;
- fkattr=lfirst(fkattrs);
- for (count = 0; count < rel->rd_att->natts; count++) {
- char *name = NameStr(rel->rd_att->attrs[count]->attname);
- if (strcmp(name, fkattr->name)==0) {
- found = true;
- break;
+ foreach(fkattrs, fkconstraint->fk_attrs)
+ {
+ int count;
+
+ found = false;
+ fkattr = lfirst(fkattrs);
+ for (count = 0; count < rel->rd_att->natts; count++)
+ {
+ char *name = NameStr(rel->rd_att->attrs[count]->attname);
+
+ if (strcmp(name, fkattr->name) == 0)
+ {
+ found = true;
+ break;
+ }
}
+ if (!found)
+ break;
}
if (!found)
- break;
+ elog(ERROR, "columns referenced in foreign key constraint not found.");
}
- if (!found)
- elog(ERROR, "columns referenced in foreign key constraint not found.");
- }
- trig.tgoid = 0;
- if (fkconstraint->constr_name)
- trig.tgname = fkconstraint->constr_name;
- else
- trig.tgname = "<unknown>";
- trig.tgfoid = 0;
- trig.tgtype = 0;
- trig.tgenabled = TRUE;
- trig.tgisconstraint = TRUE;
- trig.tginitdeferred = FALSE;
- trig.tgdeferrable = FALSE;
-
- trig.tgargs = (char **) palloc(
- sizeof(char *) * (4 + length(fkconstraint->fk_attrs)
- + length(fkconstraint->pk_attrs)));
-
- if (fkconstraint->constr_name)
- trig.tgargs[0] = fkconstraint->constr_name;
- else
- trig.tgargs[0] = "<unknown>";
- trig.tgargs[1] = (char *) relationName;
- trig.tgargs[2] = fkconstraint->pktable_name;
- trig.tgargs[3] = fkconstraint->match_type;
- count = 4;
- foreach(list, fkconstraint->fk_attrs)
+ trig.tgoid = 0;
+ if (fkconstraint->constr_name)
+ trig.tgname = fkconstraint->constr_name;
+ else
+ trig.tgname = "<unknown>";
+ trig.tgfoid = 0;
+ trig.tgtype = 0;
+ trig.tgenabled = TRUE;
+ trig.tgisconstraint = TRUE;
+ trig.tginitdeferred = FALSE;
+ trig.tgdeferrable = FALSE;
+
+ trig.tgargs = (char **) palloc(
+ sizeof(char *) * (4 + length(fkconstraint->fk_attrs)
+ + length(fkconstraint->pk_attrs)));
+
+ if (fkconstraint->constr_name)
+ trig.tgargs[0] = fkconstraint->constr_name;
+ else
+ trig.tgargs[0] = "<unknown>";
+ trig.tgargs[1] = (char *) relationName;
+ trig.tgargs[2] = fkconstraint->pktable_name;
+ trig.tgargs[3] = fkconstraint->match_type;
+ count = 4;
+ foreach(list, fkconstraint->fk_attrs)
{
Ident *fk_at = lfirst(list);
trig.tgargs[count] = fk_at->name;
- count+=2;
+ count += 2;
}
- count = 5;
- foreach(list, fkconstraint->pk_attrs)
+ count = 5;
+ foreach(list, fkconstraint->pk_attrs)
{
Ident *pk_at = lfirst(list);
trig.tgargs[count] = pk_at->name;
- count+=2;
+ count += 2;
}
- trig.tgnargs = count-1;
+ trig.tgnargs = count - 1;
- scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL);
- AssertState(scan != NULL);
+ scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL);
+ AssertState(scan != NULL);
- while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
- {
- /* Make a call to the check function */
- /* No parameters are passed, but we do set a context */
- FunctionCallInfoData fcinfo;
- TriggerData trigdata;
+ while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
+ {
+ /* Make a call to the check function */
+ /* No parameters are passed, but we do set a context */
+ FunctionCallInfoData fcinfo;
+ TriggerData trigdata;
- MemSet(&fcinfo, 0, sizeof(fcinfo));
- /* We assume RI_FKey_check_ins won't look at flinfo... */
+ MemSet(&fcinfo, 0, sizeof(fcinfo));
+ /* We assume RI_FKey_check_ins won't look at flinfo... */
- trigdata.type = T_TriggerData;
- trigdata.tg_event = TRIGGER_EVENT_INSERT | TRIGGER_EVENT_ROW;
- trigdata.tg_relation = rel;
- trigdata.tg_trigtuple = tuple;
- trigdata.tg_newtuple = NULL;
- trigdata.tg_trigger = &trig;
+ trigdata.type = T_TriggerData;
+ trigdata.tg_event = TRIGGER_EVENT_INSERT | TRIGGER_EVENT_ROW;
+ trigdata.tg_relation = rel;
+ trigdata.tg_trigtuple = tuple;
+ trigdata.tg_newtuple = NULL;
+ trigdata.tg_trigger = &trig;
- fcinfo.context = (Node *) &trigdata;
+ fcinfo.context = (Node *) &trigdata;
- RI_FKey_check_ins(&fcinfo);
- }
- heap_endscan(scan);
- heap_close(rel, NoLock); /* close rel but keep
- * lock! */
+ RI_FKey_check_ins(&fcinfo);
+ }
+ heap_endscan(scan);
+ heap_close(rel, NoLock); /* close rel but keep
+ * lock! */
- pfree(trig.tgargs);
- break;
- }
+ pfree(trig.tgargs);
+ break;
+ }
default:
elog(ERROR, "ALTER TABLE / ADD CONSTRAINT unable to determine type of constraint passed");
}
@@ -1464,15 +1486,15 @@ AlterTableDropConstraint(const char *relationName,
void
AlterTableOwner(const char *relationName, const char *newOwnerName)
{
- Relation class_rel;
- HeapTuple tuple;
+ Relation class_rel;
+ HeapTuple tuple;
int32 newOwnerSysid;
Relation idescs[Num_pg_class_indices];
/*
* first check that we are a superuser
*/
- if (! superuser())
+ if (!superuser())
elog(ERROR, "ALTER TABLE: permission denied");
/*
@@ -1537,21 +1559,21 @@ AlterTableOwner(const char *relationName, const char *newOwnerName)
void
AlterTableCreateToastTable(const char *relationName, bool silent)
{
- Relation rel;
- Oid myrelid;
- HeapTuple reltup;
- HeapTupleData classtuple;
- TupleDesc tupdesc;
- Relation class_rel;
- Buffer buffer;
- Relation ridescs[Num_pg_class_indices];
- Oid toast_relid;
- Oid toast_idxid;
- char toast_relname[NAMEDATALEN + 1];
- char toast_idxname[NAMEDATALEN + 1];
- Relation toast_idxrel;
- IndexInfo *indexInfo;
- Oid classObjectId[1];
+ Relation rel;
+ Oid myrelid;
+ HeapTuple reltup;
+ HeapTupleData classtuple;
+ TupleDesc tupdesc;
+ Relation class_rel;
+ Buffer buffer;
+ Relation ridescs[Num_pg_class_indices];
+ Oid toast_relid;
+ Oid toast_idxid;
+ char toast_relname[NAMEDATALEN + 1];
+ char toast_idxname[NAMEDATALEN + 1];
+ Relation toast_idxrel;
+ IndexInfo *indexInfo;
+ Oid classObjectId[1];
/*
* permissions checking. XXX exactly what is appropriate here?
@@ -1618,7 +1640,7 @@ AlterTableCreateToastTable(const char *relationName, bool silent)
/*
* Check to see whether the table actually needs a TOAST table.
*/
- if (! needs_toast_table(rel))
+ if (!needs_toast_table(rel))
{
if (silent)
{
@@ -1652,10 +1674,11 @@ AlterTableCreateToastTable(const char *relationName, bool silent)
"chunk_data",
BYTEAOID,
-1, 0, false);
+
/*
- * Ensure that the toast table doesn't itself get toasted,
- * or we'll be toast :-(. This is essential for chunk_data because
- * type bytea is toastable; hit the other two just to be sure.
+ * Ensure that the toast table doesn't itself get toasted, or we'll be
+ * toast :-(. This is essential for chunk_data because type bytea is
+ * toastable; hit the other two just to be sure.
*/
tupdesc->attrs[0]->attstorage = 'p';
tupdesc->attrs[1]->attstorage = 'p';
@@ -1733,7 +1756,7 @@ AlterTableCreateToastTable(const char *relationName, bool silent)
}
/*
- * Check to see whether the table needs a TOAST table. It does only if
+ * Check to see whether the table needs a TOAST table. It does only if
* (1) there are any toastable attributes, and (2) the maximum length
* of a tuple could exceed TOAST_TUPLE_THRESHOLD. (We don't want to
* create a toast table for something like "f1 varchar(20)".)
@@ -1745,7 +1768,7 @@ needs_toast_table(Relation rel)
bool maxlength_unknown = false;
bool has_toastable_attrs = false;
TupleDesc tupdesc;
- Form_pg_attribute *att;
+ Form_pg_attribute *att;
int32 tuple_length;
int i;
@@ -1762,8 +1785,8 @@ needs_toast_table(Relation rel)
}
else
{
- int32 maxlen = type_maximum_size(att[i]->atttypid,
- att[i]->atttypmod);
+ int32 maxlen = type_maximum_size(att[i]->atttypid,
+ att[i]->atttypmod);
if (maxlen < 0)
maxlength_unknown = true;
@@ -1798,7 +1821,7 @@ LockTableCommand(LockStmt *lockstmt)
rel = heap_openr(lockstmt->relname, NoLock);
if (rel->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "LOCK TABLE: %s is not a table", lockstmt->relname);
+ elog(ERROR, "LOCK TABLE: %s is not a table", lockstmt->relname);
if (lockstmt->mode == AccessShareLock)
aclresult = pg_aclcheck(lockstmt->relname, GetUserId(), ACL_RD);
@@ -1817,9 +1840,9 @@ LockTableCommand(LockStmt *lockstmt)
static bool
is_relation(char *name)
{
- Relation rel = heap_openr(name, NoLock);
+ Relation rel = heap_openr(name, NoLock);
- bool retval = (rel->rd_rel->relkind == RELKIND_RELATION);
+ bool retval = (rel->rd_rel->relkind == RELKIND_RELATION);
heap_close(rel, NoLock);
diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c
index 46e8b8057ec..06397ab323f 100644
--- a/src/backend/commands/comment.c
+++ b/src/backend/commands/comment.c
@@ -7,7 +7,7 @@
* Copyright (c) 1999, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.26 2001/01/23 04:32:21 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.27 2001/03/22 03:59:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,7 +55,7 @@ static void CommentAggregate(char *aggregate, List *arguments, char *comment);
static void CommentProc(char *function, List *arguments, char *comment);
static void CommentOperator(char *opname, List *arguments, char *comment);
static void CommentTrigger(char *trigger, char *relation, char *comments);
-static void CreateComments(Oid oid, char *comment);
+static void CreateComments(Oid oid, char *comment);
/*------------------------------------------------------------------
* CommentObject --
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index b518ef572e1..f586869b078 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.134 2001/03/14 21:47:50 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.135 2001/03/22 03:59:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -76,6 +76,7 @@ static StringInfoData attribute_buf;
#ifdef MULTIBYTE
static int client_encoding;
static int server_encoding;
+
#endif
@@ -285,6 +286,7 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
elog(ERROR, "You must have Postgres superuser privilege to do a COPY "
"directly to or from a file. Anyone can COPY to stdout or "
"from stdin. Psql's \\copy command also works for anyone.");
+
/*
* This restriction is unfortunate, but necessary until the frontend
* COPY protocol is redesigned to be binary-safe...
@@ -344,8 +346,8 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
mode_t oumask; /* Pre-existing umask value */
/*
- * Prevent write to relative path ... too easy to shoot oneself
- * in the foot by overwriting a database file ...
+ * Prevent write to relative path ... too easy to shoot
+ * oneself in the foot by overwriting a database file ...
*/
if (filename[0] != '/')
elog(ERROR, "Relative path not allowed for server side"
@@ -408,7 +410,10 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
attr_count = rel->rd_att->natts;
attr = rel->rd_att->attrs;
- /* For binary copy we really only need isvarlena, but compute it all... */
+ /*
+ * For binary copy we really only need isvarlena, but compute it
+ * all...
+ */
out_functions = (FmgrInfo *) palloc(attr_count * sizeof(FmgrInfo));
elements = (Oid *) palloc(attr_count * sizeof(Oid));
isvarlena = (bool *) palloc(attr_count * sizeof(bool));
@@ -417,7 +422,7 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
Oid out_func_oid;
if (!getTypeOutputInfo(attr[i]->atttypid,
- &out_func_oid, &elements[i], &isvarlena[i]))
+ &out_func_oid, &elements[i], &isvarlena[i]))
elog(ERROR, "COPY: couldn't lookup info for type %u",
attr[i]->atttypid);
fmgr_info(out_func_oid, &out_functions[i]);
@@ -454,7 +459,7 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
if (binary)
{
/* Binary per-tuple header */
- int16 fld_count = attr_count;
+ int16 fld_count = attr_count;
CopySendData(&fld_count, sizeof(int16), fp);
/* Send OID if wanted --- note fld_count doesn't include it */
@@ -471,7 +476,7 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
if (oids)
{
string = DatumGetCString(DirectFunctionCall1(oidout,
- ObjectIdGetDatum(tuple->t_data->t_oid)));
+ ObjectIdGetDatum(tuple->t_data->t_oid)));
CopySendString(string, fp);
pfree(string);
need_delim = true;
@@ -497,20 +502,22 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
{
if (!binary)
{
- CopySendString(null_print, fp); /* null indicator */
+ CopySendString(null_print, fp); /* null indicator */
}
else
{
- fld_size = 0; /* null marker */
+ fld_size = 0; /* null marker */
CopySendData(&fld_size, sizeof(int16), fp);
}
}
else
{
+
/*
- * If we have a toasted datum, forcibly detoast it to avoid
- * memory leakage inside the type's output routine (or
- * for binary case, becase we must output untoasted value).
+ * If we have a toasted datum, forcibly detoast it to
+ * avoid memory leakage inside the type's output routine
+ * (or for binary case, becase we must output untoasted
+ * value).
*/
if (isvarlena[i])
value = PointerGetDatum(PG_DETOAST_DATUM(origvalue));
@@ -520,9 +527,9 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
if (!binary)
{
string = DatumGetCString(FunctionCall3(&out_functions[i],
- value,
- ObjectIdGetDatum(elements[i]),
- Int32GetDatum(attr[i]->atttypmod)));
+ value,
+ ObjectIdGetDatum(elements[i]),
+ Int32GetDatum(attr[i]->atttypmod)));
CopyAttributeOut(fp, string, delim);
pfree(string);
}
@@ -552,8 +559,9 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
Datum datumBuf;
/*
- * We need this horsing around because we don't know
- * how shorter data values are aligned within a Datum.
+ * We need this horsing around because we don't
+ * know how shorter data values are aligned within
+ * a Datum.
*/
store_att_byval(&datumBuf, value, fld_size);
CopySendData(&datumBuf,
@@ -577,7 +585,7 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
if (binary)
{
/* Generate trailer for a binary copy */
- int16 fld_count = -1;
+ int16 fld_count = -1;
CopySendData(&fld_count, sizeof(int16), fp);
}
@@ -609,7 +617,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
int done = 0;
char *string;
ResultRelInfo *resultRelInfo;
- EState *estate = CreateExecutorState(); /* for ExecConstraints() */
+ EState *estate = CreateExecutorState(); /* for ExecConstraints() */
TupleTable tupleTable;
TupleTableSlot *slot;
Oid loaded_oid = InvalidOid;
@@ -622,11 +630,11 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
/*
* We need a ResultRelInfo so we can use the regular executor's
- * index-entry-making machinery. (There used to be a huge amount
- * of code here that basically duplicated execUtils.c ...)
+ * index-entry-making machinery. (There used to be a huge amount of
+ * code here that basically duplicated execUtils.c ...)
*/
resultRelInfo = makeNode(ResultRelInfo);
- resultRelInfo->ri_RangeTableIndex = 1; /* dummy */
+ resultRelInfo->ri_RangeTableIndex = 1; /* dummy */
resultRelInfo->ri_RelationDesc = rel;
ExecOpenIndices(resultRelInfo);
@@ -673,7 +681,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
if (CopyGetEof(fp))
elog(ERROR, "COPY BINARY: bogus file header (missing flags)");
file_has_oids = (tmp & (1 << 16)) != 0;
- tmp &= ~ (1 << 16);
+ tmp &= ~(1 << 16);
if ((tmp >> 16) != 0)
elog(ERROR, "COPY BINARY: unrecognized critical flags in header");
/* Header extension length */
@@ -727,7 +735,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
else
{
loaded_oid = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(string)));
+ CStringGetDatum(string)));
if (loaded_oid == InvalidOid)
elog(ERROR, "COPY TEXT: Invalid Oid");
}
@@ -747,8 +755,8 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
{
values[i] = FunctionCall3(&in_functions[i],
CStringGetDatum(string),
- ObjectIdGetDatum(elements[i]),
- Int32GetDatum(attr[i]->atttypmod));
+ ObjectIdGetDatum(elements[i]),
+ Int32GetDatum(attr[i]->atttypmod));
nulls[i] = ' ';
}
}
@@ -757,8 +765,8 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
}
else
{ /* binary */
- int16 fld_count,
- fld_size;
+ int16 fld_count,
+ fld_size;
CopyGetData(&fld_count, sizeof(int16), fp);
if (CopyGetEof(fp) ||
@@ -791,15 +799,15 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
if (CopyGetEof(fp))
elog(ERROR, "COPY BINARY: unexpected EOF");
if (fld_size == 0)
- continue; /* it's NULL; nulls[i] already set */
+ continue; /* it's NULL; nulls[i] already set */
if (fld_size != attr[i]->attlen)
elog(ERROR, "COPY BINARY: sizeof(field %d) is %d, expected %d",
- i+1, (int) fld_size, (int) attr[i]->attlen);
+ i + 1, (int) fld_size, (int) attr[i]->attlen);
if (fld_size == -1)
{
/* varlena field */
- int32 varlena_size;
- Pointer varlena_ptr;
+ int32 varlena_size;
+ Pointer varlena_ptr;
CopyGetData(&varlena_size, sizeof(int32), fp);
if (CopyGetEof(fp))
@@ -818,7 +826,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
else if (!attr[i]->attbyval)
{
/* fixed-length pass-by-reference */
- Pointer refval_ptr;
+ Pointer refval_ptr;
Assert(fld_size > 0);
refval_ptr = (Pointer) palloc(fld_size);
@@ -833,8 +841,9 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
Datum datumBuf;
/*
- * We need this horsing around because we don't know
- * how shorter data values are aligned within a Datum.
+ * We need this horsing around because we don't
+ * know how shorter data values are aligned within
+ * a Datum.
*/
Assert(fld_size > 0 && fld_size <= sizeof(Datum));
CopyGetData(&datumBuf, fld_size, fp);
@@ -1163,6 +1172,7 @@ CopyAttributeOut(FILE *fp, char *server_string, char *delim)
char *string_start;
int mblen;
int i;
+
#endif
#ifdef MULTIBYTE
@@ -1182,7 +1192,7 @@ CopyAttributeOut(FILE *fp, char *server_string, char *delim)
#endif
#ifdef MULTIBYTE
- for (; (mblen = (server_encoding == client_encoding? 1 : pg_encoding_mblen(client_encoding, string))) &&
+ for (; (mblen = (server_encoding == client_encoding ? 1 : pg_encoding_mblen(client_encoding, string))) &&
((c = *string) != '\0'); string += mblen)
#else
for (; (c = *string) != '\0'; string++)
@@ -1199,7 +1209,7 @@ CopyAttributeOut(FILE *fp, char *server_string, char *delim)
}
#ifdef MULTIBYTE
- if (client_encoding != server_encoding)
+ if (client_encoding != server_encoding)
pfree(string_start); /* pfree pg_server_to_client result */
#endif
}
diff --git a/src/backend/commands/creatinh.c b/src/backend/commands/creatinh.c
index a043cf0b8e0..c4a5eaa00e9 100644
--- a/src/backend/commands/creatinh.c
+++ b/src/backend/commands/creatinh.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.72 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.73 2001/03/22 03:59:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,7 +37,7 @@
static int checkAttrExists(const char *attributeName,
const char *attributeType, List *schema);
static List *MergeAttributes(List *schema, List *supers, bool istemp,
- List **supOids, List **supconstr);
+ List **supOids, List **supconstr);
static void StoreCatalogInheritance(Oid relationId, List *supers);
static void setRelhassubclassInRelation(Oid relationId, bool relhassubclass);
@@ -150,10 +150,10 @@ DefineRelation(CreateStmt *stmt, char relkind)
CommandCounterIncrement();
/*
- * Open the new relation and acquire exclusive lock on it. This isn't
+ * Open the new relation and acquire exclusive lock on it. This isn't
* really necessary for locking out other backends (since they can't
- * see the new rel anyway until we commit), but it keeps the lock manager
- * from complaining about deadlock risks.
+ * see the new rel anyway until we commit), but it keeps the lock
+ * manager from complaining about deadlock risks.
*/
rel = heap_openr(relname, AccessExclusiveLock);
@@ -242,7 +242,7 @@ TruncateRelation(char *name)
* Varattnos of pg_relcheck.rcbin should be rewritten when
* subclasses inherit the constraints from the super class.
* Note that these functions rewrite varattnos while walking
- * through a node tree.
+ * through a node tree.
*/
static bool
change_varattnos_walker(Node *node, const AttrNumber *newattno)
@@ -251,15 +251,15 @@ change_varattnos_walker(Node *node, const AttrNumber *newattno)
return false;
if (IsA(node, Var))
{
- Var *var = (Var *) node;
+ Var *var = (Var *) node;
if (var->varlevelsup == 0 && var->varno == 1)
{
+
/*
- * ??? the following may be a problem when the
- * node is multiply referenced though
- * stringToNode() doesn't create such a node
- * currently.
+ * ??? the following may be a problem when the node is
+ * multiply referenced though stringToNode() doesn't create
+ * such a node currently.
*/
Assert(newattno[var->varattno - 1] > 0);
var->varattno = newattno[var->varattno - 1];
@@ -373,9 +373,12 @@ MergeAttributes(List *schema, List *supers, bool istemp,
AttrNumber attrno;
TupleDesc tupleDesc;
TupleConstr *constr;
- AttrNumber *newattno, *partialAttidx;
- Node *expr;
- int i, attidx, attno_exist;
+ AttrNumber *newattno,
+ *partialAttidx;
+ Node *expr;
+ int i,
+ attidx,
+ attno_exist;
relation = heap_openr(name, AccessShareLock);
@@ -385,7 +388,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
if (!istemp && is_temp_rel_name(name))
elog(ERROR, "CREATE TABLE: cannot inherit from temp relation \"%s\"", name);
- /* We should have an UNDER permission flag for this, but for now,
+ /*
+ * We should have an UNDER permission flag for this, but for now,
* demand that creator of a child table own the parent.
*/
if (!pg_ownercheck(GetUserId(), name, RELNAME))
@@ -397,14 +401,15 @@ MergeAttributes(List *schema, List *supers, bool istemp,
/* allocate a new attribute number table and initialize */
newattno = (AttrNumber *) palloc(tupleDesc->natts * sizeof(AttrNumber));
for (i = 0; i < tupleDesc->natts; i++)
- newattno [i] = 0;
+ newattno[i] = 0;
+
/*
- * searching and storing order are different.
- * another table is needed.
- */
+ * searching and storing order are different. another table is
+ * needed.
+ */
partialAttidx = (AttrNumber *) palloc(tupleDesc->natts * sizeof(AttrNumber));
for (i = 0; i < tupleDesc->natts; i++)
- partialAttidx [i] = 0;
+ partialAttidx[i] = 0;
constr = tupleDesc->constr;
attidx = 0;
@@ -577,9 +582,9 @@ StoreCatalogInheritance(Oid relationId, List *supers)
Datum datum[Natts_pg_inherits];
char nullarr[Natts_pg_inherits];
- datum[0] = ObjectIdGetDatum(relationId); /* inhrel */
- datum[1] = ObjectIdGetDatum(entryOid); /* inhparent */
- datum[2] = Int16GetDatum(seqNumber); /* inhseqno */
+ datum[0] = ObjectIdGetDatum(relationId); /* inhrel */
+ datum[1] = ObjectIdGetDatum(entryOid); /* inhparent */
+ datum[2] = Int16GetDatum(seqNumber); /* inhseqno */
nullarr[0] = ' ';
nullarr[1] = ' ';
@@ -730,7 +735,7 @@ checkAttrExists(const char *attributeName, const char *attributeType,
List *schema)
{
List *s;
- int i = 0;
+ int i = 0;
foreach(s, schema)
{
@@ -756,9 +761,9 @@ checkAttrExists(const char *attributeName, const char *attributeType,
static void
setRelhassubclassInRelation(Oid relationId, bool relhassubclass)
{
- Relation relationRelation;
- HeapTuple tuple;
- Relation idescs[Num_pg_class_indices];
+ Relation relationRelation;
+ HeapTuple tuple;
+ Relation idescs[Num_pg_class_indices];
/*
* Fetch a modifiable copy of the tuple, modify it, update pg_class.
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index c450f1b400a..cd409781b2b 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.73 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.74 2001/03/22 03:59:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,8 +36,8 @@
/* non-export function prototypes */
static bool get_db_info(const char *name, Oid *dbIdP, int4 *ownerIdP,
- int *encodingP, bool *dbIsTemplateP,
- Oid *dbLastSysOidP, char *dbpath);
+ int *encodingP, bool *dbIsTemplateP,
+ Oid *dbLastSysOidP, char *dbpath);
static bool get_user_info(Oid use_sysid, bool *use_super, bool *use_createdb);
static char *resolve_alt_dbpath(const char *dbpath, Oid dboid);
static bool remove_dbdirs(const char *real_loc, const char *altloc);
@@ -82,12 +82,12 @@ createdb(const char *dbname, const char *dbpath,
elog(ERROR, "CREATE DATABASE: may not be called in a transaction block");
/*
- * Check for db name conflict. There is a race condition here, since
+ * Check for db name conflict. There is a race condition here, since
* another backend could create the same DB name before we commit.
- * However, holding an exclusive lock on pg_database for the whole time
- * we are copying the source database doesn't seem like a good idea,
- * so accept possibility of race to create. We will check again after
- * we grab the exclusive lock.
+ * However, holding an exclusive lock on pg_database for the whole
+ * time we are copying the source database doesn't seem like a good
+ * idea, so accept possibility of race to create. We will check again
+ * after we grab the exclusive lock.
*/
if (get_db_info(dbname, NULL, NULL, NULL, NULL, NULL, NULL))
elog(ERROR, "CREATE DATABASE: database \"%s\" already exists", dbname);
@@ -96,15 +96,16 @@ createdb(const char *dbname, const char *dbpath,
* Lookup database (template) to be cloned.
*/
if (!dbtemplate)
- dbtemplate = "template1"; /* Default template database name */
+ dbtemplate = "template1"; /* Default template database name */
if (!get_db_info(dbtemplate, &src_dboid, &src_owner, &src_encoding,
&src_istemplate, &src_lastsysoid, src_dbpath))
elog(ERROR, "CREATE DATABASE: template \"%s\" does not exist",
dbtemplate);
+
/*
- * Permission check: to copy a DB that's not marked datistemplate,
- * you must be superuser or the owner thereof.
+ * Permission check: to copy a DB that's not marked datistemplate, you
+ * must be superuser or the owner thereof.
*/
if (!src_istemplate)
{
@@ -112,6 +113,7 @@ createdb(const char *dbname, const char *dbpath,
elog(ERROR, "CREATE DATABASE: permission to copy \"%s\" denied",
dbtemplate);
}
+
/*
* Determine physical path of source database
*/
@@ -133,14 +135,16 @@ createdb(const char *dbname, const char *dbpath,
if (encoding < 0)
encoding = src_encoding;
- /*
- * Preassign OID for pg_database tuple, so that we can compute db path.
+ /*
+ * Preassign OID for pg_database tuple, so that we can compute db
+ * path.
*/
dboid = newoid();
/*
- * Compute nominal location (where we will try to access the database),
- * and resolve alternate physical location if one is specified.
+ * Compute nominal location (where we will try to access the
+ * database), and resolve alternate physical location if one is
+ * specified.
*/
nominal_loc = GetDatabasePath(dboid);
alt_loc = resolve_alt_dbpath(dbpath, dboid);
@@ -155,8 +159,8 @@ createdb(const char *dbname, const char *dbpath,
/*
* Force dirty buffers out to disk, to ensure source database is
- * up-to-date for the copy. (We really only need to flush buffers
- * for the source database...)
+ * up-to-date for the copy. (We really only need to flush buffers for
+ * the source database...)
*/
BufferSync();
@@ -231,7 +235,8 @@ createdb(const char *dbname, const char *dbpath,
tuple = heap_formtuple(pg_database_dsc, new_record, new_record_nulls);
- tuple->t_data->t_oid = dboid; /* override heap_insert's OID selection */
+ tuple->t_data->t_oid = dboid; /* override heap_insert's OID
+ * selection */
heap_insert(pg_database_rel, tuple);
@@ -273,9 +278,9 @@ dropdb(const char *dbname)
bool db_istemplate;
bool use_super;
Oid db_id;
- char *alt_loc;
- char *nominal_loc;
- char dbpath[MAXPGPATH];
+ char *alt_loc;
+ char *nominal_loc;
+ char dbpath[MAXPGPATH];
Relation pgdbrel;
HeapScanDesc pgdbscan;
ScanKeyData key;
@@ -311,8 +316,8 @@ dropdb(const char *dbname)
elog(ERROR, "DROP DATABASE: permission denied");
/*
- * Disallow dropping a DB that is marked istemplate. This is just
- * to prevent people from accidentally dropping template0 or template1;
+ * Disallow dropping a DB that is marked istemplate. This is just to
+ * prevent people from accidentally dropping template0 or template1;
* they can do so if they're really determined ...
*/
if (db_istemplate)
@@ -338,6 +343,7 @@ dropdb(const char *dbname)
tup = heap_getnext(pgdbscan, 0);
if (!HeapTupleIsValid(tup))
{
+
/*
* This error should never come up since the existence of the
* database is checked earlier
@@ -437,7 +443,7 @@ get_db_info(const char *name, Oid *dbIdP, int4 *ownerIdP,
{
tmptext = DatumGetTextP(heap_getattr(tuple,
Anum_pg_database_datpath,
- RelationGetDescr(relation),
+ RelationGetDescr(relation),
&isnull));
if (!isnull)
{
@@ -481,11 +487,11 @@ get_user_info(Oid use_sysid, bool *use_super, bool *use_createdb)
static char *
-resolve_alt_dbpath(const char * dbpath, Oid dboid)
+resolve_alt_dbpath(const char *dbpath, Oid dboid)
{
- const char * prefix;
- char * ret;
- size_t len;
+ const char *prefix;
+ char *ret;
+ size_t len;
if (dbpath == NULL || dbpath[0] == '\0')
return NULL;
@@ -502,7 +508,8 @@ resolve_alt_dbpath(const char * dbpath, Oid dboid)
else
{
/* must be environment variable */
- char * var = getenv(dbpath);
+ char *var = getenv(dbpath);
+
if (!var)
elog(ERROR, "Postmaster environment variable '%s' not set", dbpath);
if (var[0] != '/')
@@ -519,11 +526,11 @@ resolve_alt_dbpath(const char * dbpath, Oid dboid)
static bool
-remove_dbdirs(const char * nominal_loc, const char * alt_loc)
+remove_dbdirs(const char *nominal_loc, const char *alt_loc)
{
- const char *target_dir;
- char buf[MAXPGPATH + 100];
- bool success = true;
+ const char *target_dir;
+ char buf[MAXPGPATH + 100];
+ bool success = true;
target_dir = alt_loc ? alt_loc : nominal_loc;
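
The resolve_alt_dbpath() hunks above only reformat declarations, but the rule they touch is worth spelling out: an empty dbpath means no alternate location, a value starting with '/' is taken as the path itself, and anything else is treated as the name of a postmaster environment variable whose value must be an absolute path. Below is a minimal standalone sketch of that rule; the final "<prefix>/base/<oid>" layout and the helper name are assumptions for illustration, not something these hunks show.

/*
 * resolve_alt_dbpath_sketch -- simplified model of the path-resolution
 * rule seen in the dbcommands.c hunks above.  Error handling is reduced
 * to stderr + exit; the real code elogs.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
resolve_alt_dbpath_sketch(const char *dbpath, unsigned int dboid)
{
    const char *prefix;
    char       *ret;
    size_t      len;

    if (dbpath == NULL || dbpath[0] == '\0')
        return NULL;            /* no alternate location requested */

    if (dbpath[0] == '/')
        prefix = dbpath;        /* absolute path given directly */
    else
    {
        /* otherwise it must name a postmaster environment variable */
        char       *var = getenv(dbpath);

        if (!var)
        {
            fprintf(stderr, "environment variable '%s' not set\n", dbpath);
            exit(1);
        }
        if (var[0] != '/')
        {
            fprintf(stderr, "'%s' must contain an absolute path\n", dbpath);
            exit(1);
        }
        prefix = var;
    }

    len = strlen(prefix) + 32;
    ret = malloc(len);
    snprintf(ret, len, "%s/base/%u", prefix, dboid);    /* assumed layout */
    return ret;
}

int
main(void)
{
    char       *loc = resolve_alt_dbpath_sketch("/mnt/bigdisk", 16384);

    printf("%s\n", loc);        /* prints /mnt/bigdisk/base/16384 */
    free(loc);
    return 0;
}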
diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c
index 4f5f8a47f64..c8a2726a8f7 100644
--- a/src/backend/commands/define.c
+++ b/src/backend/commands/define.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.52 2001/02/12 20:07:21 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.53 2001/03/22 03:59:22 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -70,7 +70,7 @@ case_translate_language_name(const char *input, char *output)
--------------------------------------------------------------------------*/
int i;
- for (i = 0; i < NAMEDATALEN-1 && input[i]; ++i)
+ for (i = 0; i < NAMEDATALEN - 1 && input[i]; ++i)
output[i] = tolower((unsigned char) input[i]);
output[i] = '\0';
@@ -110,12 +110,12 @@ compute_full_attributes(List *parameters,
Note: currently, only two of these parameters actually do anything:
* canCache means the optimizer's constant-folder is allowed to
- pre-evaluate the function when all its inputs are constants.
+ pre-evaluate the function when all its inputs are constants.
* isStrict means the function should not be called when any NULL
- inputs are present; instead a NULL result value should be assumed.
+ inputs are present; instead a NULL result value should be assumed.
- The other four parameters are not used anywhere. They used to be
+ The other four parameters are not used anywhere. They used to be
used in the "expensive functions" optimizer, but that's been dead code
for a long time.
@@ -217,21 +217,26 @@ void
CreateFunction(ProcedureStmt *stmt, CommandDest dest)
{
char *probin_str;
+
/* pathname of executable file that executes this function, if any */
char *prosrc_str;
+
/* SQL that executes this function, if any */
char *prorettype;
+
/* Type of return value (or member of set of values) from function */
char languageName[NAMEDATALEN];
+
/*
- * name of language of function, with case adjusted: "C",
- * "internal", "sql", etc.
+ * name of language of function, with case adjusted: "C", "internal",
+ * "sql", etc.
*/
bool returnsSet;
+
/* The function returns a set of values, as opposed to a singleton. */
/*
@@ -257,7 +262,7 @@ CreateFunction(ProcedureStmt *stmt, CommandDest dest)
if (!superuser())
elog(ERROR,
"Only users with Postgres superuser privilege are "
- "permitted to create a function in the '%s' language.\n\t"
+ "permitted to create a function in the '%s' language.\n\t"
"Others may use the 'sql' language "
"or the created procedural languages.",
languageName);
@@ -380,14 +385,14 @@ DefineOperator(char *oprName,
{
typeName1 = defGetString(defel);
if (IsA(defel->arg, TypeName)
- && ((TypeName *) defel->arg)->setof)
+ &&((TypeName *) defel->arg)->setof)
elog(ERROR, "setof type not implemented for leftarg");
}
else if (strcasecmp(defel->defname, "rightarg") == 0)
{
typeName2 = defGetString(defel);
if (IsA(defel->arg, TypeName)
- && ((TypeName *) defel->arg)->setof)
+ &&((TypeName *) defel->arg)->setof)
elog(ERROR, "setof type not implemented for rightarg");
}
else if (strcasecmp(defel->defname, "procedure") == 0)
@@ -478,8 +483,8 @@ DefineAggregate(char *aggName, List *parameters)
DefElem *defel = (DefElem *) lfirst(pl);
/*
- * sfunc1, stype1, and initcond1 are accepted as obsolete spellings
- * for sfunc, stype, initcond.
+ * sfunc1, stype1, and initcond1 are accepted as obsolete
+ * spellings for sfunc, stype, initcond.
*/
if (strcasecmp(defel->defname, "sfunc") == 0)
transfuncName = defGetString(defel);
@@ -515,12 +520,12 @@ DefineAggregate(char *aggName, List *parameters)
/*
* Most of the argument-checking is done inside of AggregateCreate
*/
- AggregateCreate(aggName, /* aggregate name */
- transfuncName, /* step function name */
- finalfuncName, /* final function name */
- baseType, /* type of data being aggregated */
- transType, /* transition data type */
- initval); /* initial condition */
+ AggregateCreate(aggName, /* aggregate name */
+ transfuncName, /* step function name */
+ finalfuncName, /* final function name */
+ baseType, /* type of data being aggregated */
+ transType, /* transition data type */
+ initval); /* initial condition */
}
/*
@@ -543,13 +548,13 @@ DefineType(char *typeName, List *parameters)
char delimiter = DEFAULT_TYPDELIM;
char *shadow_type;
List *pl;
- char alignment = 'i'; /* default alignment */
+ char alignment = 'i';/* default alignment */
char storage = 'p'; /* default storage in TOAST */
/*
- * Type names must be one character shorter than other names,
- * allowing room to create the corresponding array type name with
- * prepended "_".
+ * Type names must be one character shorter than other names, allowing
+ * room to create the corresponding array type name with prepended
+ * "_".
*/
if (strlen(typeName) > (NAMEDATALEN - 2))
{
@@ -692,14 +697,16 @@ defGetString(DefElem *def)
switch (nodeTag(def->arg))
{
case T_Integer:
- {
- char *str = palloc(32);
+ {
+ char *str = palloc(32);
- snprintf(str, 32, "%ld", (long) intVal(def->arg));
- return str;
- }
+ snprintf(str, 32, "%ld", (long) intVal(def->arg));
+ return str;
+ }
case T_Float:
- /* T_Float values are kept in string form, so this type cheat
+
+ /*
+ * T_Float values are kept in string form, so this type cheat
* works (and doesn't risk losing precision)
*/
return strVal(def->arg);
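
The defGetString() hunk above keeps the comment that T_Float arguments are stored in string form, so returning strVal() directly loses no precision, while T_Integer arguments are printed into a freshly allocated buffer. A standalone model of those two cases follows, using simplified stand-in types rather than the real parser nodes.

/*
 * Toy model of the two defGetString() cases reformatted above.  The
 * Value_sk type is a stand-in, not the backend's Value node.
 */
#include <stdio.h>
#include <stdlib.h>

typedef enum
{
    T_Integer_sk,
    T_Float_sk
} NodeTag_sk;

typedef struct
{
    NodeTag_sk  tag;
    long        ival;           /* used when tag == T_Integer_sk */
    char       *strval;         /* used when tag == T_Float_sk */
} Value_sk;

static char *
def_get_string_sketch(const Value_sk *arg)
{
    switch (arg->tag)
    {
        case T_Integer_sk:
            {
                char       *str = malloc(32);

                snprintf(str, 32, "%ld", arg->ival);
                return str;
            }
        case T_Float_sk:
            /* float values are carried as strings, so no conversion needed */
            return arg->strval;
    }
    return NULL;
}

int
main(void)
{
    Value_sk    i = {T_Integer_sk, 42, NULL};
    Value_sk    f = {T_Float_sk, 0, "3.14159265358979"};
    char       *istr = def_get_string_sketch(&i);

    printf("%s %s\n", istr, def_get_string_sketch(&f));
    free(istr);
    return 0;
}

Carrying floats as strings through the parser is what makes the shortcut in the T_Float case safe.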
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 31f24d88a6f..672ec54cb02 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -5,7 +5,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994-5, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.64 2001/01/27 01:41:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.65 2001/03/22 03:59:22 momjian Exp $
*
*/
@@ -271,7 +271,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
stringStringInfo(rte->relname));
if (strcmp(rte->eref->relname, rte->relname) != 0)
appendStringInfo(str, " %s",
- stringStringInfo(rte->eref->relname));
+ stringStringInfo(rte->eref->relname));
}
break;
case T_SubqueryScan:
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 6497da615b8..2d3e70c427b 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.45 2001/02/23 09:26:14 inoue Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.46 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -49,15 +49,15 @@ static void CheckPredicate(List *predList, List *rangeTable, Oid baseRelOid);
static void CheckPredExpr(Node *predicate, List *rangeTable, Oid baseRelOid);
static void CheckPredClause(Expr *predicate, List *rangeTable, Oid baseRelOid);
static void FuncIndexArgs(IndexInfo *indexInfo, Oid *classOidP,
- IndexElem *funcIndex,
- Oid relId,
- char *accessMethodName, Oid accessMethodId);
+ IndexElem *funcIndex,
+ Oid relId,
+ char *accessMethodName, Oid accessMethodId);
static void NormIndexAttrs(IndexInfo *indexInfo, Oid *classOidP,
- List *attList,
- Oid relId,
- char *accessMethodName, Oid accessMethodId);
-static Oid GetAttrOpClass(IndexElem *attribute, Oid attrType,
- char *accessMethodName, Oid accessMethodId);
+ List *attList,
+ Oid relId,
+ char *accessMethodName, Oid accessMethodId);
+static Oid GetAttrOpClass(IndexElem *attribute, Oid attrType,
+ char *accessMethodName, Oid accessMethodId);
static char *GetDefaultOpClass(Oid atttypid);
/*
@@ -118,9 +118,9 @@ DefineIndex(char *heapRelationName,
accessMethodName);
/*
- * XXX Hardwired hacks to check for limitations on supported index types.
- * We really ought to be learning this info from entries in the pg_am
- * table, instead of having it wired in here!
+ * XXX Hardwired hacks to check for limitations on supported index
+ * types. We really ought to be learning this info from entries in the
+ * pg_am table, instead of having it wired in here!
*/
if (unique && accessMethodId != BTREE_AM_OID)
elog(ERROR, "DefineIndex: unique indices are only available with the btree access method");
@@ -161,7 +161,8 @@ DefineIndex(char *heapRelationName,
elog(ERROR, "Existing indexes are inactive. REINDEX first");
/*
- * Prepare arguments for index_create, primarily an IndexInfo structure
+ * Prepare arguments for index_create, primarily an IndexInfo
+ * structure
*/
indexInfo = makeNode(IndexInfo);
indexInfo->ii_Predicate = (Node *) cnfPred;
@@ -207,7 +208,7 @@ DefineIndex(char *heapRelationName,
/*
* We update the relation's pg_class tuple even if it already has
- * relhasindex = true. This is needed to cause a shared-cache-inval
+ * relhasindex = true. This is needed to cause a shared-cache-inval
* message to be sent for the pg_class tuple, which will cause other
* backends to flush their relcache entries and in particular their
* cached lists of the indexes for this relation.
@@ -415,8 +416,8 @@ FuncIndexArgs(IndexInfo *indexInfo,
* has exact-match or binary-compatible input types.
* ----------------
*/
- if (! func_get_detail(funcIndex->name, nargs, argTypes,
- &funcid, &rettype, &retset, &true_typeids))
+ if (!func_get_detail(funcIndex->name, nargs, argTypes,
+ &funcid, &rettype, &retset, &true_typeids))
func_error("DefineIndex", funcIndex->name, nargs, argTypes, NULL);
if (retset)
@@ -425,7 +426,7 @@ FuncIndexArgs(IndexInfo *indexInfo,
for (i = 0; i < nargs; i++)
{
if (argTypes[i] != true_typeids[i] &&
- ! IS_BINARY_COMPATIBLE(argTypes[i], true_typeids[i]))
+ !IS_BINARY_COMPATIBLE(argTypes[i], true_typeids[i]))
func_error("DefineIndex", funcIndex->name, nargs, argTypes,
"Index function must be binary-compatible with table datatype");
}
@@ -439,7 +440,7 @@ FuncIndexArgs(IndexInfo *indexInfo,
indexInfo->ii_FuncOid = funcid;
/* Need to do the fmgr function lookup now, too */
- fmgr_info(funcid, & indexInfo->ii_FuncInfo);
+ fmgr_info(funcid, &indexInfo->ii_FuncInfo);
}
static void
@@ -477,7 +478,7 @@ NormIndexAttrs(IndexInfo *indexInfo,
indexInfo->ii_KeyAttrNumbers[attn] = attform->attnum;
classOidP[attn] = GetAttrOpClass(attribute, attform->atttypid,
- accessMethodName, accessMethodId);
+ accessMethodName, accessMethodId);
ReleaseSysCache(atttuple);
attn++;
@@ -515,8 +516,8 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
attribute->class);
/*
- * Assume the opclass is supported by this index access method
- * if we can find at least one relevant entry in pg_amop.
+ * Assume the opclass is supported by this index access method if we
+ * can find at least one relevant entry in pg_amop.
*/
ScanKeyEntryInitialize(&entry[0], 0,
Anum_pg_amop_amopid,
@@ -530,7 +531,7 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
relation = heap_openr(AccessMethodOperatorRelationName, AccessShareLock);
scan = heap_beginscan(relation, false, SnapshotNow, 2, entry);
- if (! HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
+ if (!HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
elog(ERROR, "DefineIndex: opclass \"%s\" not supported by access method \"%s\"",
attribute->class, accessMethodName);
@@ -540,17 +541,18 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
heap_close(relation, AccessShareLock);
/*
- * Make sure the operators associated with this opclass actually accept
- * the column data type. This prevents possible coredumps caused by
- * user errors like applying text_ops to an int4 column. We will accept
- * an opclass as OK if the operator's input datatype is binary-compatible
- * with the actual column datatype. Note we assume that all the operators
- * associated with an opclass accept the same datatypes, so checking the
- * first one we happened to find in the table is sufficient.
+ * Make sure the operators associated with this opclass actually
+ * accept the column data type. This prevents possible coredumps
+ * caused by user errors like applying text_ops to an int4 column. We
+ * will accept an opclass as OK if the operator's input datatype is
+ * binary-compatible with the actual column datatype. Note we assume
+ * that all the operators associated with an opclass accept the same
+ * datatypes, so checking the first one we happened to find in the
+ * table is sufficient.
*
* If the opclass was the default for the datatype, assume we can skip
- * this check --- that saves a few cycles in the most common case.
- * If pg_opclass is wrong then we're probably screwed anyway...
+ * this check --- that saves a few cycles in the most common case. If
+ * pg_opclass is wrong then we're probably screwed anyway...
*/
if (doTypeCheck)
{
@@ -560,11 +562,11 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
if (HeapTupleIsValid(tuple))
{
Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tuple);
- Oid opInputType = (optup->oprkind == 'l') ?
- optup->oprright : optup->oprleft;
+ Oid opInputType = (optup->oprkind == 'l') ?
+ optup->oprright : optup->oprleft;
if (attrType != opInputType &&
- ! IS_BINARY_COMPATIBLE(attrType, opInputType))
+ !IS_BINARY_COMPATIBLE(attrType, opInputType))
elog(ERROR, "DefineIndex: opclass \"%s\" does not accept datatype \"%s\"",
attribute->class, typeidTypeName(attrType));
ReleaseSysCache(tuple);
@@ -660,7 +662,7 @@ ReindexIndex(const char *name, bool force /* currently unused */ )
if (IsIgnoringSystemIndexes())
overwrite = true;
if (!reindex_index(tuple->t_data->t_oid, force, overwrite))
-#endif /* OLD_FILE_NAMING */
+#endif /* OLD_FILE_NAMING */
elog(NOTICE, "index \"%s\" wasn't reindexed", name);
ReleaseSysCache(tuple);
@@ -752,18 +754,18 @@ ReindexDatabase(const char *dbname, bool force, bool all)
elog(ERROR, "REINDEX DATABASE: Can be executed only on the currently open database.");
/*
- * We cannot run inside a user transaction block; if we were
- * inside a transaction, then our commit- and
- * start-transaction-command calls would not have the intended effect!
+ * We cannot run inside a user transaction block; if we were inside a
+ * transaction, then our commit- and start-transaction-command calls
+ * would not have the intended effect!
*/
if (IsTransactionBlock())
elog(ERROR, "REINDEX DATABASE cannot run inside a BEGIN/END block");
/*
- * Create a memory context that will survive forced transaction commits
- * we do below. Since it is a child of QueryContext, it will go away
- * eventually even if we suffer an error; there's no need for special
- * abort cleanup logic.
+ * Create a memory context that will survive forced transaction
+ * commits we do below. Since it is a child of QueryContext, it will
+ * go away eventually even if we suffer an error; there's no need for
+ * special abort cleanup logic.
*/
private_context = AllocSetContextCreate(QueryContext,
"ReindexDatabase",
diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c
index bbf008c918e..ca1dbf3cbe4 100644
--- a/src/backend/commands/proclang.c
+++ b/src/backend/commands/proclang.c
@@ -111,7 +111,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
i = 0;
values[i++] = PointerGetDatum(languageName);
- values[i++] = BoolGetDatum(true); /* lanispl */
+ values[i++] = BoolGetDatum(true); /* lanispl */
values[i++] = BoolGetDatum(stmt->pltrusted);
values[i++] = ObjectIdGetDatum(procTup->t_data->t_oid);
values[i++] = DirectFunctionCall1(textin,
diff --git a/src/backend/commands/remove.c b/src/backend/commands/remove.c
index 2c271758e08..da5ad74d8ba 100644
--- a/src/backend/commands/remove.c
+++ b/src/backend/commands/remove.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.59 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.60 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,8 +40,8 @@
*/
void
RemoveOperator(char *operatorName, /* operator name */
- char *typeName1, /* left argument type name */
- char *typeName2) /* right argument type name */
+ char *typeName1, /* left argument type name */
+ char *typeName2) /* right argument type name */
{
Relation relation;
HeapTuple tup;
diff --git a/src/backend/commands/rename.c b/src/backend/commands/rename.c
index 3630cdd0d19..52568f29f5f 100644
--- a/src/backend/commands/rename.c
+++ b/src/backend/commands/rename.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.55 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.56 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -189,15 +189,15 @@ renamerel(const char *oldrelname, const char *newrelname)
newrelname);
/*
- * Check for renaming a temp table, which only requires altering
- * the temp-table mapping, not the underlying table.
+ * Check for renaming a temp table, which only requires altering the
+ * temp-table mapping, not the underlying table.
*/
if (rename_temp_relation(oldrelname, newrelname))
return; /* all done... */
/*
- * Instead of using heap_openr(), do it the hard way, so that we
- * can rename indexes as well as regular relations.
+ * Instead of using heap_openr(), do it the hard way, so that we can
+ * rename indexes as well as regular relations.
*/
targetrelation = RelationNameGetRelation(oldrelname);
@@ -219,8 +219,9 @@ renamerel(const char *oldrelname, const char *newrelname)
heap_close(targetrelation, NoLock);
/*
- * Flush the relcache entry (easier than trying to change it at exactly
- * the right instant). It'll get rebuilt on next access to relation.
+ * Flush the relcache entry (easier than trying to change it at
+ * exactly the right instant). It'll get rebuilt on next access to
+ * relation.
*
* XXX What if relation is myxactonly?
*
@@ -244,8 +245,8 @@ renamerel(const char *oldrelname, const char *newrelname)
elog(ERROR, "renamerel: relation \"%s\" exists", newrelname);
/*
- * Update pg_class tuple with new relname. (Scribbling on reltup
- * is OK because it's a copy...)
+ * Update pg_class tuple with new relname. (Scribbling on reltup is
+ * OK because it's a copy...)
*/
StrNCpy(NameStr(((Form_pg_class) GETSTRUCT(reltup))->relname),
newrelname, NAMEDATALEN);
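
The renamerel() hunk above overwrites the relname of a copied pg_class tuple with StrNCpy(..., newrelname, NAMEDATALEN), which is safe precisely because the tuple is a copy. Below is a standalone sketch of a bounded name copy with guaranteed NUL termination; it is a local helper written for illustration, not the backend's StrNCpy macro, and NAMEDATALEN_SK is an arbitrary stand-in value.

#include <stdio.h>
#include <string.h>

#define NAMEDATALEN_SK 32       /* illustrative size for a name field */

static void
name_copy_sketch(char *dst, const char *src, size_t dstlen)
{
    /* copy at most dstlen-1 bytes and always terminate */
    strncpy(dst, src, dstlen - 1);
    dst[dstlen - 1] = '\0';
}

int
main(void)
{
    char        relname[NAMEDATALEN_SK];

    name_copy_sketch(relname,
                     "a_very_long_new_relation_name_that_gets_truncated",
                     sizeof(relname));
    printf("%s\n", relname);
    return 0;
}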
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 04398423b67..85a8b740048 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.51 2001/03/07 21:20:26 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.52 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,7 +33,7 @@
* so we pre-log a few fetches in advance. In the event of
* crash we can lose as much as we pre-logged.
*/
-#define SEQ_LOG_VALS 32
+#define SEQ_LOG_VALS 32
typedef struct sequence_magic
{
@@ -140,7 +140,7 @@ DefineSequence(CreateSeqStmt *seq)
case SEQ_COL_LOG:
typnam->name = "int4";
coldef->colname = "log_cnt";
- value[i - 1] = Int32GetDatum((int32)1);
+ value[i - 1] = Int32GetDatum((int32) 1);
break;
case SEQ_COL_CYCLE:
typnam->name = "char";
@@ -247,7 +247,7 @@ nextval(PG_FUNCTION_ARGS)
logit = true;
}
- while (fetch) /* try to fetch cache [+ log ] numbers */
+ while (fetch) /* try to fetch cache [+ log ] numbers */
{
/*
@@ -292,8 +292,8 @@ nextval(PG_FUNCTION_ARGS)
log--;
rescnt++;
last = next;
- if (rescnt == 1) /* if it's first result - */
- result = next; /* it's what to return */
+ if (rescnt == 1) /* if it's first result - */
+ result = next; /* it's what to return */
}
}
@@ -306,12 +306,12 @@ nextval(PG_FUNCTION_ARGS)
{
xl_seq_rec xlrec;
XLogRecPtr recptr;
- XLogRecData rdata[2];
+ XLogRecData rdata[2];
Page page = BufferGetPage(buf);
xlrec.node = elm->rel->rd_node;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = sizeof(xl_seq_rec);
rdata[0].next = &(rdata[1]);
@@ -319,17 +319,17 @@ nextval(PG_FUNCTION_ARGS)
seq->is_called = 't';
seq->log_cnt = 0;
rdata[1].buffer = InvalidBuffer;
- rdata[1].data = (char*)page + ((PageHeader) page)->pd_upper;
- rdata[1].len = ((PageHeader)page)->pd_special -
- ((PageHeader)page)->pd_upper;
+ rdata[1].data = (char *) page + ((PageHeader) page)->pd_upper;
+ rdata[1].len = ((PageHeader) page)->pd_special -
+ ((PageHeader) page)->pd_upper;
rdata[1].next = NULL;
- recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG|XLOG_NO_TRAN, rdata);
+ recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG | XLOG_NO_TRAN, rdata);
PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID);
- if (fetch) /* not all numbers were fetched */
+ if (fetch) /* not all numbers were fetched */
log -= fetch;
}
@@ -374,15 +374,15 @@ currval(PG_FUNCTION_ARGS)
PG_RETURN_INT32(result);
}
-/*
+/*
* Main internal procedure that handles 2 & 3 arg forms of SETVAL.
*
* Note that the 3 arg version (which sets the is_called flag) is
* only for use in pg_dump, and setting the is_called flag may not
- * work if multiple users are attached to the database and referencing
+ * work if multiple users are attached to the database and referencing
* the sequence (unlikely if pg_dump is restoring it).
*
- * It is necessary to have the 3 arg version so that pg_dump can
+ * It is necessary to have the 3 arg version so that pg_dump can
* restore the state of a sequence exactly during data-only restores -
* it is the only way to clear the is_called flag in an existing
* sequence.
@@ -409,18 +409,19 @@ do_setval(char *seqname, int32 next, bool iscalled)
/* save info in local cache */
elm->last = next; /* last returned number */
- elm->cached = next; /* last cached number (forget cached values) */
+ elm->cached = next; /* last cached number (forget cached
+ * values) */
START_CRIT_SECTION();
{
xl_seq_rec xlrec;
XLogRecPtr recptr;
- XLogRecData rdata[2];
+ XLogRecData rdata[2];
Page page = BufferGetPage(buf);
xlrec.node = elm->rel->rd_node;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = sizeof(xl_seq_rec);
rdata[0].next = &(rdata[1]);
@@ -428,12 +429,12 @@ do_setval(char *seqname, int32 next, bool iscalled)
seq->is_called = 't';
seq->log_cnt = 0;
rdata[1].buffer = InvalidBuffer;
- rdata[1].data = (char*)page + ((PageHeader) page)->pd_upper;
- rdata[1].len = ((PageHeader)page)->pd_special -
- ((PageHeader)page)->pd_upper;
+ rdata[1].data = (char *) page + ((PageHeader) page)->pd_upper;
+ rdata[1].len = ((PageHeader) page)->pd_special -
+ ((PageHeader) page)->pd_upper;
rdata[1].next = NULL;
- recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG|XLOG_NO_TRAN, rdata);
+ recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG | XLOG_NO_TRAN, rdata);
PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID);
@@ -496,7 +497,7 @@ static char *
get_seq_name(text *seqin)
{
char *rawname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(seqin)));
+ PointerGetDatum(seqin)));
int rawlen = strlen(rawname);
char *seqname;
@@ -511,6 +512,7 @@ get_seq_name(text *seqin)
else
{
seqname = rawname;
+
/*
* It's important that this match the identifier downcasing code
* used by backend/parser/scan.l.
@@ -752,15 +754,16 @@ get_param(DefElem *def)
return -1;
}
-void seq_redo(XLogRecPtr lsn, XLogRecord *record)
+void
+seq_redo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
- Relation reln;
- Buffer buffer;
- Page page;
- char *item;
- Size itemsz;
- xl_seq_rec *xlrec = (xl_seq_rec*) XLogRecGetData(record);
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ Relation reln;
+ Buffer buffer;
+ Page page;
+ char *item;
+ Size itemsz;
+ xl_seq_rec *xlrec = (xl_seq_rec *) XLogRecGetData(record);
sequence_magic *sm;
if (info != XLOG_SEQ_LOG)
@@ -772,8 +775,8 @@ void seq_redo(XLogRecPtr lsn, XLogRecord *record)
buffer = XLogReadBuffer(true, reln, 0);
if (!BufferIsValid(buffer))
- elog(STOP, "seq_redo: can't read block of %u/%u",
- xlrec->node.tblNode, xlrec->node.relNode);
+ elog(STOP, "seq_redo: can't read block of %u/%u",
+ xlrec->node.tblNode, xlrec->node.relNode);
page = (Page) BufferGetPage(buffer);
@@ -781,10 +784,10 @@ void seq_redo(XLogRecPtr lsn, XLogRecord *record)
sm = (sequence_magic *) PageGetSpecialPointer(page);
sm->magic = SEQ_MAGIC;
- item = (char*)xlrec + sizeof(xl_seq_rec);
+ item = (char *) xlrec + sizeof(xl_seq_rec);
itemsz = record->xl_len - sizeof(xl_seq_rec);
itemsz = MAXALIGN(itemsz);
- if (PageAddItem(page, (Item)item, itemsz,
+ if (PageAddItem(page, (Item) item, itemsz,
FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
elog(STOP, "seq_redo: failed to add item to page");
@@ -795,14 +798,16 @@ void seq_redo(XLogRecPtr lsn, XLogRecord *record)
return;
}
-void seq_undo(XLogRecPtr lsn, XLogRecord *record)
+void
+seq_undo(XLogRecPtr lsn, XLogRecord *record)
{
}
-void seq_desc(char *buf, uint8 xl_info, char* rec)
+void
+seq_desc(char *buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
- xl_seq_rec *xlrec = (xl_seq_rec*) rec;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
+ xl_seq_rec *xlrec = (xl_seq_rec *) rec;
if (info == XLOG_SEQ_LOG)
strcat(buf, "log: ");
@@ -813,5 +818,5 @@ void seq_desc(char *buf, uint8 xl_info, char* rec)
}
sprintf(buf + strlen(buf), "node %u/%u",
- xlrec->node.tblNode, xlrec->node.relNode);
+ xlrec->node.tblNode, xlrec->node.relNode);
}
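
Both nextval() and do_setval() above assemble the same two-element WAL record: rdata[0] carries the fixed-size xl_seq_rec header, rdata[1] carries the live portion of the sequence page (pd_upper through pd_special), and the chain goes to a single XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG | XLOG_NO_TRAN, ...) call, after which the page LSN is set. The standalone model below keeps only the chain assembly; the types and xlog_insert_sketch() are simplified stand-ins, and the real elements also carry a buffer field (set to InvalidBuffer in the hunks) that this sketch omits.

#include <stdio.h>

typedef struct XLogRecData_sk
{
    char       *data;
    unsigned    len;
    struct XLogRecData_sk *next;
} XLogRecData_sk;

typedef struct
{
    unsigned    tblNode;
    unsigned    relNode;
} xl_seq_rec_sk;

/* toy stand-in: just reports how many bytes the chained record would carry */
static void
xlog_insert_sketch(const XLogRecData_sk *rdata)
{
    unsigned    total = 0;

    for (; rdata != NULL; rdata = rdata->next)
        total += rdata->len;
    printf("would log %u bytes in one record\n", total);
}

int
main(void)
{
    static char page[8192];         /* zero-initialized stand-in for a page */
    unsigned    pd_upper = 8000;    /* illustrative page offsets */
    unsigned    pd_special = 8180;
    xl_seq_rec_sk xlrec = {1, 16384};
    XLogRecData_sk rdata[2];

    /* element 0: the fixed-size record header */
    rdata[0].data = (char *) &xlrec;
    rdata[0].len = sizeof(xl_seq_rec_sk);
    rdata[0].next = &rdata[1];

    /* element 1: the used portion of the page, pd_upper..pd_special */
    rdata[1].data = page + pd_upper;
    rdata[1].len = pd_special - pd_upper;
    rdata[1].next = NULL;

    xlog_insert_sketch(rdata);
    return 0;
}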
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 4a6ddef9283..034b49887e7 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.88 2001/03/14 21:50:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.89 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,8 +36,8 @@ static void DescribeTrigger(TriggerDesc *trigdesc, Trigger *trigger);
static HeapTuple GetTupleForTrigger(EState *estate, ItemPointer tid,
TupleTableSlot **newSlot);
static HeapTuple ExecCallTriggerFunc(Trigger *trigger,
- TriggerData *trigdata,
- MemoryContext per_tuple_context);
+ TriggerData *trigdata,
+ MemoryContext per_tuple_context);
static void DeferredTriggerSaveEvent(Relation rel, int event,
HeapTuple oldtup, HeapTuple newtup);
@@ -87,7 +87,9 @@ CreateTrigger(CreateTrigStmt *stmt)
constrrelid = InvalidOid;
else
{
- /* NoLock is probably sufficient here, since we're only
+
+ /*
+ * NoLock is probably sufficient here, since we're only
* interested in getting the relation's OID...
*/
rel = heap_openr(stmt->constrrelname, NoLock);
@@ -192,7 +194,7 @@ CreateTrigger(CreateTrigStmt *stmt)
values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(stmt->trigname));
+ CStringGetDatum(stmt->trigname));
values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
values[Anum_pg_trigger_tgenabled - 1] = BoolGetDatum(true);
@@ -211,7 +213,7 @@ CreateTrigger(CreateTrigStmt *stmt)
foreach(le, stmt->args)
{
- char *ar = ((Value*) lfirst(le))->val.str;
+ char *ar = ((Value *) lfirst(le))->val.str;
len += strlen(ar) + 4;
for (; *ar; ar++)
@@ -224,7 +226,7 @@ CreateTrigger(CreateTrigStmt *stmt)
args[0] = '\0';
foreach(le, stmt->args)
{
- char *s = ((Value*) lfirst(le))->val.str;
+ char *s = ((Value *) lfirst(le))->val.str;
char *d = args + strlen(args);
while (*s)
@@ -237,7 +239,7 @@ CreateTrigger(CreateTrigStmt *stmt)
}
values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
- CStringGetDatum(args));
+ CStringGetDatum(args));
}
else
{
@@ -569,15 +571,16 @@ RelationBuildTriggers(Relation relation)
sizeof(Trigger));
else
triggers = (Trigger *) repalloc(triggers,
- (found + 1) * sizeof(Trigger));
+ (found + 1) * sizeof(Trigger));
build = &(triggers[found]);
build->tgoid = htup->t_data->t_oid;
build->tgname = MemoryContextStrdup(CacheMemoryContext,
- DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(&pg_trigger->tgname))));
+ DatumGetCString(DirectFunctionCall1(nameout,
+ NameGetDatum(&pg_trigger->tgname))));
build->tgfoid = pg_trigger->tgfoid;
- build->tgfunc.fn_oid = InvalidOid; /* mark FmgrInfo as uninitialized */
+ build->tgfunc.fn_oid = InvalidOid; /* mark FmgrInfo as
+ * uninitialized */
build->tgtype = pg_trigger->tgtype;
build->tgenabled = pg_trigger->tgenabled;
build->tgisconstraint = pg_trigger->tgisconstraint;
@@ -836,22 +839,22 @@ ExecCallTriggerFunc(Trigger *trigger,
TriggerData *trigdata,
MemoryContext per_tuple_context)
{
- FunctionCallInfoData fcinfo;
- Datum result;
- MemoryContext oldContext;
+ FunctionCallInfoData fcinfo;
+ Datum result;
+ MemoryContext oldContext;
/*
- * Fmgr lookup info is cached in the Trigger structure,
- * so that we need not repeat the lookup on every call.
+ * Fmgr lookup info is cached in the Trigger structure, so that we
+ * need not repeat the lookup on every call.
*/
if (trigger->tgfunc.fn_oid == InvalidOid)
fmgr_info(trigger->tgfoid, &trigger->tgfunc);
/*
- * Do the function evaluation in the per-tuple memory context,
- * so that leaked memory will be reclaimed once per tuple.
- * Note in particular that any new tuple created by the trigger function
- * will live till the end of the tuple cycle.
+ * Do the function evaluation in the per-tuple memory context, so that
+ * leaked memory will be reclaimed once per tuple. Note in particular
+ * that any new tuple created by the trigger function will live till
+ * the end of the tuple cycle.
*/
oldContext = MemoryContextSwitchTo(per_tuple_context);
@@ -868,8 +871,8 @@ ExecCallTriggerFunc(Trigger *trigger,
MemoryContextSwitchTo(oldContext);
/*
- * Trigger protocol allows function to return a null pointer,
- * but NOT to set the isnull result flag.
+ * Trigger protocol allows function to return a null pointer, but NOT
+ * to set the isnull result flag.
*/
if (fcinfo.isnull)
elog(ERROR, "ExecCallTriggerFunc: function %u returned NULL",
@@ -885,7 +888,7 @@ ExecBRInsertTriggers(EState *estate, Relation rel, HeapTuple trigtuple)
Trigger **trigger = rel->trigdesc->tg_before_row[TRIGGER_EVENT_INSERT];
HeapTuple newtuple = trigtuple;
HeapTuple oldtuple;
- TriggerData LocTriggerData;
+ TriggerData LocTriggerData;
int i;
LocTriggerData.type = T_TriggerData;
@@ -915,9 +918,7 @@ ExecARInsertTriggers(EState *estate, Relation rel, HeapTuple trigtuple)
if (rel->trigdesc->n_after_row[TRIGGER_EVENT_INSERT] > 0 ||
rel->trigdesc->n_after_row[TRIGGER_EVENT_UPDATE] > 0 ||
rel->trigdesc->n_after_row[TRIGGER_EVENT_DELETE] > 0)
- {
DeferredTriggerSaveEvent(rel, TRIGGER_EVENT_INSERT, NULL, trigtuple);
- }
}
bool
@@ -1240,10 +1241,11 @@ deferredTriggerCheckState(Oid tgoid, int32 itemstate)
static void
deferredTriggerAddEvent(DeferredTriggerEvent event)
{
+
/*
* Since the event list could grow quite long, we keep track of the
- * list tail and append there, rather than just doing a stupid "lappend".
- * This avoids O(N^2) behavior for large numbers of events.
+ * list tail and append there, rather than just doing a stupid
+ * "lappend". This avoids O(N^2) behavior for large numbers of events.
*/
event->dte_next = NULL;
if (deftrig_event_tail == NULL)
@@ -1291,7 +1293,7 @@ deferredTriggerGetPreviousEvent(Oid relid, ItemPointer ctid)
if (previous == NULL)
elog(ERROR,
- "deferredTriggerGetPreviousEvent: event for tuple %s not found",
+ "deferredTriggerGetPreviousEvent: event for tuple %s not found",
DatumGetCString(DirectFunctionCall1(tidout,
PointerGetDatum(ctid))));
return previous;
@@ -1528,7 +1530,7 @@ DeferredTriggerBeginXact(void)
if (deftrig_cxt != NULL)
elog(ERROR,
- "DeferredTriggerBeginXact() called while inside transaction");
+ "DeferredTriggerBeginXact() called while inside transaction");
/* ----------
* Create the per transaction memory context and copy all states
@@ -1671,7 +1673,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
l = deftrig_dfl_trigstates;
while (l != NIL)
{
- List *next = lnext(l);
+ List *next = lnext(l);
pfree(lfirst(l));
pfree(l);
@@ -1700,7 +1702,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
l = deftrig_trigstates;
while (l != NIL)
{
- List *next = lnext(l);
+ List *next = lnext(l);
pfree(lfirst(l));
pfree(l);
@@ -1912,7 +1914,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
* Called by ExecAR...Triggers() to add the event to the queue.
*
* NOTE: should be called only if we've determined that an event must
- * be added to the queue. We must save *all* events if there is either
+ * be added to the queue. We must save *all* events if there is either
* an UPDATE or a DELETE deferred trigger; see uses of
* deferredTriggerGetPreviousEvent.
* ----------
@@ -2099,15 +2101,15 @@ DeferredTriggerSaveEvent(Relation rel, int event,
TRIGGER_DEFERRED_ROW_INSERTED)
elog(ERROR, "triggered data change violation "
"on relation \"%s\"",
- DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(&(rel->rd_rel->relname)))));
+ DatumGetCString(DirectFunctionCall1(nameout,
+ NameGetDatum(&(rel->rd_rel->relname)))));
if (prev_event->dte_item[i].dti_state &
TRIGGER_DEFERRED_KEY_CHANGED)
elog(ERROR, "triggered data change violation "
"on relation \"%s\"",
- DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(&(rel->rd_rel->relname)))));
+ DatumGetCString(DirectFunctionCall1(nameout,
+ NameGetDatum(&(rel->rd_rel->relname)))));
}
/* ----------
@@ -2142,7 +2144,7 @@ DeferredTriggerSaveEvent(Relation rel, int event,
elog(ERROR, "triggered data change violation "
"on relation \"%s\"",
DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(&(rel->rd_rel->relname)))));
+ NameGetDatum(&(rel->rd_rel->relname)))));
break;
}
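
The deferredTriggerAddEvent() comment above is the whole algorithm: keep a pointer to the queue tail and append there, so queuing N deferred trigger events costs O(N) overall rather than the O(N^2) a naive list append would. Below is a standalone sketch of that tail-pointer append, with a placeholder payload instead of the real event structure.

#include <stdio.h>
#include <stdlib.h>

typedef struct Event_sk
{
    int         payload;        /* stand-in for the real event data */
    struct Event_sk *next;
} Event_sk;

static Event_sk *event_head = NULL;
static Event_sk *event_tail = NULL;

/* O(1) append: no walk to the end of the list is ever needed */
static void
add_event_sketch(Event_sk *event)
{
    event->next = NULL;
    if (event_tail == NULL)
    {
        /* first list entry */
        event_head = event;
        event_tail = event;
    }
    else
    {
        event_tail->next = event;
        event_tail = event;
    }
}

int
main(void)
{
    int         i,
                n = 0;

    for (i = 0; i < 5; i++)
    {
        Event_sk   *e = malloc(sizeof(Event_sk));

        e->payload = i;
        add_event_sketch(e);
    }
    while (event_head != NULL)
    {
        Event_sk   *next = event_head->next;

        free(event_head);
        event_head = next;
        n++;
    }
    printf("%d events queued and drained\n", n);
    return 0;
}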
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 378620cb3b7..ede41b64cc8 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.73 2001/01/24 19:42:53 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.74 2001/03/22 03:59:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -122,7 +122,7 @@ write_password_file(Relation rel)
CRYPT_PWD_FILE_SEPSTR
"%s\n",
DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(DatumGetName(datum_n)))),
+ NameGetDatum(DatumGetName(datum_n)))),
null_p ? "" :
DatumGetCString(DirectFunctionCall1(textout, datum_p)),
null_v ? "\\N" :
@@ -248,7 +248,7 @@ CreateUser(CreateUserStmt *stmt)
* Build a tuple to insert
*/
new_record[Anum_pg_shadow_usename - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(stmt->user));
+ CStringGetDatum(stmt->user));
new_record[Anum_pg_shadow_usesysid - 1] = Int32GetDatum(havesysid ? stmt->sysid : max_id + 1);
AssertState(BoolIsValid(stmt->createdb));
@@ -312,7 +312,7 @@ CreateUser(CreateUserStmt *stmt)
* this in */
ags.action = +1;
ags.listUsers = makeList1(makeInteger(havesysid ?
- stmt->sysid : max_id + 1));
+ stmt->sysid : max_id + 1));
AlterGroup(&ags, "CREATE USER");
}
@@ -377,7 +377,7 @@ AlterUser(AlterUserStmt *stmt)
* Build a tuple to update, perusing the information just obtained
*/
new_record[Anum_pg_shadow_usename - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(stmt->user));
+ CStringGetDatum(stmt->user));
new_record_nulls[Anum_pg_shadow_usename - 1] = ' ';
/* sysid - leave as is */
@@ -561,7 +561,7 @@ DropUser(DropUserStmt *stmt)
elog(ERROR, "DROP USER: user \"%s\" owns database \"%s\", cannot be removed%s",
user,
DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(DatumGetName(datum)))),
+ NameGetDatum(DatumGetName(datum)))),
(length(stmt->users) > 1) ? " (no users removed)" : ""
);
}
@@ -603,6 +603,7 @@ DropUser(DropUserStmt *stmt)
}
heap_endscan(scan);
heap_close(pg_rel, AccessExclusiveLock);
+
/*
* Advance command counter so that later iterations of this loop
* will see the changes already made. This is essential if, for
@@ -873,7 +874,7 @@ AlterGroup(AlterGroupStmt *stmt, const char *tag)
{
/* Get the uid of the proposed user to add. */
tuple = SearchSysCache(SHADOWNAME,
- PointerGetDatum(strVal(lfirst(item))),
+ PointerGetDatum(strVal(lfirst(item))),
0, 0, 0);
if (!HeapTupleIsValid(tuple))
elog(ERROR, "%s: user \"%s\" does not exist",
@@ -995,7 +996,7 @@ AlterGroup(AlterGroupStmt *stmt, const char *tag)
{
/* Get the uid of the proposed user to drop. */
tuple = SearchSysCache(SHADOWNAME,
- PointerGetDatum(strVal(lfirst(item))),
+ PointerGetDatum(strVal(lfirst(item))),
0, 0, 0);
if (!HeapTupleIsValid(tuple))
elog(ERROR, "ALTER GROUP: user \"%s\" does not exist", strVal(lfirst(item)));
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 3606d05f741..078c9b53475 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.187 2001/03/14 08:40:57 inoue Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.188 2001/03/22 03:59:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -47,11 +47,11 @@
#include "utils/syscache.h"
#include "utils/temprel.h"
-extern XLogRecPtr log_heap_clean(Relation reln, Buffer buffer,
- char *unused, int unlen);
-extern XLogRecPtr log_heap_move(Relation reln,
- Buffer oldbuf, ItemPointerData from,
- Buffer newbuf, HeapTuple newtup);
+extern XLogRecPtr log_heap_clean(Relation reln, Buffer buffer,
+ char *unused, int unlen);
+extern XLogRecPtr log_heap_move(Relation reln,
+ Buffer oldbuf, ItemPointerData from,
+ Buffer newbuf, HeapTuple newtup);
static MemoryContext vac_context = NULL;
@@ -78,9 +78,9 @@ static void vpage_insert(VacPageList vacpagelist, VacPage vpnew);
static void get_indices(Relation relation, int *nindices, Relation **Irel);
static void close_indices(int nindices, Relation *Irel);
static IndexInfo **get_index_desc(Relation onerel, int nindices,
- Relation *Irel);
+ Relation *Irel);
static void *vac_find_eq(void *bot, int nelem, int size, void *elm,
- int (*compar) (const void *, const void *));
+ int (*compar) (const void *, const void *));
static int vac_cmp_blk(const void *left, const void *right);
static int vac_cmp_offno(const void *left, const void *right);
static int vac_cmp_vtlinks(const void *left, const void *right);
@@ -120,9 +120,9 @@ vacuum(char *vacrel, bool verbose, bool analyze, List *anal_cols)
/*
* Create special memory context for cross-transaction storage.
*
- * Since it is a child of QueryContext, it will go away eventually
- * even if we suffer an error; there's no need for special abort
- * cleanup logic.
+ * Since it is a child of QueryContext, it will go away eventually even
+ * if we suffer an error; there's no need for special abort cleanup
+ * logic.
*/
vac_context = AllocSetContextCreate(QueryContext,
"Vacuum",
@@ -215,8 +215,8 @@ vacuum_shutdown()
/*
* Clean up working storage --- note we must do this after
- * StartTransactionCommand, else we might be trying to delete
- * the active context!
+ * StartTransactionCommand, else we might be trying to delete the
+ * active context!
*/
MemoryContextDelete(vac_context);
vac_context = NULL;
@@ -360,10 +360,10 @@ vacuum_rel(Oid relid)
{
Relation onerel;
LockRelId onerelid;
- VacPageListData vacuum_pages; /* List of pages to vacuum and/or clean
- * indices */
- VacPageListData fraged_pages; /* List of pages with space enough for
- * re-using */
+ VacPageListData vacuum_pages; /* List of pages to vacuum and/or
+ * clean indices */
+ VacPageListData fraged_pages; /* List of pages with space enough
+ * for re-using */
Relation *Irel;
int32 nindices,
i;
@@ -411,10 +411,10 @@ vacuum_rel(Oid relid)
}
/*
- * Get a session-level exclusive lock too. This will protect our
- * exclusive access to the relation across multiple transactions,
- * so that we can vacuum the relation's TOAST table (if any) secure
- * in the knowledge that no one is diddling the parent relation.
+ * Get a session-level exclusive lock too. This will protect our
+ * exclusive access to the relation across multiple transactions, so
+ * that we can vacuum the relation's TOAST table (if any) secure in
+ * the knowledge that no one is diddling the parent relation.
*
* NOTE: this cannot block, even if someone else is waiting for access,
* because the lock manager knows that both lock requests are from the
@@ -458,10 +458,11 @@ vacuum_rel(Oid relid)
vacrelstats->hasindex = true;
else
vacrelstats->hasindex = false;
-#ifdef NOT_USED
+#ifdef NOT_USED
+
/*
- * reindex in VACUUM is dangerous under WAL.
- * ifdef out until it becomes safe.
+ * reindex in VACUUM is dangerous under WAL. ifdef out until it
+ * becomes safe.
*/
if (reindex)
{
@@ -470,7 +471,7 @@ vacuum_rel(Oid relid)
Irel = (Relation *) NULL;
activate_indexes_of_a_table(relid, false);
}
-#endif /* NOT_USED */
+#endif /* NOT_USED */
/* Clean/scan index relation(s) */
if (Irel != (Relation *) NULL)
@@ -506,6 +507,7 @@ vacuum_rel(Oid relid)
}
else
{
+
/*
* Flush dirty pages out to disk. We must do this even if we
* didn't do anything else, because we want to ensure that all
@@ -518,10 +520,10 @@ vacuum_rel(Oid relid)
i);
}
}
-#ifdef NOT_USED
+#ifdef NOT_USED
if (reindex)
activate_indexes_of_a_table(relid, true);
-#endif /* NOT_USED */
+#endif /* NOT_USED */
/* all done with this class, but hold lock until commit */
heap_close(onerel, NoLock);
@@ -537,11 +539,11 @@ vacuum_rel(Oid relid)
CommitTransactionCommand();
/*
- * If the relation has a secondary toast one, vacuum that too
- * while we still hold the session lock on the master table.
- * We don't need to propagate "analyze" to it, because the toaster
- * always uses hardcoded index access and statistics are
- * totally unimportant for toast relations
+ * If the relation has a secondary toast one, vacuum that too while we
+ * still hold the session lock on the master table. We don't need to
+ * propagate "analyze" to it, because the toaster always uses
+ * hardcoded index access and statistics are totally unimportant for
+ * toast relations
*/
if (toast_relid != InvalidOid)
vacuum_rel(toast_relid);
@@ -563,7 +565,7 @@ vacuum_rel(Oid relid)
*/
static void
scan_heap(VRelStats *vacrelstats, Relation onerel,
- VacPageList vacuum_pages, VacPageList fraged_pages)
+ VacPageList vacuum_pages, VacPageList fraged_pages)
{
BlockNumber nblocks,
blkno;
@@ -845,7 +847,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
* dead tuples removed. Below we will apply
* PageRepairFragmentation to the copy, so that we can
* determine how much space will be available after
- * removal of dead tuples. But note we are NOT changing
+ * removal of dead tuples. But note we are NOT changing
* the real page yet...
*/
if (tempPage == (Page) NULL)
@@ -964,8 +966,8 @@ Re-using: Free/Avail. Space %lu/%lu; EndEmpty/Avail. Pages %u/%u. %s",
nblocks, changed_pages, vacuum_pages->num_pages, empty_pages,
new_pages, num_tuples, tups_vacuumed,
nkeep, vacrelstats->num_vtlinks, ncrash,
- nunused, (unsigned long)min_tlen, (unsigned long)max_tlen,
- (unsigned long)free_size, (unsigned long)usable_free_size,
+ nunused, (unsigned long) min_tlen, (unsigned long) max_tlen,
+ (unsigned long) free_size, (unsigned long) usable_free_size,
empty_end_pages, fraged_pages->num_pages,
show_rusage(&ru0));
@@ -984,8 +986,8 @@ Re-using: Free/Avail. Space %lu/%lu; EndEmpty/Avail. Pages %u/%u. %s",
*/
static void
repair_frag(VRelStats *vacrelstats, Relation onerel,
- VacPageList vacuum_pages, VacPageList fraged_pages,
- int nindices, Relation *Irel)
+ VacPageList vacuum_pages, VacPageList fraged_pages,
+ int nindices, Relation *Irel)
{
TransactionId myXID;
CommandId myCID;
@@ -1077,7 +1079,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
dowrite = false;
if (blkno == last_vacuum_block) /* it's reaped page */
{
- if (last_vacuum_page->offsets_free > 0) /* there are dead tuples */
+ if (last_vacuum_page->offsets_free > 0) /* there are dead tuples */
{ /* on this page - clean */
Assert(!isempty);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
@@ -1100,7 +1102,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
last_vacuum_block = -1;
}
if (num_fraged_pages > 0 &&
- fraged_pages->pagedesc[num_fraged_pages - 1]->blkno ==
+ fraged_pages->pagedesc[num_fraged_pages - 1]->blkno ==
(BlockNumber) blkno)
{
/* page is in fraged_pages too; remove it */
@@ -1142,8 +1144,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* If this (chain) tuple is moved by me already then I
- * have to check is it in vacpage or not - i.e. is it moved
- * while cleaning this page or some previous one.
+ * have to check is it in vacpage or not - i.e. is it
+ * moved while cleaning this page or some previous one.
*/
if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
{
@@ -1232,8 +1234,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* xaction and this tuple is already deleted by
* me. Actually, upper part of chain should be
* removed and seems that this should be handled
- * in scan_heap(), but it's not implemented at
- * the moment and so we just stop shrinking here.
+ * in scan_heap(), but it's not implemented at the
+ * moment and so we just stop shrinking here.
*/
ReleaseBuffer(Cbuf);
pfree(vtmove);
@@ -1256,15 +1258,15 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
{
/*
- * if to_vacpage no longer has enough free space to be
- * useful, remove it from fraged_pages list
+ * if to_vacpage no longer has enough free space
+ * to be useful, remove it from fraged_pages list
*/
if (to_vacpage != NULL &&
- !enough_space(to_vacpage, vacrelstats->min_tlen))
+ !enough_space(to_vacpage, vacrelstats->min_tlen))
{
Assert(num_fraged_pages > to_item);
memmove(fraged_pages->pagedesc + to_item,
- fraged_pages->pagedesc + to_item + 1,
+ fraged_pages->pagedesc + to_item + 1,
sizeof(VacPage) * (num_fraged_pages - to_item - 1));
num_fraged_pages--;
}
@@ -1326,10 +1328,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
vtld.new_tid = tp.t_self;
vtlp = (VTupleLink)
vac_find_eq((void *) (vacrelstats->vtlinks),
- vacrelstats->num_vtlinks,
- sizeof(VTupleLinkData),
- (void *) &vtld,
- vac_cmp_vtlinks);
+ vacrelstats->num_vtlinks,
+ sizeof(VTupleLinkData),
+ (void *) &vtld,
+ vac_cmp_vtlinks);
if (vtlp == NULL)
elog(ERROR, "Parent tuple was not found");
tp.t_self = vtlp->this_tid;
@@ -1416,7 +1418,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
ItemPointerSetInvalid(&Ctid);
for (ti = 0; ti < num_vtmove; ti++)
{
- VacPage destvacpage = vtmove[ti].vacpage;
+ VacPage destvacpage = vtmove[ti].vacpage;
/* Get page to move from */
tuple.t_self = vtmove[ti].tid;
@@ -1460,21 +1462,22 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
*
* NOTE: a nasty bug used to lurk here. It is possible
* for the source and destination pages to be the same
- * (since this tuple-chain member can be on a page lower
- * than the one we're currently processing in the outer
- * loop). If that's true, then after vacuum_page() the
- * source tuple will have been moved, and tuple.t_data
- * will be pointing at garbage. Therefore we must do
- * everything that uses tuple.t_data BEFORE this step!!
+ * (since this tuple-chain member can be on a page
+ * lower than the one we're currently processing in
+ * the outer loop). If that's true, then after
+ * vacuum_page() the source tuple will have been
+ * moved, and tuple.t_data will be pointing at
+ * garbage. Therefore we must do everything that uses
+ * tuple.t_data BEFORE this step!!
*
* This path is different from the other callers of
- * vacuum_page, because we have already incremented the
- * vacpage's offsets_used field to account for the
+ * vacuum_page, because we have already incremented
+ * the vacpage's offsets_used field to account for the
* tuple(s) we expect to move onto the page. Therefore
- * vacuum_page's check for offsets_used == 0 is
- * wrong. But since that's a good debugging check for
- * all other callers, we work around it here rather
- * than remove it.
+ * vacuum_page's check for offsets_used == 0 is wrong.
+ * But since that's a good debugging check for all
+ * other callers, we work around it here rather than
+ * remove it.
*/
if (!PageIsEmpty(ToPage) && vtmove[ti].cleanVpd)
{
@@ -1498,7 +1501,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (newoff == InvalidOffsetNumber)
{
elog(STOP, "moving chain: failed to add item with len = %lu to page %u",
- (unsigned long)tuple_len, destvacpage->blkno);
+ (unsigned long) tuple_len, destvacpage->blkno);
}
newitemid = PageGetItemId(ToPage, newoff);
pfree(newtup.t_data);
@@ -1507,9 +1510,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
ItemPointerSet(&(newtup.t_self), destvacpage->blkno, newoff);
{
- XLogRecPtr recptr =
- log_heap_move(onerel, Cbuf, tuple.t_self,
- cur_buffer, &newtup);
+ XLogRecPtr recptr =
+ log_heap_move(onerel, Cbuf, tuple.t_self,
+ cur_buffer, &newtup);
if (Cbuf != cur_buffer)
{
@@ -1526,7 +1529,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* Set new tuple's t_ctid pointing to itself for last
- * tuple in chain, and to next tuple in chain otherwise.
+ * tuple in chain, and to next tuple in chain
+ * otherwise.
*/
if (!ItemPointerIsValid(&Ctid))
newtup.t_data->t_ctid = newtup.t_self;
@@ -1552,13 +1556,15 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (Irel != (Relation *) NULL)
{
+
/*
* XXX using CurrentMemoryContext here means
- * intra-vacuum memory leak for functional indexes.
- * Should fix someday.
+ * intra-vacuum memory leak for functional
+ * indexes. Should fix someday.
*
* XXX This code fails to handle partial indexes!
- * Probably should change it to use ExecOpenIndices.
+ * Probably should change it to use
+ * ExecOpenIndices.
*/
for (i = 0; i < nindices; i++)
{
@@ -1653,8 +1659,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
{
elog(STOP, "\
failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)",
- (unsigned long)tuple_len, cur_page->blkno, (unsigned long)cur_page->free,
- cur_page->offsets_used, cur_page->offsets_free);
+ (unsigned long) tuple_len, cur_page->blkno, (unsigned long) cur_page->free,
+ cur_page->offsets_used, cur_page->offsets_free);
}
newitemid = PageGetItemId(ToPage, newoff);
pfree(newtup.t_data);
@@ -1673,9 +1679,9 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
{
- XLogRecPtr recptr =
- log_heap_move(onerel, buf, tuple.t_self,
- cur_buffer, &newtup);
+ XLogRecPtr recptr =
+ log_heap_move(onerel, buf, tuple.t_self,
+ cur_buffer, &newtup);
PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID);
@@ -1698,13 +1704,13 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
/* insert index' tuples if needed */
if (Irel != (Relation *) NULL)
{
+
/*
- * XXX using CurrentMemoryContext here means
- * intra-vacuum memory leak for functional indexes.
- * Should fix someday.
+ * XXX using CurrentMemoryContext here means intra-vacuum
+ * memory leak for functional indexes. Should fix someday.
*
- * XXX This code fails to handle partial indexes!
- * Probably should change it to use ExecOpenIndices.
+ * XXX This code fails to handle partial indexes! Probably
+ * should change it to use ExecOpenIndices.
*/
for (i = 0; i < nindices; i++)
{
@@ -1803,14 +1809,15 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
if (num_moved > 0)
{
+
/*
* We have to commit our tuple movings before we truncate the
* relation. Ideally we should do Commit/StartTransactionCommand
* here, relying on the session-level table lock to protect our
* exclusive access to the relation. However, that would require
* a lot of extra code to close and re-open the relation, indices,
- * etc. For now, a quick hack: record status of current transaction
- * as committed, and continue.
+ * etc. For now, a quick hack: record status of current
+ * transaction as committed, and continue.
*/
RecordTransactionCommit();
}
@@ -1873,7 +1880,7 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
nblocks, blkno, num_moved,
show_rusage(&ru0));
- /*
+ /*
* Reflect the motion of system tuples to catalog cache here.
*/
CommandCounterIncrement();
@@ -1883,13 +1890,13 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
/* vacuum indices again if needed */
if (Irel != (Relation *) NULL)
{
- VacPage *vpleft,
+ VacPage *vpleft,
*vpright,
vpsave;
/* re-sort Nvacpagelist.pagedesc */
for (vpleft = Nvacpagelist.pagedesc,
- vpright = Nvacpagelist.pagedesc + Nvacpagelist.num_pages - 1;
+ vpright = Nvacpagelist.pagedesc + Nvacpagelist.num_pages - 1;
vpleft < vpright; vpleft++, vpright--)
{
vpsave = *vpleft;
@@ -1906,9 +1913,9 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
if (vacpage->blkno == (BlockNumber) (blkno - 1) &&
vacpage->offsets_free > 0)
{
- char unbuf[BLCKSZ];
- OffsetNumber *unused = (OffsetNumber*)unbuf;
- int uncnt;
+ char unbuf[BLCKSZ];
+ OffsetNumber *unused = (OffsetNumber *) unbuf;
+ int uncnt;
buf = ReadBuffer(onerel, vacpage->blkno);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
@@ -1943,8 +1950,9 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
uncnt = PageRepairFragmentation(page, unused);
{
XLogRecPtr recptr;
- recptr = log_heap_clean(onerel, buf, (char*)unused,
- (char*)(&(unused[uncnt])) - (char*)unused);
+
+ recptr = log_heap_clean(onerel, buf, (char *) unused,
+ (char *) (&(unused[uncnt])) - (char *) unused);
PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID);
}
@@ -1962,9 +1970,9 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
/*
* Flush dirty pages out to disk. We do this unconditionally, even if
- * we don't need to truncate, because we want to ensure that all tuples
- * have correct on-row commit status on disk (see bufmgr.c's comments
- * for FlushRelationBuffers()).
+ * we don't need to truncate, because we want to ensure that all
+ * tuples have correct on-row commit status on disk (see bufmgr.c's
+ * comments for FlushRelationBuffers()).
*/
i = FlushRelationBuffers(onerel, blkno);
if (i < 0)
@@ -2005,8 +2013,7 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
int i;
nblocks = vacuum_pages->num_pages;
- nblocks -= vacuum_pages->empty_end_pages; /* nothing to do with
- * them */
+ nblocks -= vacuum_pages->empty_end_pages; /* nothing to do with them */
for (i = 0, vacpage = vacuum_pages->pagedesc; i < nblocks; i++, vacpage++)
{
@@ -2022,9 +2029,9 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
/*
* Flush dirty pages out to disk. We do this unconditionally, even if
- * we don't need to truncate, because we want to ensure that all tuples
- * have correct on-row commit status on disk (see bufmgr.c's comments
- * for FlushRelationBuffers()).
+ * we don't need to truncate, because we want to ensure that all
+ * tuples have correct on-row commit status on disk (see bufmgr.c's
+ * comments for FlushRelationBuffers()).
*/
Assert(vacrelstats->num_pages >= vacuum_pages->empty_end_pages);
nblocks = vacrelstats->num_pages - vacuum_pages->empty_end_pages;
@@ -2042,7 +2049,8 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
vacrelstats->num_pages, nblocks);
nblocks = smgrtruncate(DEFAULT_SMGR, onerel, nblocks);
Assert(nblocks >= 0);
- vacrelstats->num_pages = nblocks; /* set new number of blocks */
+ vacrelstats->num_pages = nblocks; /* set new number of
+ * blocks */
}
}
@@ -2053,12 +2061,12 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
static void
vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage)
{
- char unbuf[BLCKSZ];
- OffsetNumber *unused = (OffsetNumber*)unbuf;
- int uncnt;
- Page page = BufferGetPage(buffer);
- ItemId itemid;
- int i;
+ char unbuf[BLCKSZ];
+ OffsetNumber *unused = (OffsetNumber *) unbuf;
+ int uncnt;
+ Page page = BufferGetPage(buffer);
+ ItemId itemid;
+ int i;
/* There shouldn't be any tuples moved onto the page yet! */
Assert(vacpage->offsets_used == 0);
@@ -2072,8 +2080,9 @@ vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage)
uncnt = PageRepairFragmentation(page, unused);
{
XLogRecPtr recptr;
- recptr = log_heap_clean(onerel, buffer, (char*)unused,
- (char*)(&(unused[uncnt])) - (char*)unused);
+
+ recptr = log_heap_clean(onerel, buffer, (char *) unused,
+ (char *) (&(unused[uncnt])) - (char *) unused);
PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID);
}
@@ -2220,8 +2229,8 @@ tid_reaped(ItemPointer itemptr, VacPageList vacpagelist)
vp = &vacpage;
vpp = (VacPage *) vac_find_eq((void *) (vacpagelist->pagedesc),
- vacpagelist->num_pages, sizeof(VacPage), (void *) &vp,
- vac_cmp_blk);
+ vacpagelist->num_pages, sizeof(VacPage), (void *) &vp,
+ vac_cmp_blk);
if (vpp == (VacPage *) NULL)
return (VacPage) NULL;
@@ -2235,8 +2244,8 @@ tid_reaped(ItemPointer itemptr, VacPageList vacpagelist)
}
voff = (OffsetNumber *) vac_find_eq((void *) (vp->offsets),
- vp->offsets_free, sizeof(OffsetNumber), (void *) &ioffno,
- vac_cmp_offno);
+ vp->offsets_free, sizeof(OffsetNumber), (void *) &ioffno,
+ vac_cmp_offno);
if (voff == (OffsetNumber *) NULL)
return (VacPage) NULL;
@@ -2265,7 +2274,7 @@ tid_reaped(ItemPointer itemptr, VacPageList vacpagelist)
*/
static void
update_relstats(Oid relid, int num_pages, int num_tuples, bool hasindex,
- VRelStats *vacrelstats)
+ VRelStats *vacrelstats)
{
Relation rd;
HeapTupleData rtup;
@@ -2313,7 +2322,7 @@ update_relstats(Oid relid, int num_pages, int num_tuples, bool hasindex,
static void
reap_page(VacPageList vacpagelist, VacPage vacpage)
{
- VacPage newvacpage;
+ VacPage newvacpage;
/* allocate a VacPageData entry */
newvacpage = (VacPage) palloc(sizeof(VacPageData) + vacpage->offsets_free * sizeof(OffsetNumber));
@@ -2354,7 +2363,7 @@ vpage_insert(VacPageList vacpagelist, VacPage vpnew)
static void *
vac_find_eq(void *bot, int nelem, int size, void *elm,
- int (*compar) (const void *, const void *))
+ int (*compar) (const void *, const void *))
{
int res;
int last = nelem - 1;
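
The tid_reaped() hunks above pass vac_cmp_blk and vac_cmp_offno into vac_find_eq(), which, given a qsort-style comparator, looks a key up in an array the surrounding vacuum code keeps sorted. As a rough, standalone illustration of that lookup contract only (not the PostgreSQL implementation), the sketch below uses a generic binary search with the same calling convention; the OffsetNumber typedef stand-in and the demo main() are assumptions made for the example.

#include <stdio.h>

typedef unsigned short OffsetNumber;   /* stand-in for the PostgreSQL typedef */

/* qsort/bsearch-style comparator, as used with vac_find_eq in the diff */
static int
cmp_offno(const void *left, const void *right)
{
    OffsetNumber l = *(const OffsetNumber *) left;
    OffsetNumber r = *(const OffsetNumber *) right;

    return (l < r) ? -1 : (l > r) ? 1 : 0;
}

/*
 * Generic binary search over a sorted array, mirroring the calling
 * convention shown for vac_find_eq(): base pointer, element count,
 * element size, pointer to the key, and a comparator.
 */
static void *
find_eq(void *bot, int nelem, int size, void *elm,
        int (*compar) (const void *, const void *))
{
    int low = 0;
    int high = nelem - 1;

    while (low <= high)
    {
        int   mid = low + (high - low) / 2;
        char *probe = (char *) bot + (size_t) mid * size;
        int   res = compar(elm, probe);

        if (res == 0)
            return probe;           /* exact match */
        if (res < 0)
            high = mid - 1;
        else
            low = mid + 1;
    }
    return NULL;                    /* not present */
}

int
main(void)
{
    OffsetNumber offsets[] = {2, 5, 9, 14, 21};     /* must be sorted */
    OffsetNumber key = 14;
    OffsetNumber *hit = find_eq(offsets, 5, sizeof(OffsetNumber),
                                &key, cmp_offno);

    printf("offset %u %s\n", (unsigned) key, hit ? "found" : "missing");
    return 0;
}
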
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index 6f07bff095d..cc5f64f41a0 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.45 2001/01/24 19:42:53 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.46 2001/03/22 03:59:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -453,6 +453,7 @@ parse_DefaultXactIsoLevel(char *value)
{
#if 0
TransactionState s = CurrentTransactionState;
+
#endif
if (value == NULL)
@@ -632,7 +633,7 @@ parse_client_encoding(char *value)
}
#else
if (value &&
- strcasecmp(value, pg_encoding_to_char(pg_get_client_encoding())) != 0)
+ strcasecmp(value, pg_encoding_to_char(pg_get_client_encoding())) != 0)
elog(ERROR, "Client encoding %s is not supported", value);
#endif
return TRUE;
@@ -701,28 +702,27 @@ reset_server_encoding(void)
void
SetPGVariable(const char *name, const char *value)
{
- char *mvalue = value ? pstrdup(value) : ((char*) NULL);
-
- /*
- * Special cases ought to be removed and handled separately
- * by TCOP
- */
- if (strcasecmp(name, "datestyle")==0)
- parse_date(mvalue);
- else if (strcasecmp(name, "timezone")==0)
- parse_timezone(mvalue);
- else if (strcasecmp(name, "DefaultXactIsoLevel")==0)
- parse_DefaultXactIsoLevel(mvalue);
- else if (strcasecmp(name, "XactIsoLevel")==0)
- parse_XactIsoLevel(mvalue);
- else if (strcasecmp(name, "client_encoding")==0)
- parse_client_encoding(mvalue);
- else if (strcasecmp(name, "server_encoding")==0)
- parse_server_encoding(mvalue);
- else if (strcasecmp(name, "random_seed")==0)
- parse_random_seed(mvalue);
- else
- SetConfigOption(name, value, superuser() ? PGC_SUSET : PGC_USERSET);
+ char *mvalue = value ? pstrdup(value) : ((char *) NULL);
+
+ /*
+ * Special cases ought to be removed and handled separately by TCOP
+ */
+ if (strcasecmp(name, "datestyle") == 0)
+ parse_date(mvalue);
+ else if (strcasecmp(name, "timezone") == 0)
+ parse_timezone(mvalue);
+ else if (strcasecmp(name, "DefaultXactIsoLevel") == 0)
+ parse_DefaultXactIsoLevel(mvalue);
+ else if (strcasecmp(name, "XactIsoLevel") == 0)
+ parse_XactIsoLevel(mvalue);
+ else if (strcasecmp(name, "client_encoding") == 0)
+ parse_client_encoding(mvalue);
+ else if (strcasecmp(name, "server_encoding") == 0)
+ parse_server_encoding(mvalue);
+ else if (strcasecmp(name, "random_seed") == 0)
+ parse_random_seed(mvalue);
+ else
+ SetConfigOption(name, value, superuser() ? PGC_SUSET : PGC_USERSET);
if (mvalue)
pfree(mvalue);
@@ -732,44 +732,45 @@ SetPGVariable(const char *name, const char *value)
void
GetPGVariable(const char *name)
{
- if (strcasecmp(name, "datestyle")==0)
- show_date();
- else if (strcasecmp(name, "timezone")==0)
- show_timezone();
- else if (strcasecmp(name, "DefaultXactIsoLevel")==0)
- show_DefaultXactIsoLevel();
- else if (strcasecmp(name, "XactIsoLevel")==0)
- show_XactIsoLevel();
- else if (strcasecmp(name, "client_encoding")==0)
- show_client_encoding();
- else if (strcasecmp(name, "server_encoding")==0)
- show_server_encoding();
- else if (strcasecmp(name, "random_seed")==0)
- show_random_seed();
- else
- {
- const char * val = GetConfigOption(name);
- elog(NOTICE, "%s is %s", name, val);
- }
-}
+ if (strcasecmp(name, "datestyle") == 0)
+ show_date();
+ else if (strcasecmp(name, "timezone") == 0)
+ show_timezone();
+ else if (strcasecmp(name, "DefaultXactIsoLevel") == 0)
+ show_DefaultXactIsoLevel();
+ else if (strcasecmp(name, "XactIsoLevel") == 0)
+ show_XactIsoLevel();
+ else if (strcasecmp(name, "client_encoding") == 0)
+ show_client_encoding();
+ else if (strcasecmp(name, "server_encoding") == 0)
+ show_server_encoding();
+ else if (strcasecmp(name, "random_seed") == 0)
+ show_random_seed();
+ else
+ {
+ const char *val = GetConfigOption(name);
+
+ elog(NOTICE, "%s is %s", name, val);
+ }
+}
void
ResetPGVariable(const char *name)
{
- if (strcasecmp(name, "datestyle")==0)
- reset_date();
- else if (strcasecmp(name, "timezone")==0)
- reset_timezone();
- else if (strcasecmp(name, "DefaultXactIsoLevel")==0)
- reset_DefaultXactIsoLevel();
- else if (strcasecmp(name, "XactIsoLevel")==0)
- reset_XactIsoLevel();
- else if (strcasecmp(name, "client_encoding")==0)
- reset_client_encoding();
- else if (strcasecmp(name, "server_encoding")==0)
- reset_server_encoding();
- else if (strcasecmp(name, "random_seed")==0)
- reset_random_seed();
- else
- SetConfigOption(name, NULL, superuser() ? PGC_SUSET : PGC_USERSET);
-}
+ if (strcasecmp(name, "datestyle") == 0)
+ reset_date();
+ else if (strcasecmp(name, "timezone") == 0)
+ reset_timezone();
+ else if (strcasecmp(name, "DefaultXactIsoLevel") == 0)
+ reset_DefaultXactIsoLevel();
+ else if (strcasecmp(name, "XactIsoLevel") == 0)
+ reset_XactIsoLevel();
+ else if (strcasecmp(name, "client_encoding") == 0)
+ reset_client_encoding();
+ else if (strcasecmp(name, "server_encoding") == 0)
+ reset_server_encoding();
+ else if (strcasecmp(name, "random_seed") == 0)
+ reset_random_seed();
+ else
+ SetConfigOption(name, NULL, superuser() ? PGC_SUSET : PGC_USERSET);
+}
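
SetPGVariable(), GetPGVariable() and ResetPGVariable() above all share one shape: a strcasecmp() chain for a handful of legacy special-case variables, with everything else falling through to the generic GUC entry point (SetConfigOption(), using PGC_SUSET or PGC_USERSET depending on superuser()). A minimal standalone sketch of that dispatch pattern follows; the handler stubs and the set_option() fallback are invented for the example and are not the PostgreSQL API.

#include <stdio.h>
#include <strings.h>            /* strcasecmp */

/* Invented stand-ins for the per-variable handlers in variable.c */
static void set_datestyle(const char *v) { printf("datestyle := %s\n", v); }
static void set_timezone(const char *v)  { printf("timezone := %s\n", v); }

/* Invented stand-in for the generic SetConfigOption() fallback */
static void
set_option(const char *name, const char *value)
{
    printf("generic option %s := %s\n", name, value ? value : "(reset)");
}

/*
 * Dispatch on the variable name, case-insensitively, in the style of
 * SetPGVariable(): a few special cases first, then the generic
 * fallback for everything else.
 */
static void
set_variable(const char *name, const char *value)
{
    if (strcasecmp(name, "datestyle") == 0)
        set_datestyle(value);
    else if (strcasecmp(name, "timezone") == 0)
        set_timezone(value);
    else
        set_option(name, value);
}

int
main(void)
{
    set_variable("DateStyle", "ISO");      /* hits a special case */
    set_variable("TimeZone", "UTC");       /* hits a special case */
    set_variable("enable_seqscan", "off"); /* falls through to the generic path */
    return 0;
}
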
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index 99481d4d54b..320f2c08e92 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: view.c,v 1.53 2001/01/24 19:42:53 momjian Exp $
+ * $Id: view.c,v 1.54 2001/03/22 03:59:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,7 +57,7 @@ DefineVirtualRelation(char *relname, List *tlist)
TargetEntry *entry = lfirst(t);
Resdom *res = entry->resdom;
- if (! res->resjunk)
+ if (!res->resjunk)
{
char *resname = res->resname;
char *restypename = typeidTypeName(res->restype);
@@ -118,9 +118,9 @@ MakeRetrieveViewRuleName(char *viewName)
snprintf(buf, buflen, "_RET%s", viewName);
/* clip to less than NAMEDATALEN bytes, if necessary */
#ifdef MULTIBYTE
- maxlen = pg_mbcliplen(buf, strlen(buf), NAMEDATALEN-1);
+ maxlen = pg_mbcliplen(buf, strlen(buf), NAMEDATALEN - 1);
#else
- maxlen = NAMEDATALEN-1;
+ maxlen = NAMEDATALEN - 1;
#endif
if (maxlen < buflen)
buf[maxlen] = '\0';
@@ -211,12 +211,12 @@ UpdateRangeTableOfViewParse(char *viewName, Query *viewParse)
*rt_entry2;
/*
- * Make a copy of the given parsetree. It's not so much that we
- * don't want to scribble on our input, it's that the parser has
- * a bad habit of outputting multiple links to the same subtree
- * for constructs like BETWEEN, and we mustn't have OffsetVarNodes
- * increment the varno of a Var node twice. copyObject will expand
- * any multiply-referenced subtree into multiple copies.
+ * Make a copy of the given parsetree. It's not so much that we don't
+ * want to scribble on our input, it's that the parser has a bad habit
+ * of outputting multiple links to the same subtree for constructs
+ * like BETWEEN, and we mustn't have OffsetVarNodes increment the
+ * varno of a Var node twice. copyObject will expand any
+ * multiply-referenced subtree into multiple copies.
*/
viewParse = (Query *) copyObject(viewParse);
@@ -261,6 +261,7 @@ UpdateRangeTableOfViewParse(char *viewName, Query *viewParse)
void
DefineView(char *viewName, Query *viewParse)
{
+
/*
* Create the "view" relation NOTE: if it already exists, the xact
* will be aborted.
@@ -295,9 +296,10 @@ DefineView(char *viewName, Query *viewParse)
void
RemoveView(char *viewName)
{
+
/*
- * We just have to drop the relation; the associated rules will
- * be cleaned up automatically.
+ * We just have to drop the relation; the associated rules will be
+ * cleaned up automatically.
*/
heap_drop_with_catalog(viewName, allowSystemTableMods);
}
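
The MakeRetrieveViewRuleName() hunk above prefixes the view name with _RET and clips the result to NAMEDATALEN - 1 bytes, using pg_mbcliplen() in multibyte builds so a multibyte character is never split. The single-byte sketch below illustrates only that naming rule; NAMEDATALEN is hard-coded to 32 purely for illustration, and make_view_rule_name() is not the real function.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAMEDATALEN 32          /* assumed value, for illustration only */

/*
 * Build the "_RET<viewname>" rule name and clip it so it still fits in
 * a NameData field (NAMEDATALEN - 1 usable bytes).  Single-byte
 * encoding only; the real code clips with pg_mbcliplen() when
 * multibyte support is compiled in.
 */
static char *
make_view_rule_name(const char *view_name)
{
    size_t  buflen = strlen(view_name) + 5;     /* "_RET" + name + '\0' */
    char   *buf = malloc(buflen);

    if (buf == NULL)
        return NULL;
    snprintf(buf, buflen, "_RET%s", view_name);
    if (strlen(buf) > NAMEDATALEN - 1)
        buf[NAMEDATALEN - 1] = '\0';            /* clip, keep it NUL-terminated */
    return buf;
}

int
main(void)
{
    char *rule = make_view_rule_name("a_rather_long_view_name_that_overflows");

    if (rule != NULL)
    {
        printf("%s (%zu bytes)\n", rule, strlen(rule));
        free(rule);
    }
    return 0;
}
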