author     Bruce Momjian <bruce@momjian.us>  2003-08-04 00:43:34 +0000
committer  Bruce Momjian <bruce@momjian.us>  2003-08-04 00:43:34 +0000
commit     089003fb462fcce46c02bf47322b429f73c33c50 (patch)
tree       77d78bc3a149df06f5603f60200a6ab363336624 /src/backend/commands
parent     63354a0228a1dbc4a0d5ddc8ecdd8326349d2100 (diff)
pgindent run.
Diffstat (limited to 'src/backend/commands')
-rw-r--r--  src/backend/commands/aggregatecmds.c  |  10
-rw-r--r--  src/backend/commands/alter.c          |  82
-rw-r--r--  src/backend/commands/analyze.c        |   8
-rw-r--r--  src/backend/commands/async.c          |  13
-rw-r--r--  src/backend/commands/cluster.c        | 133
-rw-r--r--  src/backend/commands/comment.c        |  35
-rw-r--r--  src/backend/commands/conversioncmds.c |   8
-rw-r--r--  src/backend/commands/copy.c           | 209
-rw-r--r--  src/backend/commands/dbcommands.c     |  60
-rw-r--r--  src/backend/commands/define.c         |   5
-rw-r--r--  src/backend/commands/explain.c        |  40
-rw-r--r--  src/backend/commands/functioncmds.c   |  51
-rw-r--r--  src/backend/commands/indexcmds.c      |  85
-rw-r--r--  src/backend/commands/opclasscmds.c    |  18
-rw-r--r--  src/backend/commands/operatorcmds.c   |   6
-rw-r--r--  src/backend/commands/portalcmds.c     |  64
-rw-r--r--  src/backend/commands/prepare.c        |  58
-rw-r--r--  src/backend/commands/proclang.c       |  12
-rw-r--r--  src/backend/commands/schemacmds.c     |  14
-rw-r--r--  src/backend/commands/sequence.c       |  54
-rw-r--r--  src/backend/commands/tablecmds.c      | 285
-rw-r--r--  src/backend/commands/trigger.c        | 228
-rw-r--r--  src/backend/commands/typecmds.c       | 326
-rw-r--r--  src/backend/commands/user.c           |  56
-rw-r--r--  src/backend/commands/vacuum.c         |  61
-rw-r--r--  src/backend/commands/vacuumlazy.c     |  30
-rw-r--r--  src/backend/commands/variable.c       |  85
-rw-r--r--  src/backend/commands/view.c           |   6
28 files changed, 1044 insertions, 998 deletions
diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c
index 1d9b25b5b0a..5a57d5c5c77 100644
--- a/src/backend/commands/aggregatecmds.c
+++ b/src/backend/commands/aggregatecmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/aggregatecmds.c,v 1.12 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/aggregatecmds.c,v 1.13 2003/08/04 00:43:16 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -256,16 +256,16 @@ RenameAggregate(List *name, TypeName *basetype, const char *newname)
if (basetypeOid == ANYOID)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_FUNCTION),
- errmsg("function %s(*) already exists in schema \"%s\"",
- newname,
- get_namespace_name(namespaceOid))));
+ errmsg("function %s(*) already exists in schema \"%s\"",
+ newname,
+ get_namespace_name(namespaceOid))));
else
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_FUNCTION),
errmsg("function %s already exists in schema \"%s\"",
funcname_signature_string(newname,
procForm->pronargs,
- procForm->proargtypes),
+ procForm->proargtypes),
get_namespace_name(namespaceOid))));
}
diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c
index b377635099e..4fd43871e97 100644
--- a/src/backend/commands/alter.c
+++ b/src/backend/commands/alter.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/alter.c,v 1.4 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/alter.c,v 1.5 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,52 +79,52 @@ ExecRenameStmt(RenameStmt *stmt)
case OBJECT_TABLE:
case OBJECT_COLUMN:
case OBJECT_TRIGGER:
- {
- Oid relid;
+ {
+ Oid relid;
- CheckRelationOwnership(stmt->relation, true);
+ CheckRelationOwnership(stmt->relation, true);
- relid = RangeVarGetRelid(stmt->relation, false);
+ relid = RangeVarGetRelid(stmt->relation, false);
- switch (stmt->renameType)
- {
- case OBJECT_TABLE:
+ switch (stmt->renameType)
{
- /*
- * RENAME TABLE requires that we (still) hold
- * CREATE rights on the containing namespace, as
- * well as ownership of the table.
- */
- Oid namespaceId = get_rel_namespace(relid);
- AclResult aclresult;
-
- aclresult = pg_namespace_aclcheck(namespaceId,
- GetUserId(),
- ACL_CREATE);
- if (aclresult != ACLCHECK_OK)
- aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
- get_namespace_name(namespaceId));
-
- renamerel(relid, stmt->newname);
- break;
- }
- case OBJECT_COLUMN:
- renameatt(relid,
- stmt->subname, /* old att name */
- stmt->newname, /* new att name */
+ case OBJECT_TABLE:
+ {
+ /*
+ * RENAME TABLE requires that we (still) hold
+ * CREATE rights on the containing namespace,
+ * as well as ownership of the table.
+ */
+ Oid namespaceId = get_rel_namespace(relid);
+ AclResult aclresult;
+
+ aclresult = pg_namespace_aclcheck(namespaceId,
+ GetUserId(),
+ ACL_CREATE);
+ if (aclresult != ACLCHECK_OK)
+ aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
+ get_namespace_name(namespaceId));
+
+ renamerel(relid, stmt->newname);
+ break;
+ }
+ case OBJECT_COLUMN:
+ renameatt(relid,
+ stmt->subname, /* old att name */
+ stmt->newname, /* new att name */
interpretInhOption(stmt->relation->inhOpt), /* recursive? */
- false); /* recursing already? */
- break;
- case OBJECT_TRIGGER:
- renametrig(relid,
- stmt->subname, /* old att name */
- stmt->newname); /* new att name */
- break;
- default:
- /*can't happen*/;
+ false); /* recursing already? */
+ break;
+ case OBJECT_TRIGGER:
+ renametrig(relid,
+ stmt->subname, /* old att name */
+ stmt->newname); /* new att name */
+ break;
+ default:
+ /* can't happen */ ;
+ }
+ break;
}
- break;
- }
default:
elog(ERROR, "unrecognized rename stmt type: %d",
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 5277884f1f8..dac2d5d7bbd 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.56 2003/07/20 21:56:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.57 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -220,9 +220,9 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
/*
* Silently ignore tables that are temp tables of other backends ---
- * trying to analyze these is rather pointless, since their
- * contents are probably not up-to-date on disk. (We don't throw a
- * warning here; it would just lead to chatter during a database-wide
+ * trying to analyze these is rather pointless, since their contents
+ * are probably not up-to-date on disk. (We don't throw a warning
+ * here; it would just lead to chatter during a database-wide
* ANALYZE.)
*/
if (isOtherTempNamespace(RelationGetNamespace(onerel)))
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index dafea7c8695..69085740cc5 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.96 2003/07/20 21:56:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.97 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -603,10 +603,10 @@ Async_NotifyHandler(SIGNAL_ARGS)
bool save_ImmediateInterruptOK = ImmediateInterruptOK;
/*
- * We may be called while ImmediateInterruptOK is true; turn it off
- * while messing with the NOTIFY state. (We would have to save
- * and restore it anyway, because PGSemaphore operations inside
- * ProcessIncomingNotify() might reset it.)
+ * We may be called while ImmediateInterruptOK is true; turn it
+ * off while messing with the NOTIFY state. (We would have to
+ * save and restore it anyway, because PGSemaphore operations
+ * inside ProcessIncomingNotify() might reset it.)
*/
ImmediateInterruptOK = false;
@@ -639,7 +639,8 @@ Async_NotifyHandler(SIGNAL_ARGS)
}
/*
- * Restore ImmediateInterruptOK, and check for interrupts if needed.
+ * Restore ImmediateInterruptOK, and check for interrupts if
+ * needed.
*/
ImmediateInterruptOK = save_ImmediateInterruptOK;
if (save_ImmediateInterruptOK)
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 18f6bfcf6b5..23e03443fc5 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.112 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.113 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -58,12 +58,12 @@ typedef struct
*/
typedef struct
{
- Oid tableOid;
- Oid indexOid;
-} RelToCluster;
+ Oid tableOid;
+ Oid indexOid;
+} RelToCluster;
-static void cluster_rel(RelToCluster *rv, bool recheck);
+static void cluster_rel(RelToCluster * rv, bool recheck);
static Oid make_new_heap(Oid OIDOldHeap, const char *NewName);
static void copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex);
static List *get_indexattr_list(Relation OldHeap, Oid OldIndex);
@@ -74,7 +74,7 @@ static List *get_tables_to_cluster(MemoryContext cluster_context);
/*---------------------------------------------------------------------------
- * This cluster code allows for clustering multiple tables at once. Because
+ * This cluster code allows for clustering multiple tables at once. Because
* of this, we cannot just run everything on a single transaction, or we
* would be forced to acquire exclusive locks on all the tables being
* clustered, simultaneously --- very likely leading to deadlock.
@@ -82,17 +82,17 @@ static List *get_tables_to_cluster(MemoryContext cluster_context);
* To solve this we follow a similar strategy to VACUUM code,
* clustering each relation in a separate transaction. For this to work,
* we need to:
- * - provide a separate memory context so that we can pass information in
- * a way that survives across transactions
- * - start a new transaction every time a new relation is clustered
- * - check for validity of the information on to-be-clustered relations,
- * as someone might have deleted a relation behind our back, or
- * clustered one on a different index
- * - end the transaction
+ * - provide a separate memory context so that we can pass information in
+ * a way that survives across transactions
+ * - start a new transaction every time a new relation is clustered
+ * - check for validity of the information on to-be-clustered relations,
+ * as someone might have deleted a relation behind our back, or
+ * clustered one on a different index
+ * - end the transaction
*
* The single-relation case does not have any such overhead.
*
- * We also allow a relation being specified without index. In that case,
+ * We also allow a relation being specified without index. In that case,
* the indisclustered bit will be looked up, and an ERROR will be thrown
* if there is no index with the bit set.
*---------------------------------------------------------------------------
@@ -103,10 +103,10 @@ cluster(ClusterStmt *stmt)
if (stmt->relation != NULL)
{
/* This is the single-relation case. */
- Oid tableOid,
- indexOid = InvalidOid;
- Relation rel;
- RelToCluster rvtc;
+ Oid tableOid,
+ indexOid = InvalidOid;
+ Relation rel;
+ RelToCluster rvtc;
/* Find and lock the table */
rel = heap_openrv(stmt->relation, AccessExclusiveLock);
@@ -123,10 +123,10 @@ cluster(ClusterStmt *stmt)
List *index;
/* We need to find the index that has indisclustered set. */
- foreach (index, RelationGetIndexList(rel))
+ foreach(index, RelationGetIndexList(rel))
{
- HeapTuple idxtuple;
- Form_pg_index indexForm;
+ HeapTuple idxtuple;
+ Form_pg_index indexForm;
indexOid = lfirsto(index);
idxtuple = SearchSysCache(INDEXRELID,
@@ -152,14 +152,17 @@ cluster(ClusterStmt *stmt)
}
else
{
- /* The index is expected to be in the same namespace as the relation. */
+ /*
+ * The index is expected to be in the same namespace as the
+ * relation.
+ */
indexOid = get_relname_relid(stmt->indexname,
rel->rd_rel->relnamespace);
if (!OidIsValid(indexOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("index \"%s\" for table \"%s\" does not exist",
- stmt->indexname, stmt->relation->relname)));
+ errmsg("index \"%s\" for table \"%s\" does not exist",
+ stmt->indexname, stmt->relation->relname)));
}
/* All other checks are done in cluster_rel() */
@@ -175,16 +178,16 @@ cluster(ClusterStmt *stmt)
else
{
/*
- * This is the "multi relation" case. We need to cluster all tables
- * that have some index with indisclustered set.
+ * This is the "multi relation" case. We need to cluster all
+ * tables that have some index with indisclustered set.
*/
- MemoryContext cluster_context;
- List *rv,
- *rvs;
+ MemoryContext cluster_context;
+ List *rv,
+ *rvs;
/*
- * We cannot run this form of CLUSTER inside a user transaction block;
- * we'd be holding locks way too long.
+ * We cannot run this form of CLUSTER inside a user transaction
+ * block; we'd be holding locks way too long.
*/
PreventTransactionChain((void *) stmt, "CLUSTER");
@@ -201,8 +204,8 @@ cluster(ClusterStmt *stmt)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Build the list of relations to cluster. Note that this lives in
- * cluster_context.
+ * Build the list of relations to cluster. Note that this lives
+ * in cluster_context.
*/
rvs = get_tables_to_cluster(cluster_context);
@@ -210,13 +213,14 @@ cluster(ClusterStmt *stmt)
CommitTransactionCommand();
/* Ok, now that we've got them all, cluster them one by one */
- foreach (rv, rvs)
+ foreach(rv, rvs)
{
- RelToCluster *rvtc = (RelToCluster *) lfirst(rv);
+ RelToCluster *rvtc = (RelToCluster *) lfirst(rv);
/* Start a new transaction for each relation. */
StartTransactionCommand();
- SetQuerySnapshot(); /* might be needed for functions in indexes */
+ SetQuerySnapshot(); /* might be needed for functions in
+ * indexes */
cluster_rel(rvtc, true);
CommitTransactionCommand();
}
@@ -244,7 +248,7 @@ cluster(ClusterStmt *stmt)
* them incrementally while we load the table.
*/
static void
-cluster_rel(RelToCluster *rvtc, bool recheck)
+cluster_rel(RelToCluster * rvtc, bool recheck)
{
Relation OldHeap,
OldIndex;
@@ -256,14 +260,14 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
* Since we may open a new transaction for each relation, we have to
* check that the relation still is what we think it is.
*
- * If this is a single-transaction CLUSTER, we can skip these tests.
- * We *must* skip the one on indisclustered since it would reject an
+ * If this is a single-transaction CLUSTER, we can skip these tests. We
+ * *must* skip the one on indisclustered since it would reject an
* attempt to cluster a not-previously-clustered index.
*/
if (recheck)
{
- HeapTuple tuple;
- Form_pg_index indexForm;
+ HeapTuple tuple;
+ Form_pg_index indexForm;
/*
* Check if the relation and index still exist before opening them
@@ -319,10 +323,10 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
RelationGetRelationName(OldHeap))));
/*
- * Disallow clustering on incomplete indexes (those that might not index
- * every row of the relation). We could relax this by making a separate
- * seqscan pass over the table to copy the missing rows, but that seems
- * expensive and tedious.
+ * Disallow clustering on incomplete indexes (those that might not
+ * index every row of the relation). We could relax this by making a
+ * separate seqscan pass over the table to copy the missing rows, but
+ * that seems expensive and tedious.
*/
if (!heap_attisnull(OldIndex->rd_indextuple, Anum_pg_index_indpred))
ereport(ERROR,
@@ -334,7 +338,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
/*
* If the AM doesn't index nulls, then it's a partial index unless
- * we can prove all the rows are non-null. Note we only need look
+ * we can prove all the rows are non-null. Note we only need look
* at the first column; multicolumn-capable AMs are *required* to
* index nulls in columns after the first.
*/
@@ -347,7 +351,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot cluster when index access method does not handle nulls"),
errhint("You may be able to work around this by marking column \"%s\" NOT NULL.",
- NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))));
+ NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))));
}
else if (colno < 0)
{
@@ -382,7 +386,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
if (isOtherTempNamespace(RelationGetNamespace(OldHeap)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot cluster temp tables of other processes")));
+ errmsg("cannot cluster temp tables of other processes")));
/* Drop relcache refcnt on OldIndex, but keep lock */
index_close(OldIndex);
@@ -397,7 +401,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
* rebuild_relation: rebuild an existing relation
*
* This is shared code between CLUSTER and TRUNCATE. In the TRUNCATE
- * case, the new relation is built and left empty. In the CLUSTER case,
+ * case, the new relation is built and left empty. In the CLUSTER case,
* it is filled with data read from the old relation in the order specified
* by the index.
*
@@ -432,6 +436,7 @@ rebuild_relation(Relation OldHeap, Oid indexOid)
snprintf(NewHeapName, sizeof(NewHeapName), "pg_temp_%u", tableOid);
OIDNewHeap = make_new_heap(tableOid, NewHeapName);
+
/*
* We don't need CommandCounterIncrement() because make_new_heap did
* it.
@@ -754,8 +759,8 @@ swap_relfilenodes(Oid r1, Oid r2)
/* swap size statistics too, since new rel has freshly-updated stats */
{
- int4 swap_pages;
- float4 swap_tuples;
+ int4 swap_pages;
+ float4 swap_tuples;
swap_pages = relform1->relpages;
relform1->relpages = relform2->relpages;
@@ -857,20 +862,20 @@ swap_relfilenodes(Oid r1, Oid r2)
static List *
get_tables_to_cluster(MemoryContext cluster_context)
{
- Relation indRelation;
- HeapScanDesc scan;
- ScanKeyData entry;
- HeapTuple indexTuple;
- Form_pg_index index;
- MemoryContext old_context;
- RelToCluster *rvtc;
- List *rvs = NIL;
+ Relation indRelation;
+ HeapScanDesc scan;
+ ScanKeyData entry;
+ HeapTuple indexTuple;
+ Form_pg_index index;
+ MemoryContext old_context;
+ RelToCluster *rvtc;
+ List *rvs = NIL;
/*
* Get all indexes that have indisclustered set and are owned by
- * appropriate user. System relations or nailed-in relations cannot ever
- * have indisclustered set, because CLUSTER will refuse to set it when
- * called with one of them as argument.
+ * appropriate user. System relations or nailed-in relations cannot
+ * ever have indisclustered set, because CLUSTER will refuse to set it
+ * when called with one of them as argument.
*/
indRelation = relation_openr(IndexRelationName, AccessShareLock);
ScanKeyEntryInitialize(&entry, 0,
@@ -886,8 +891,8 @@ get_tables_to_cluster(MemoryContext cluster_context)
continue;
/*
- * We have to build the list in a different memory context so
- * it will survive the cross-transaction processing
+ * We have to build the list in a different memory context so it
+ * will survive the cross-transaction processing
*/
old_context = MemoryContextSwitchTo(cluster_context);
diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c
index ecd50bdb367..e0ebba0df96 100644
--- a/src/backend/commands/comment.c
+++ b/src/backend/commands/comment.c
@@ -7,7 +7,7 @@
* Copyright (c) 1996-2001, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.67 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.68 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -383,8 +383,8 @@ CommentAttribute(List *qualname, char *comment)
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- attrname, RelationGetRelationName(relation))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ attrname, RelationGetRelationName(relation))));
/* Create the comment using the relation's oid */
@@ -418,16 +418,17 @@ CommentDatabase(List *qualname, char *comment)
database = strVal(lfirst(qualname));
/*
- * We cannot currently support cross-database comments (since other DBs
- * cannot see pg_description of this database). So, we reject attempts
- * to comment on a database other than the current one. Someday this
- * might be improved, but it would take a redesigned infrastructure.
+ * We cannot currently support cross-database comments (since other
+ * DBs cannot see pg_description of this database). So, we reject
+ * attempts to comment on a database other than the current one.
+ * Someday this might be improved, but it would take a redesigned
+ * infrastructure.
*
* When loading a dump, we may see a COMMENT ON DATABASE for the old name
- * of the database. Erroring out would prevent pg_restore from completing
- * (which is really pg_restore's fault, but for now we will work around
- * the problem here). Consensus is that the best fix is to treat wrong
- * database name as a WARNING not an ERROR.
+ * of the database. Erroring out would prevent pg_restore from
+ * completing (which is really pg_restore's fault, but for now we will
+ * work around the problem here). Consensus is that the best fix is
+ * to treat wrong database name as a WARNING not an ERROR.
*/
/* First get the database OID */
@@ -569,7 +570,7 @@ CommentRule(List *qualname, char *comment)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("there are multiple rules \"%s\"", rulename),
- errhint("Specify a relation name as well as a rule name.")));
+ errhint("Specify a relation name as well as a rule name.")));
heap_endscan(scanDesc);
heap_close(RewriteRelation, AccessShareLock);
@@ -811,8 +812,8 @@ CommentTrigger(List *qualname, char *comment)
if (!HeapTupleIsValid(triggertuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" does not exist",
- trigname, RelationGetRelationName(relation))));
+ errmsg("trigger \"%s\" for relation \"%s\" does not exist",
+ trigname, RelationGetRelationName(relation))));
oid = HeapTupleGetOid(triggertuple);
@@ -891,7 +892,7 @@ CommentConstraint(List *qualname, char *comment)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("relation \"%s\" has multiple constraints named \"%s\"",
- RelationGetRelationName(relation), conName)));
+ RelationGetRelationName(relation), conName)));
conOid = HeapTupleGetOid(tuple);
}
}
@@ -902,8 +903,8 @@ CommentConstraint(List *qualname, char *comment)
if (!OidIsValid(conOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("constraint \"%s\" for relation \"%s\" does not exist",
- conName, RelationGetRelationName(relation))));
+ errmsg("constraint \"%s\" for relation \"%s\" does not exist",
+ conName, RelationGetRelationName(relation))));
/* Create the comment with the pg_constraint oid */
CreateComments(conOid, RelationGetRelid(pg_constraint), 0, comment);
diff --git a/src/backend/commands/conversioncmds.c b/src/backend/commands/conversioncmds.c
index b917c527aca..e9afb956246 100644
--- a/src/backend/commands/conversioncmds.c
+++ b/src/backend/commands/conversioncmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/conversioncmds.c,v 1.9 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/conversioncmds.c,v 1.10 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -148,11 +148,11 @@ RenameConversion(List *name, const char *newname)
0, 0))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("conversion \"%s\" already exists in schema \"%s\"",
- newname, get_namespace_name(namespaceOid))));
+ errmsg("conversion \"%s\" already exists in schema \"%s\"",
+ newname, get_namespace_name(namespaceOid))));
/* must be owner */
- if (!superuser() &&
+ if (!superuser() &&
((Form_pg_conversion) GETSTRUCT(tup))->conowner != GetUserId())
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CONVERSION,
NameListToString(name));
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index fa91439a579..5c7238de8dc 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.205 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.206 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,7 +61,7 @@ typedef enum CopyDest
COPY_FILE, /* to/from file */
COPY_OLD_FE, /* to/from frontend (old protocol) */
COPY_NEW_FE /* to/from frontend (new protocol) */
-} CopyDest;
+} CopyDest;
/*
* Represents the type of data returned by CopyReadAttribute()
@@ -82,17 +82,17 @@ typedef enum EolType
EOL_NL,
EOL_CR,
EOL_CRNL
-} EolType;
+} EolType;
/* non-export function prototypes */
static void CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
- char *delim, char *null_print);
+ char *delim, char *null_print);
static void CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
- char *delim, char *null_print);
+ char *delim, char *null_print);
static char *CopyReadAttribute(const char *delim, CopyReadResult *result);
static Datum CopyReadBinaryAttribute(int column_no, FmgrInfo *flinfo,
- Oid typelem, bool *isnull);
+ Oid typelem, bool *isnull);
static void CopyAttributeOut(char *string, char *delim);
static List *CopyGetAttnums(Relation rel, List *attnamelist);
@@ -136,6 +136,7 @@ static void CopySendChar(char c);
static void CopySendEndOfRow(bool binary);
static void CopyGetData(void *databuf, int datasize);
static int CopyGetChar(void);
+
#define CopyGetEof() (fe_eof)
static int CopyPeekChar(void);
static void CopyDonePeek(int c, bool pickup);
@@ -155,14 +156,14 @@ SendCopyBegin(bool binary, int natts)
{
/* new way */
StringInfoData buf;
- int16 format = (binary ? 1 : 0);
- int i;
+ int16 format = (binary ? 1 : 0);
+ int i;
pq_beginmessage(&buf, 'H');
- pq_sendbyte(&buf, format); /* overall format */
+ pq_sendbyte(&buf, format); /* overall format */
pq_sendint(&buf, natts, 2);
for (i = 0; i < natts; i++)
- pq_sendint(&buf, format, 2); /* per-column formats */
+ pq_sendint(&buf, format, 2); /* per-column formats */
pq_endmessage(&buf);
copy_dest = COPY_NEW_FE;
copy_msgbuf = makeStringInfo();
@@ -200,14 +201,14 @@ ReceiveCopyBegin(bool binary, int natts)
{
/* new way */
StringInfoData buf;
- int16 format = (binary ? 1 : 0);
- int i;
+ int16 format = (binary ? 1 : 0);
+ int i;
pq_beginmessage(&buf, 'G');
- pq_sendbyte(&buf, format); /* overall format */
+ pq_sendbyte(&buf, format); /* overall format */
pq_sendint(&buf, natts, 2);
for (i = 0; i < natts; i++)
- pq_sendint(&buf, format, 2); /* per-column formats */
+ pq_sendint(&buf, format, 2); /* per-column formats */
pq_endmessage(&buf);
copy_dest = COPY_NEW_FE;
copy_msgbuf = makeStringInfo();
@@ -289,7 +290,7 @@ CopySendData(void *databuf, int datasize)
/* no hope of recovering connection sync, so FATAL */
ereport(FATAL,
(errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("connection lost during COPY to stdout")));
+ errmsg("connection lost during COPY to stdout")));
}
break;
case COPY_NEW_FE:
@@ -378,7 +379,7 @@ CopyGetData(void *databuf, int datasize)
case COPY_NEW_FE:
while (datasize > 0 && !fe_eof)
{
- int avail;
+ int avail;
while (copy_msgbuf->cursor >= copy_msgbuf->len)
{
@@ -389,24 +390,24 @@ CopyGetData(void *databuf, int datasize)
if (mtype == EOF)
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("unexpected EOF on client connection")));
+ errmsg("unexpected EOF on client connection")));
if (pq_getmessage(copy_msgbuf, 0))
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("unexpected EOF on client connection")));
+ errmsg("unexpected EOF on client connection")));
switch (mtype)
{
- case 'd': /* CopyData */
+ case 'd': /* CopyData */
break;
- case 'c': /* CopyDone */
+ case 'c': /* CopyDone */
/* COPY IN correctly terminated by frontend */
fe_eof = true;
return;
- case 'f': /* CopyFail */
+ case 'f': /* CopyFail */
ereport(ERROR,
(errcode(ERRCODE_QUERY_CANCELED),
errmsg("COPY from stdin failed: %s",
- pq_getmsgstring(copy_msgbuf))));
+ pq_getmsgstring(copy_msgbuf))));
break;
default:
ereport(ERROR,
@@ -421,7 +422,7 @@ CopyGetData(void *databuf, int datasize)
avail = datasize;
pq_copymsgbytes(copy_msgbuf, databuf, avail);
databuf = (void *) ((char *) databuf + avail);
- datasize =- avail;
+ datasize = -avail;
}
break;
}
@@ -430,7 +431,7 @@ CopyGetData(void *databuf, int datasize)
static int
CopyGetChar(void)
{
- int ch;
+ int ch;
switch (copy_dest)
{
@@ -448,16 +449,16 @@ CopyGetChar(void)
}
break;
case COPY_NEW_FE:
- {
- unsigned char cc;
+ {
+ unsigned char cc;
- CopyGetData(&cc, 1);
- if (fe_eof)
- ch = EOF;
- else
- ch = cc;
- break;
- }
+ CopyGetData(&cc, 1);
+ if (fe_eof)
+ ch = EOF;
+ else
+ ch = cc;
+ break;
+ }
default:
ch = EOF;
break;
@@ -479,7 +480,7 @@ CopyGetChar(void)
static int
CopyPeekChar(void)
{
- int ch;
+ int ch;
switch (copy_dest)
{
@@ -497,16 +498,16 @@ CopyPeekChar(void)
}
break;
case COPY_NEW_FE:
- {
- unsigned char cc;
+ {
+ unsigned char cc;
- CopyGetData(&cc, 1);
- if (fe_eof)
- ch = EOF;
- else
- ch = cc;
- break;
- }
+ CopyGetData(&cc, 1);
+ if (fe_eof)
+ ch = EOF;
+ else
+ ch = cc;
+ break;
+ }
default:
ch = EOF;
break;
@@ -524,7 +525,7 @@ CopyDonePeek(int c, bool pickup)
switch (copy_dest)
{
case COPY_FILE:
- if (!pickup)
+ if (!pickup)
{
/* We don't want to pick it up - so put it back in there */
ungetc(c, copy_file);
@@ -537,7 +538,11 @@ CopyDonePeek(int c, bool pickup)
/* We want to pick it up */
(void) pq_getbyte();
}
- /* If we didn't want to pick it up, just leave it where it sits */
+
+ /*
+ * If we didn't want to pick it up, just leave it where it
+ * sits
+ */
break;
case COPY_NEW_FE:
if (!pickup)
@@ -737,7 +742,7 @@ DoCopy(const CopyStmt *stmt)
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to COPY to or from a file"),
errhint("Anyone can COPY to stdout or from stdin. "
- "psql's \\copy command also works for anyone.")));
+ "psql's \\copy command also works for anyone.")));
/*
* Presently, only single-character delimiter strings are supported.
@@ -791,8 +796,8 @@ DoCopy(const CopyStmt *stmt)
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot copy to non-table relation \"%s\"",
- RelationGetRelationName(rel))));
+ errmsg("cannot copy to non-table relation \"%s\"",
+ RelationGetRelationName(rel))));
}
if (pipe)
{
@@ -810,8 +815,8 @@ DoCopy(const CopyStmt *stmt)
if (copy_file == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\" for reading: %m",
- filename)));
+ errmsg("could not open file \"%s\" for reading: %m",
+ filename)));
fstat(fileno(copy_file), &st);
if (S_ISDIR(st.st_mode))
@@ -841,8 +846,8 @@ DoCopy(const CopyStmt *stmt)
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot copy from non-table relation \"%s\"",
- RelationGetRelationName(rel))));
+ errmsg("cannot copy from non-table relation \"%s\"",
+ RelationGetRelationName(rel))));
}
if (pipe)
{
@@ -863,7 +868,7 @@ DoCopy(const CopyStmt *stmt)
if (!is_absolute_path(filename))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("relative path not allowed for COPY to file")));
+ errmsg("relative path not allowed for COPY to file")));
oumask = umask((mode_t) 022);
copy_file = AllocateFile(filename, PG_BINARY_W);
@@ -872,8 +877,8 @@ DoCopy(const CopyStmt *stmt)
if (copy_file == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\" for writing: %m",
- filename)));
+ errmsg("could not open file \"%s\" for writing: %m",
+ filename)));
fstat(fileno(copy_file), &st);
if (S_ISDIR(st.st_mode))
@@ -955,8 +960,8 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
}
/*
- * Create a temporary memory context that we can reset once per row
- * to recover palloc'd memory. This avoids any problems with leaks
+ * Create a temporary memory context that we can reset once per row to
+ * recover palloc'd memory. This avoids any problems with leaks
* inside datatype output routines, and should be faster than retail
* pfree's anyway. (We don't need a whole econtext as CopyFrom does.)
*/
@@ -1040,9 +1045,9 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
if (isnull)
{
if (!binary)
- CopySendString(null_print); /* null indicator */
+ CopySendString(null_print); /* null indicator */
else
- CopySendInt32(-1); /* null marker */
+ CopySendInt32(-1); /* null marker */
}
else
{
@@ -1060,7 +1065,7 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
outputbytes = DatumGetByteaP(FunctionCall2(&out_functions[attnum - 1],
value,
- ObjectIdGetDatum(elements[attnum - 1])));
+ ObjectIdGetDatum(elements[attnum - 1])));
/* We assume the result will not have been toasted */
CopySendInt32(VARSIZE(outputbytes) - VARHDRSZ);
CopySendData(VARDATA(outputbytes),
@@ -1199,7 +1204,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
{
/* attribute is NOT to be copied from input */
/* use default value if one exists */
- Node *defexpr = build_column_default(rel, i + 1);
+ Node *defexpr = build_column_default(rel, i + 1);
if (defexpr != NULL)
{
@@ -1219,10 +1224,10 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
/*
* Easiest way to do this is to use parse_coerce.c to set up
* an expression that checks the constraints. (At present,
- * the expression might contain a length-coercion-function call
- * and/or CoerceToDomain nodes.) The bottom of the expression
- * is a Param node so that we can fill in the actual datum during
- * the data input loop.
+ * the expression might contain a length-coercion-function
+ * call and/or CoerceToDomain nodes.) The bottom of the
+ * expression is a Param node so that we can fill in the
+ * actual datum during the data input loop.
*/
prm = makeNode(Param);
prm->paramkind = PARAM_EXEC;
@@ -1241,11 +1246,11 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
}
/*
- * Check BEFORE STATEMENT insertion triggers. It's debateable
- * whether we should do this for COPY, since it's not really an
- * "INSERT" statement as such. However, executing these triggers
- * maintains consistency with the EACH ROW triggers that we already
- * fire on COPY.
+ * Check BEFORE STATEMENT insertion triggers. It's debateable whether
+ * we should do this for COPY, since it's not really an "INSERT"
+ * statement as such. However, executing these triggers maintains
+ * consistency with the EACH ROW triggers that we already fire on
+ * COPY.
*/
ExecBSInsertTriggers(estate, resultRelInfo);
@@ -1276,13 +1281,13 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
if ((tmp >> 16) != 0)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("unrecognized critical flags in COPY file header")));
+ errmsg("unrecognized critical flags in COPY file header")));
/* Header extension length */
tmp = CopyGetInt32();
if (CopyGetEof() || tmp < 0)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("invalid COPY file header (missing length)")));
+ errmsg("invalid COPY file header (missing length)")));
/* Skip extension header, if present */
while (tmp-- > 0)
{
@@ -1290,7 +1295,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
if (CopyGetEof())
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("invalid COPY file header (wrong length)")));
+ errmsg("invalid COPY file header (wrong length)")));
}
}
@@ -1418,9 +1423,9 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
/*
* Complain if there are more fields on the input line.
*
- * Special case: if we're reading a zero-column table, we
- * won't yet have called CopyReadAttribute() at all; so do that
- * and check we have an empty line. Fortunately we can keep that
+ * Special case: if we're reading a zero-column table, we won't
+ * yet have called CopyReadAttribute() at all; so do that and
+ * check we have an empty line. Fortunately we can keep that
* silly corner case out of the main line of execution.
*/
if (result == NORMAL_ATTR)
@@ -1431,7 +1436,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
if (result == NORMAL_ATTR || *string != '\0')
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("extra data after last expected column")));
+ errmsg("extra data after last expected column")));
if (result == END_OF_FILE)
{
/* EOF at start of line: all is well */
@@ -1442,7 +1447,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
else
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("extra data after last expected column")));
+ errmsg("extra data after last expected column")));
}
/*
@@ -1475,8 +1480,8 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
{
loaded_oid =
DatumGetObjectId(CopyReadBinaryAttribute(0,
- &oid_in_function,
- oid_in_element,
+ &oid_in_function,
+ oid_in_element,
&isnull));
if (isnull || loaded_oid == InvalidOid)
ereport(ERROR,
@@ -1531,9 +1536,9 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
prmdata->isnull = (nulls[i] == 'n');
/*
- * Execute the constraint expression. Allow the expression
- * to replace the value (consider e.g. a timestamp precision
- * restriction).
+ * Execute the constraint expression. Allow the
+ * expression to replace the value (consider e.g. a
+ * timestamp precision restriction).
*/
values[i] = ExecEvalExpr(exprstate, econtext,
&isnull, NULL);
@@ -1674,11 +1679,12 @@ CopyReadAttribute(const char *delim, CopyReadResult *result)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("literal carriage return found in data"),
- errhint("Use \"\\r\" to represent carriage return.")));
- /* Check for \r\n on first line, _and_ handle \r\n. */
+ errhint("Use \"\\r\" to represent carriage return.")));
+ /* Check for \r\n on first line, _and_ handle \r\n. */
if (copy_lineno == 1 || eol_type == EOL_CRNL)
{
- int c2 = CopyPeekChar();
+ int c2 = CopyPeekChar();
+
if (c2 == '\n')
{
CopyDonePeek(c2, true); /* eat newline */
@@ -1690,9 +1696,13 @@ CopyReadAttribute(const char *delim, CopyReadResult *result)
if (eol_type == EOL_CRNL)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("literal carriage return found in data"),
+ errmsg("literal carriage return found in data"),
errhint("Use \"\\r\" to represent carriage return.")));
- /* if we got here, it is the first line and we didn't get \n, so put it back */
+
+ /*
+ * if we got here, it is the first line and we didn't
+ * get \n, so put it back
+ */
CopyDonePeek(c2, false);
eol_type = EOL_CR;
}
@@ -1802,12 +1812,12 @@ CopyReadAttribute(const char *delim, CopyReadResult *result)
c = CopyGetChar();
if (c == '\n')
ereport(ERROR,
- (errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("end-of-copy marker does not match previous newline style")));
+ (errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
+ errmsg("end-of-copy marker does not match previous newline style")));
if (c != '\r')
ereport(ERROR,
- (errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("end-of-copy marker corrupt")));
+ (errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
+ errmsg("end-of-copy marker corrupt")));
}
c = CopyGetChar();
if (c != '\r' && c != '\n')
@@ -1816,21 +1826,20 @@ CopyReadAttribute(const char *delim, CopyReadResult *result)
errmsg("end-of-copy marker corrupt")));
if ((eol_type == EOL_NL && c != '\n') ||
(eol_type == EOL_CRNL && c != '\n') ||
- (eol_type == EOL_CR && c != '\r'))
+ (eol_type == EOL_CR && c != '\r'))
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("end-of-copy marker does not match previous newline style")));
+
/*
- * In protocol version 3, we should ignore anything after
- * \. up to the protocol end of copy data. (XXX maybe
- * better not to treat \. as special?)
+ * In protocol version 3, we should ignore anything
+ * after \. up to the protocol end of copy data. (XXX
+ * maybe better not to treat \. as special?)
*/
if (copy_dest == COPY_NEW_FE)
{
while (c != EOF)
- {
c = CopyGetChar();
- }
}
*result = END_OF_FILE;
goto copy_eof;
@@ -2045,8 +2054,8 @@ CopyGetAttnums(Relation rel, List *attnamelist)
if (intMember(attnum, attnums))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("attribute \"%s\" specified more than once",
- name)));
+ errmsg("attribute \"%s\" specified more than once",
+ name)));
attnums = lappendi(attnums, attnum);
}
}
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 70678b26b08..547f3fb2f3f 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.119 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.120 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -200,7 +200,7 @@ createdb(const CreatedbStmt *stmt)
if (dbpath != NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use an alternate location on this platform")));
+ errmsg("cannot use an alternate location on this platform")));
#endif
/*
@@ -260,8 +260,8 @@ createdb(const CreatedbStmt *stmt)
if (DatabaseHasActiveBackends(src_dboid, true))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("source database \"%s\" is being accessed by other users",
- dbtemplate)));
+ errmsg("source database \"%s\" is being accessed by other users",
+ dbtemplate)));
/* If encoding is defaulted, use source's encoding */
if (encoding < 0)
@@ -345,7 +345,7 @@ createdb(const CreatedbStmt *stmt)
/* Make the symlink, if needed */
if (alt_loc)
{
-#ifdef HAVE_SYMLINK /* already throws error above */
+#ifdef HAVE_SYMLINK /* already throws error above */
if (symlink(alt_loc, nominal_loc) != 0)
#endif
ereport(ERROR,
@@ -450,7 +450,7 @@ dropdb(const char *dbname)
char *nominal_loc;
char dbpath[MAXPGPATH];
Relation pgdbrel;
- SysScanDesc pgdbscan;
+ SysScanDesc pgdbscan;
ScanKeyData key;
HeapTuple tup;
@@ -503,8 +503,8 @@ dropdb(const char *dbname)
if (DatabaseHasActiveBackends(db_id, false))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("database \"%s\" is being accessed by other users",
- dbname)));
+ errmsg("database \"%s\" is being accessed by other users",
+ dbname)));
/*
* Find the database's tuple by OID (should be unique).
@@ -577,10 +577,13 @@ dropdb(const char *dbname)
void
RenameDatabase(const char *oldname, const char *newname)
{
- HeapTuple tup, newtup;
+ HeapTuple tup,
+ newtup;
Relation rel;
- SysScanDesc scan, scan2;
- ScanKeyData key, key2;
+ SysScanDesc scan,
+ scan2;
+ ScanKeyData key,
+ key2;
/*
* Obtain AccessExclusiveLock so that no new session gets started
@@ -610,15 +613,14 @@ RenameDatabase(const char *oldname, const char *newname)
errmsg("current database may not be renamed")));
/*
- * Make sure the database does not have active sessions. Might
- * not be necessary, but it's consistent with other database
- * operations.
+ * Make sure the database does not have active sessions. Might not be
+ * necessary, but it's consistent with other database operations.
*/
if (DatabaseHasActiveBackends(HeapTupleGetOid(tup), false))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("database \"%s\" is being accessed by other users",
- oldname)));
+ errmsg("database \"%s\" is being accessed by other users",
+ oldname)));
/* make sure the new name doesn't exist */
ScanKeyEntryInitialize(&key2, 0, Anum_pg_database_datname,
@@ -651,10 +653,10 @@ RenameDatabase(const char *oldname, const char *newname)
heap_close(rel, NoLock);
/*
- * Force dirty buffers out to disk, so that newly-connecting
- * backends will see the renamed database in pg_database right
- * away. (They'll see an uncommitted tuple, but they don't care;
- * see GetRawDatabaseInfo.)
+ * Force dirty buffers out to disk, so that newly-connecting backends
+ * will see the renamed database in pg_database right away. (They'll
+ * see an uncommitted tuple, but they don't care; see
+ * GetRawDatabaseInfo.)
*/
BufferSync();
}
@@ -671,7 +673,7 @@ AlterDatabaseSet(AlterDatabaseSetStmt *stmt)
newtuple;
Relation rel;
ScanKeyData scankey;
- SysScanDesc scan;
+ SysScanDesc scan;
Datum repl_val[Natts_pg_database];
char repl_null[Natts_pg_database];
char repl_repl[Natts_pg_database];
@@ -689,9 +691,9 @@ AlterDatabaseSet(AlterDatabaseSetStmt *stmt)
errmsg("database \"%s\" does not exist", stmt->dbname)));
if (!(superuser()
- || ((Form_pg_database) GETSTRUCT(tuple))->datdba == GetUserId()))
+ || ((Form_pg_database) GETSTRUCT(tuple))->datdba == GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE,
- stmt->dbname);
+ stmt->dbname);
MemSet(repl_repl, ' ', sizeof(repl_repl));
repl_repl[Anum_pg_database_datconfig - 1] = 'r';
@@ -750,7 +752,7 @@ get_db_info(const char *name, Oid *dbIdP, int4 *ownerIdP,
{
Relation relation;
ScanKeyData scanKey;
- SysScanDesc scan;
+ SysScanDesc scan;
HeapTuple tuple;
bool gottuple;
@@ -862,7 +864,7 @@ resolve_alt_dbpath(const char *dbpath, Oid dboid)
#ifndef ALLOW_ABSOLUTE_DBPATHS
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("absolute paths are not allowed as database locations")));
+ errmsg("absolute paths are not allowed as database locations")));
#endif
prefix = dbpath;
}
@@ -874,8 +876,8 @@ resolve_alt_dbpath(const char *dbpath, Oid dboid)
if (!var)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("postmaster environment variable \"%s\" not found",
- dbpath)));
+ errmsg("postmaster environment variable \"%s\" not found",
+ dbpath)));
if (!is_absolute_path(var))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
@@ -955,7 +957,7 @@ get_database_oid(const char *dbname)
{
Relation pg_database;
ScanKeyData entry[1];
- SysScanDesc scan;
+ SysScanDesc scan;
HeapTuple dbtuple;
Oid oid;
@@ -993,7 +995,7 @@ get_database_name(Oid dbid)
{
Relation pg_database;
ScanKeyData entry[1];
- SysScanDesc scan;
+ SysScanDesc scan;
HeapTuple dbtuple;
char *result;
diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c
index bf0c95a75ef..c924dcc7b77 100644
--- a/src/backend/commands/define.c
+++ b/src/backend/commands/define.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.82 2003/07/20 21:56:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.83 2003/08/04 00:43:16 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -51,7 +51,8 @@ case_translate_language_name(const char *input, char *output)
{
int i;
- MemSet(output, 0, NAMEDATALEN); /* ensure result Name is zero-filled */
+ MemSet(output, 0, NAMEDATALEN); /* ensure result Name is
+ * zero-filled */
for (i = 0; i < NAMEDATALEN - 1 && input[i]; ++i)
output[i] = tolower((unsigned char) input[i]);
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index bc137b0eaca..916c1ff772f 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.111 2003/07/20 21:56:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.112 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,11 +45,11 @@ typedef struct ExplainState
static void ExplainOneQuery(Query *query, ExplainStmt *stmt,
TupOutputState *tstate);
-static double elapsed_time(struct timeval *starttime);
+static double elapsed_time(struct timeval * starttime);
static void explain_outNode(StringInfo str,
- Plan *plan, PlanState *planstate,
- Plan *outer_plan,
- int indent, ExplainState *es);
+ Plan *plan, PlanState * planstate,
+ Plan *outer_plan,
+ int indent, ExplainState *es);
static void show_scan_qual(List *qual, bool is_or_qual, const char *qlabel,
int scanrelid, Plan *outer_plan,
StringInfo str, int indent, ExplainState *es);
@@ -58,8 +58,8 @@ static void show_upper_qual(List *qual, const char *qlabel,
const char *inner_name, int inner_varno, Plan *inner_plan,
StringInfo str, int indent, ExplainState *es);
static void show_sort_keys(List *tlist, int nkeys, AttrNumber *keycols,
- const char *qlabel,
- StringInfo str, int indent, ExplainState *es);
+ const char *qlabel,
+ StringInfo str, int indent, ExplainState *es);
static Node *make_ors_ands_explicit(List *orclauses);
/*
@@ -255,8 +255,8 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt,
}
/*
- * Close down the query and free resources. Include time for this
- * in the total runtime.
+ * Close down the query and free resources. Include time for this in
+ * the total runtime.
*/
gettimeofday(&starttime, NULL);
@@ -282,7 +282,7 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt,
/* Compute elapsed time in seconds since given gettimeofday() timestamp */
static double
-elapsed_time(struct timeval *starttime)
+elapsed_time(struct timeval * starttime)
{
struct timeval endtime;
@@ -313,7 +313,7 @@ elapsed_time(struct timeval *starttime)
*/
static void
explain_outNode(StringInfo str,
- Plan *plan, PlanState *planstate,
+ Plan *plan, PlanState * planstate,
Plan *outer_plan,
int indent, ExplainState *es)
{
@@ -542,8 +542,8 @@ explain_outNode(StringInfo str,
/*
* If the expression is still a function call, we can get
* the real name of the function. Otherwise, punt (this
- * can happen if the optimizer simplified away the function
- * call, for example).
+ * can happen if the optimizer simplified away the
+ * function call, for example).
*/
if (rte->funcexpr && IsA(rte->funcexpr, FuncExpr))
{
@@ -583,15 +583,13 @@ explain_outNode(StringInfo str,
double nloops = planstate->instrument->nloops;
appendStringInfo(str, " (actual time=%.2f..%.2f rows=%.0f loops=%.0f)",
- 1000.0 * planstate->instrument->startup / nloops,
- 1000.0 * planstate->instrument->total / nloops,
+ 1000.0 * planstate->instrument->startup / nloops,
+ 1000.0 * planstate->instrument->total / nloops,
planstate->instrument->ntuples / nloops,
planstate->instrument->nloops);
}
else if (es->printAnalyze)
- {
appendStringInfo(str, " (never executed)");
- }
}
appendStringInfoChar(str, '\n');
@@ -709,7 +707,7 @@ explain_outNode(StringInfo str,
foreach(lst, planstate->initPlan)
{
SubPlanState *sps = (SubPlanState *) lfirst(lst);
- SubPlan *sp = (SubPlan *) sps->xprstate.expr;
+ SubPlan *sp = (SubPlan *) sps->xprstate.expr;
es->rtable = sp->rtable;
for (i = 0; i < indent; i++)
@@ -807,7 +805,7 @@ explain_outNode(StringInfo str,
foreach(lst, planstate->subPlan)
{
SubPlanState *sps = (SubPlanState *) lfirst(lst);
- SubPlan *sp = (SubPlan *) sps->xprstate.expr;
+ SubPlan *sp = (SubPlan *) sps->xprstate.expr;
es->rtable = sp->rtable;
for (i = 0; i < indent; i++)
@@ -865,7 +863,7 @@ show_scan_qual(List *qual, bool is_or_qual, const char *qlabel,
*/
if (outer_plan)
{
- Relids varnos = pull_varnos(node);
+ Relids varnos = pull_varnos(node);
if (bms_is_member(OUTER, varnos))
outercontext = deparse_context_for_subplan("outer",
@@ -1037,9 +1035,7 @@ make_ors_ands_explicit(List *orclauses)
FastListInit(&args);
foreach(orptr, orclauses)
- {
FastAppend(&args, make_ands_explicit(lfirst(orptr)));
- }
return (Node *) make_orclause(FastListValue(&args));
}
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index 7a6a3775d64..181f52e1143 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/functioncmds.c,v 1.31 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/functioncmds.c,v 1.32 2003/08/04 00:43:16 momjian Exp $
*
* DESCRIPTION
* These routines take the parse tree and pick out the
@@ -80,8 +80,8 @@ compute_return_type(TypeName *returnType, Oid languageOid,
if (languageOid == SQLlanguageId)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("SQL function cannot return shell type %s",
- TypeNameToString(returnType))));
+ errmsg("SQL function cannot return shell type %s",
+ TypeNameToString(returnType))));
else
ereport(NOTICE,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
@@ -147,8 +147,8 @@ compute_parameter_types(List *argTypes, Oid languageOid,
if (parameterCount >= FUNC_MAX_ARGS)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_ARGUMENTS),
- errmsg("functions cannot have more than %d arguments",
- FUNC_MAX_ARGS)));
+ errmsg("functions cannot have more than %d arguments",
+ FUNC_MAX_ARGS)));
toid = LookupTypeName(t);
if (OidIsValid(toid))
@@ -159,8 +159,8 @@ compute_parameter_types(List *argTypes, Oid languageOid,
if (languageOid == SQLlanguageId)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("SQL function cannot accept shell type %s",
- TypeNameToString(t))));
+ errmsg("SQL function cannot accept shell type %s",
+ TypeNameToString(t))));
else
ereport(NOTICE,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
@@ -330,8 +330,8 @@ compute_attributes_with_style(List *parameters, bool *isStrict_p, char *volatili
else
ereport(WARNING,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("unrecognized function attribute \"%s\" ignored",
- param->defname)));
+ errmsg("unrecognized function attribute \"%s\" ignored",
+ param->defname)));
}
}
@@ -558,7 +558,7 @@ RemoveFunction(RemoveFuncStmt *stmt)
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is an aggregate function",
NameListToString(functionName)),
- errhint("Use DROP AGGREGATE to drop aggregate functions.")));
+ errhint("Use DROP AGGREGATE to drop aggregate functions.")));
if (((Form_pg_proc) GETSTRUCT(tup))->prolang == INTERNALlanguageId)
{
@@ -664,7 +664,7 @@ RenameFunction(List *name, List *argtypes, const char *newname)
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is an aggregate function",
NameListToString(name)),
- errhint("Use ALTER AGGREGATE to rename aggregate functions.")));
+ errhint("Use ALTER AGGREGATE to rename aggregate functions.")));
namespaceOid = procForm->pronamespace;
@@ -728,7 +728,7 @@ SetFunctionReturnType(Oid funcOid, Oid newRetType)
elog(ERROR, "cache lookup failed for function %u", funcOid);
procForm = (Form_pg_proc) GETSTRUCT(tup);
- if (procForm->prorettype != OPAQUEOID) /* caller messed up */
+ if (procForm->prorettype != OPAQUEOID) /* caller messed up */
elog(ERROR, "function %u doesn't return OPAQUE", funcOid);
/* okay to overwrite copied tuple */
@@ -815,7 +815,7 @@ CreateCast(CreateCastStmt *stmt)
if (sourcetypeid == targettypeid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("source data type and target data type are the same")));
+ errmsg("source data type and target data type are the same")));
/* No shells, no pseudo-types allowed */
if (!get_typisdefined(sourcetypeid))
@@ -878,10 +878,11 @@ CreateCast(CreateCastStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("return data type of cast function must match target data type")));
+
/*
* Restricting the volatility of a cast function may or may not be
* a good idea in the abstract, but it definitely breaks many old
- * user-defined types. Disable this check --- tgl 2/1/03
+ * user-defined types. Disable this check --- tgl 2/1/03
*/
#ifdef NOT_USED
if (procstruct->provolatile == PROVOLATILE_VOLATILE)
@@ -892,7 +893,7 @@ CreateCast(CreateCastStmt *stmt)
if (procstruct->proisagg)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("cast function must not be an aggregate function")));
+ errmsg("cast function must not be an aggregate function")));
if (procstruct->proretset)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
@@ -902,12 +903,12 @@ CreateCast(CreateCastStmt *stmt)
}
else
{
- int16 typ1len;
- int16 typ2len;
- bool typ1byval;
- bool typ2byval;
- char typ1align;
- char typ2align;
+ int16 typ1len;
+ int16 typ2len;
+ bool typ1byval;
+ bool typ2byval;
+ char typ1align;
+ char typ2align;
/* indicates binary coercibility */
funcid = InvalidOid;
@@ -924,7 +925,7 @@ CreateCast(CreateCastStmt *stmt)
/*
* Also, insist that the types match as to size, alignment, and
* pass-by-value attributes; this provides at least a crude check
- * that they have similar representations. A pair of types that
+ * that they have similar representations. A pair of types that
* fail this test should certainly not be equated.
*/
get_typlenbyvalalign(sourcetypeid, &typ1len, &typ1byval, &typ1align);
@@ -958,9 +959,9 @@ CreateCast(CreateCastStmt *stmt)
relation = heap_openr(CastRelationName, RowExclusiveLock);
/*
- * Check for duplicate. This is just to give a friendly error message,
- * the unique index would catch it anyway (so no need to sweat about
- * race conditions).
+ * Check for duplicate. This is just to give a friendly error
+ * message, the unique index would catch it anyway (so no need to
+ * sweat about race conditions).
*/
tuple = SearchSysCache(CASTSOURCETARGET,
ObjectIdGetDatum(sourcetypeid),
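
The reflowed comment in CreateCast's WITHOUT FUNCTION branch above says the source and target types must agree on size, pass-by-value, and alignment as a crude check that their representations are compatible. A self-contained sketch of that idea, using a made-up TypeRep struct in place of the backend's get_typlenbyvalalign() lookup (field values here are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the (len, byval, align) triple that
 * get_typlenbyvalalign() reports for a real pg_type entry. */
typedef struct TypeRep
{
	int		len;			/* -1 means varlena, otherwise fixed width in bytes */
	bool	byval;			/* passed by value rather than by reference? */
	char	align;			/* 'c', 's', 'i', or 'd' alignment code */
} TypeRep;

/* Crude representation check: refuse a binary (WITHOUT FUNCTION) cast
 * unless both types agree on all three physical properties. */
static bool
physically_compatible(TypeRep a, TypeRep b)
{
	return a.len == b.len && a.byval == b.byval && a.align == b.align;
}

int
main(void)
{
	TypeRep		int4 = {4, true, 'i'};		/* hypothetical values */
	TypeRep		oid = {4, true, 'i'};
	TypeRep		text = {-1, false, 'i'};

	printf("int4 -> oid  ok? %d\n", physically_compatible(int4, oid));
	printf("int4 -> text ok? %d\n", physically_compatible(int4, text));
	return 0;
}
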
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 4cd66fd1b5d..5e3cec954d3 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.103 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.104 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,11 +44,11 @@
/* non-export function prototypes */
static void CheckPredicate(List *predList);
static void ComputeIndexAttrs(IndexInfo *indexInfo, Oid *classOidP,
- List *attList,
- Oid relId,
- char *accessMethodName, Oid accessMethodId);
+ List *attList,
+ Oid relId,
+ char *accessMethodName, Oid accessMethodId);
static Oid GetIndexOpClass(List *opclass, Oid attrType,
- char *accessMethodName, Oid accessMethodId);
+ char *accessMethodName, Oid accessMethodId);
static Oid GetDefaultOpClass(Oid attrType, Oid accessMethodId);
/*
@@ -157,8 +157,8 @@ DefineIndex(RangeVar *heapRelation,
if (unique && !accessMethodForm->amcanunique)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("access method \"%s\" does not support UNIQUE indexes",
- accessMethodName)));
+ errmsg("access method \"%s\" does not support UNIQUE indexes",
+ accessMethodName)));
if (numberOfAttributes > 1 && !accessMethodForm->amcanmulticol)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -192,16 +192,16 @@ DefineIndex(RangeVar *heapRelation,
}
/*
- * Check that all of the attributes in a primary key are marked
- * as not null, otherwise attempt to ALTER TABLE .. SET NOT NULL
+ * Check that all of the attributes in a primary key are marked as not
+ * null, otherwise attempt to ALTER TABLE .. SET NOT NULL
*/
if (primary)
{
- List *keys;
+ List *keys;
foreach(keys, attributeList)
{
- IndexElem *key = (IndexElem *) lfirst(keys);
+ IndexElem *key = (IndexElem *) lfirst(keys);
HeapTuple atttuple;
if (!key->name)
@@ -216,15 +216,16 @@ DefineIndex(RangeVar *heapRelation,
atttuple = SearchSysCacheAttName(relationId, key->name);
if (HeapTupleIsValid(atttuple))
{
- if (! ((Form_pg_attribute) GETSTRUCT(atttuple))->attnotnull)
+ if (!((Form_pg_attribute) GETSTRUCT(atttuple))->attnotnull)
{
/*
* Try to make it NOT NULL.
*
* XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade
* to child tables? Currently, since the PRIMARY KEY
- * itself doesn't cascade, we don't cascade the notnull
- * constraint either; but this is pretty debatable.
+ * itself doesn't cascade, we don't cascade the
+ * notnull constraint either; but this is pretty
+ * debatable.
*/
AlterTableAlterColumnSetNotNull(relationId, false,
key->name);
@@ -236,8 +237,8 @@ DefineIndex(RangeVar *heapRelation,
/* This shouldn't happen if parser did its job ... */
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" named in key does not exist",
- key->name)));
+ errmsg("column \"%s\" named in key does not exist",
+ key->name)));
}
}
}
@@ -248,7 +249,7 @@ DefineIndex(RangeVar *heapRelation,
*/
indexInfo = makeNode(IndexInfo);
indexInfo->ii_NumIndexAttrs = numberOfAttributes;
- indexInfo->ii_Expressions = NIL; /* for now */
+ indexInfo->ii_Expressions = NIL; /* for now */
indexInfo->ii_ExpressionsState = NIL;
indexInfo->ii_Predicate = cnfPred;
indexInfo->ii_PredicateState = NIL;
@@ -308,7 +309,7 @@ CheckPredicate(List *predList)
if (contain_mutable_functions((Node *) predList))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("functions in index predicate must be marked IMMUTABLE")));
+ errmsg("functions in index predicate must be marked IMMUTABLE")));
}
static void
@@ -351,7 +352,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
else if (attribute->expr && IsA(attribute->expr, Var))
{
/* Tricky tricky, he wrote (column) ... treat as simple attr */
- Var *var = (Var *) attribute->expr;
+ Var *var = (Var *) attribute->expr;
indexInfo->ii_KeyAttrNumbers[attn] = var->varattno;
atttype = get_atttype(relId, var->varattno);
@@ -360,30 +361,30 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
{
/* Index expression */
Assert(attribute->expr != NULL);
- indexInfo->ii_KeyAttrNumbers[attn] = 0; /* marks expression */
+ indexInfo->ii_KeyAttrNumbers[attn] = 0; /* marks expression */
indexInfo->ii_Expressions = lappend(indexInfo->ii_Expressions,
attribute->expr);
atttype = exprType(attribute->expr);
/*
- * We don't currently support generation of an actual query plan
- * for an index expression, only simple scalar expressions;
- * hence these restrictions.
+ * We don't currently support generation of an actual query
+ * plan for an index expression, only simple scalar
+ * expressions; hence these restrictions.
*/
if (contain_subplans(attribute->expr))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use sub-select in index expression")));
+ errmsg("cannot use sub-select in index expression")));
if (contain_agg_clause(attribute->expr))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate in index expression")));
+ errmsg("cannot use aggregate in index expression")));
/*
* A expression using mutable functions is probably wrong,
- * since if you aren't going to get the same result for the same
- * data every time, it's not clear what the index entries mean at
- * all.
+ * since if you aren't going to get the same result for the
+ * same data every time, it's not clear what the index entries
+ * mean at all.
*/
if (contain_mutable_functions(attribute->expr))
ereport(ERROR,
@@ -413,21 +414,20 @@ GetIndexOpClass(List *opclass, Oid attrType,
opInputType;
/*
- * Release 7.0 removed network_ops, timespan_ops, and
- * datetime_ops, so we ignore those opclass names
- * so the default *_ops is used. This can be
- * removed in some later release. bjm 2000/02/07
+ * Release 7.0 removed network_ops, timespan_ops, and datetime_ops, so
+ * we ignore those opclass names so the default *_ops is used. This
+ * can be removed in some later release. bjm 2000/02/07
*
- * Release 7.1 removes lztext_ops, so suppress that too
- * for a while. tgl 2000/07/30
+ * Release 7.1 removes lztext_ops, so suppress that too for a while. tgl
+ * 2000/07/30
*
- * Release 7.2 renames timestamp_ops to timestamptz_ops,
- * so suppress that too for awhile. I'm starting to
- * think we need a better approach. tgl 2000/10/01
+ * Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that
+ * too for awhile. I'm starting to think we need a better approach.
+ * tgl 2000/10/01
*/
if (length(opclass) == 1)
{
- char *claname = strVal(lfirst(opclass));
+ char *claname = strVal(lfirst(opclass));
if (strcmp(claname, "network_ops") == 0 ||
strcmp(claname, "timespan_ops") == 0 ||
@@ -499,8 +499,8 @@ GetIndexOpClass(List *opclass, Oid attrType,
if (!IsBinaryCoercible(attrType, opInputType))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("operator class \"%s\" does not accept data type %s",
- NameListToString(opclass), format_type_be(attrType))));
+ errmsg("operator class \"%s\" does not accept data type %s",
+ NameListToString(opclass), format_type_be(attrType))));
ReleaseSysCache(tuple);
@@ -607,7 +607,7 @@ ReindexIndex(RangeVar *indexRelation, bool force /* currently unused */ )
tuple = SearchSysCache(RELOID,
ObjectIdGetDatum(indOid),
0, 0, 0);
- if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
+ if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
elog(ERROR, "cache lookup failed for relation %u", indOid);
if (((Form_pg_class) GETSTRUCT(tuple))->relkind != RELKIND_INDEX)
@@ -785,7 +785,8 @@ ReindexDatabase(const char *dbname, bool force, bool all)
for (i = 0; i < relcnt; i++)
{
StartTransactionCommand();
- SetQuerySnapshot(); /* might be needed for functions in indexes */
+ SetQuerySnapshot(); /* might be needed for functions in
+ * indexes */
if (reindex_relation(relids[i], force))
ereport(NOTICE,
(errmsg("relation %u was reindexed", relids[i])));
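
The reflowed comment block in GetIndexOpClass lists the legacy operator class names (network_ops, timespan_ops, datetime_ops, lztext_ops, timestamp_ops) that are silently ignored so the column's default *_ops class is used instead. A standalone sketch of that lookup, assuming a simple string table rather than the backend's list handling:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Opclass names from releases 7.0-7.2 that are ignored so that the
 * column's default *_ops operator class is chosen instead. */
static const char *legacy_opclasses[] = {
	"network_ops",
	"timespan_ops",
	"datetime_ops",
	"lztext_ops",
	"timestamp_ops",
	NULL
};

static bool
is_legacy_opclass(const char *claname)
{
	for (int i = 0; legacy_opclasses[i] != NULL; i++)
		if (strcmp(claname, legacy_opclasses[i]) == 0)
			return true;
	return false;
}

int
main(void)
{
	printf("%d\n", is_legacy_opclass("timespan_ops"));	/* 1: fall back to default */
	printf("%d\n", is_legacy_opclass("int4_ops"));		/* 0: look it up normally */
	return 0;
}
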
diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c
index 60b041466f1..52792bc31ab 100644
--- a/src/backend/commands/opclasscmds.c
+++ b/src/backend/commands/opclasscmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/opclasscmds.c,v 1.15 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/opclasscmds.c,v 1.16 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -103,13 +103,13 @@ DefineOpClass(CreateOpClassStmt *stmt)
* Currently, we require superuser privileges to create an opclass.
* This seems necessary because we have no way to validate that the
* offered set of operators and functions are consistent with the AM's
- * expectations. It would be nice to provide such a check someday,
- * if it can be done without solving the halting problem :-(
+ * expectations. It would be nice to provide such a check someday, if
+ * it can be done without solving the halting problem :-(
*/
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to create an operator class")));
+ errmsg("must be superuser to create an operator class")));
/* Look up the datatype */
typeoid = typenameTypeId(stmt->datatype);
@@ -157,8 +157,8 @@ DefineOpClass(CreateOpClassStmt *stmt)
if (operators[item->number - 1] != InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("operator number %d appears more than once",
- item->number)));
+ errmsg("operator number %d appears more than once",
+ item->number)));
if (item->args != NIL)
{
TypeName *typeName1 = (TypeName *) lfirst(item->args);
@@ -211,7 +211,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
if (OidIsValid(storageoid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("storage type specified more than once")));
+ errmsg("storage type specified more than once")));
storageoid = typenameTypeId(item->storedtype);
break;
default:
@@ -532,7 +532,7 @@ RemoveOpClass(RemoveOpClassStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator class \"%s\" does not exist for access method \"%s\"",
- NameListToString(stmt->opclassname), stmt->amname)));
+ NameListToString(stmt->opclassname), stmt->amname)));
opcID = HeapTupleGetOid(tuple);
@@ -681,7 +681,7 @@ RenameOpClass(List *name, const char *access_method, const char *newname)
tup = SearchSysCacheCopy(CLAOID,
ObjectIdGetDatum(opcOid),
0, 0, 0);
- if (!HeapTupleIsValid(tup)) /* should not happen */
+ if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "cache lookup failed for opclass %u", opcOid);
namespaceOid = ((Form_pg_opclass) GETSTRUCT(tup))->opcnamespace;
diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c
index 6a4d479c121..ddc088fe2f5 100644
--- a/src/backend/commands/operatorcmds.c
+++ b/src/backend/commands/operatorcmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/operatorcmds.c,v 1.10 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/operatorcmds.c,v 1.11 2003/08/04 00:43:16 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -103,7 +103,7 @@ DefineOperator(List *names, List *parameters)
if (typeName1->setof)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("setof type not allowed for operator argument")));
+ errmsg("setof type not allowed for operator argument")));
}
else if (strcasecmp(defel->defname, "rightarg") == 0)
{
@@ -111,7 +111,7 @@ DefineOperator(List *names, List *parameters)
if (typeName2->setof)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("setof type not allowed for operator argument")));
+ errmsg("setof type not allowed for operator argument")));
}
else if (strcasecmp(defel->defname, "procedure") == 0)
functionName = defGetQualifiedName(defel);
diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c
index cf4a0638717..aa5a5b9ea61 100644
--- a/src/backend/commands/portalcmds.c
+++ b/src/backend/commands/portalcmds.c
@@ -4,17 +4,17 @@
* Utility commands affecting portals (that is, SQL cursor commands)
*
* Note: see also tcop/pquery.c, which implements portal operations for
- * the FE/BE protocol. This module uses pquery.c for some operations.
+ * the FE/BE protocol. This module uses pquery.c for some operations.
* And both modules depend on utils/mmgr/portalmem.c, which controls
* storage management for portals (but doesn't run any queries in them).
- *
+ *
*
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/portalcmds.c,v 1.19 2003/08/01 13:53:36 petere Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/portalcmds.c,v 1.20 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,7 +36,7 @@
* Execute SQL DECLARE CURSOR command.
*/
void
-PerformCursorOpen(DeclareCursorStmt *stmt)
+PerformCursorOpen(DeclareCursorStmt * stmt)
{
List *rewritten;
Query *query;
@@ -64,7 +64,8 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
/*
* The query has been through parse analysis, but not rewriting or
* planning as yet. Note that the grammar ensured we have a SELECT
- * query, so we are not expecting rule rewriting to do anything strange.
+ * query, so we are not expecting rule rewriting to do anything
+ * strange.
*/
rewritten = QueryRewrite((Query *) stmt->query);
if (length(rewritten) != 1 || !IsA(lfirst(rewritten), Query))
@@ -86,8 +87,9 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
plan = planner(query, true, stmt->options);
/*
- * Create a portal and copy the query and plan into its memory context.
- * (If a duplicate cursor name already exists, warn and drop it.)
+ * Create a portal and copy the query and plan into its memory
+ * context. (If a duplicate cursor name already exists, warn and drop
+ * it.)
*/
portal = CreatePortal(stmt->portalname, true, false);
@@ -98,7 +100,7 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
PortalDefineQuery(portal,
NULL, /* unfortunately don't have sourceText */
- "SELECT", /* cursor's query is always a SELECT */
+ "SELECT", /* cursor's query is always a SELECT */
makeList1(query),
makeList1(plan),
PortalGetHeapMemory(portal));
@@ -108,9 +110,9 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
/*
* Set up options for portal.
*
- * If the user didn't specify a SCROLL type, allow or disallow
- * scrolling based on whether it would require any additional
- * runtime overhead to do so.
+ * If the user didn't specify a SCROLL type, allow or disallow scrolling
+ * based on whether it would require any additional runtime overhead
+ * to do so.
*/
portal->cursorOptions = stmt->options;
if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
@@ -129,8 +131,8 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
Assert(portal->strategy == PORTAL_ONE_SELECT);
/*
- * We're done; the query won't actually be run until PerformPortalFetch
- * is called.
+ * We're done; the query won't actually be run until
+ * PerformPortalFetch is called.
*/
}
@@ -169,7 +171,7 @@ PerformPortalFetch(FetchStmt *stmt,
/* FIXME: shouldn't this be an ERROR? */
ereport(WARNING,
(errcode(ERRCODE_UNDEFINED_CURSOR),
- errmsg("portal \"%s\" does not exist", stmt->portalname)));
+ errmsg("portal \"%s\" does not exist", stmt->portalname)));
if (completionTag)
strcpy(completionTag, stmt->ismove ? "MOVE 0" : "FETCH 0");
return;
@@ -219,7 +221,7 @@ PerformPortalClose(const char *name)
ereport(WARNING,
(errcode(ERRCODE_UNDEFINED_CURSOR),
errmsg("portal \"%s\" does not exist", name),
- errfunction("PerformPortalClose"))); /* for ecpg */
+ errfunction("PerformPortalClose"))); /* for ecpg */
return;
}
@@ -249,7 +251,8 @@ PortalCleanup(Portal portal, bool isError)
/*
* Shut down executor, if still running. We skip this during error
* abort, since other mechanisms will take care of releasing executor
- * resources, and we can't be sure that ExecutorEnd itself wouldn't fail.
+ * resources, and we can't be sure that ExecutorEnd itself wouldn't
+ * fail.
*/
queryDesc = PortalGetQueryDesc(portal);
if (queryDesc)
@@ -271,14 +274,14 @@ PortalCleanup(Portal portal, bool isError)
void
PersistHoldablePortal(Portal portal)
{
- QueryDesc *queryDesc = PortalGetQueryDesc(portal);
+ QueryDesc *queryDesc = PortalGetQueryDesc(portal);
MemoryContext savePortalContext;
MemoryContext saveQueryContext;
MemoryContext oldcxt;
/*
- * If we're preserving a holdable portal, we had better be
- * inside the transaction that originally created it.
+ * If we're preserving a holdable portal, we had better be inside the
+ * transaction that originally created it.
*/
Assert(portal->createXact == GetCurrentTransactionId());
Assert(queryDesc != NULL);
@@ -321,9 +324,8 @@ PersistHoldablePortal(Portal portal)
MemoryContextSwitchTo(PortalContext);
/*
- * Rewind the executor: we need to store the entire result set in
- * the tuplestore, so that subsequent backward FETCHs can be
- * processed.
+ * Rewind the executor: we need to store the entire result set in the
+ * tuplestore, so that subsequent backward FETCHs can be processed.
*/
ExecutorRewind(queryDesc);
@@ -351,17 +353,17 @@ PersistHoldablePortal(Portal portal)
/*
* Reset the position in the result set: ideally, this could be
* implemented by just skipping straight to the tuple # that we need
- * to be at, but the tuplestore API doesn't support that. So we
- * start at the beginning of the tuplestore and iterate through it
- * until we reach where we need to be. FIXME someday?
+ * to be at, but the tuplestore API doesn't support that. So we start
+ * at the beginning of the tuplestore and iterate through it until we
+ * reach where we need to be. FIXME someday?
*/
MemoryContextSwitchTo(portal->holdContext);
if (!portal->atEnd)
{
- long store_pos;
+ long store_pos;
- if (portal->posOverflow) /* oops, cannot trust portalPos */
+ if (portal->posOverflow) /* oops, cannot trust portalPos */
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not reposition held cursor")));
@@ -370,8 +372,8 @@ PersistHoldablePortal(Portal portal)
for (store_pos = 0; store_pos < portal->portalPos; store_pos++)
{
- HeapTuple tup;
- bool should_free;
+ HeapTuple tup;
+ bool should_free;
tup = tuplestore_gettuple(portal->holdStore, true,
&should_free);
@@ -389,8 +391,8 @@ PersistHoldablePortal(Portal portal)
/*
* We can now release any subsidiary memory of the portal's heap
* context; we'll never use it again. The executor already dropped
- * its context, but this will clean up anything that glommed onto
- * the portal's heap via PortalContext.
+ * its context, but this will clean up anything that glommed onto the
+ * portal's heap via PortalContext.
*/
MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
}
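
PersistHoldablePortal's reflowed comment notes that the tuplestore API has no way to seek straight to a tuple number, so the code re-reads from the beginning until it is back at portalPos. A minimal standalone sketch of that skip-forward pattern, with a dummy fetch function standing in for tuplestore_gettuple():

#include <stdbool.h>
#include <stdio.h>

#define NTUPLES 10

static int	position = 0;		/* read head of a pretend tuplestore */

/* Stand-in for tuplestore_gettuple(): returns false once exhausted. */
static bool
fetch_next(int *tup)
{
	if (position >= NTUPLES)
		return false;
	*tup = position++;
	return true;
}

int
main(void)
{
	long		portal_pos = 7;	/* where the cursor had been */
	int			tup;

	/* No random access: skip forward one tuple at a time until the
	 * store is positioned where the portal left off. */
	for (long store_pos = 0; store_pos < portal_pos; store_pos++)
	{
		if (!fetch_next(&tup))
		{
			fprintf(stderr, "unexpected end of tuple store\n");
			return 1;
		}
	}
	printf("repositioned after %ld tuples\n", portal_pos);
	return 0;
}
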
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index cd58d7fc7b6..d0fabd1ad31 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -10,7 +10,7 @@
* Copyright (c) 2002-2003, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/prepare.c,v 1.21 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/prepare.c,v 1.22 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -39,7 +39,7 @@ static HTAB *prepared_queries = NULL;
static void InitQueryHashTable(void);
static ParamListInfo EvaluateParams(EState *estate,
- List *params, List *argtypes);
+ List *params, List *argtypes);
/*
* Implements the 'PREPARE' utility statement.
@@ -90,12 +90,12 @@ PrepareQuery(PrepareStmt *stmt)
/* Rewrite the query. The result could be 0, 1, or many queries. */
query_list = QueryRewrite(stmt->query);
- /* Generate plans for queries. Snapshot is already set. */
+ /* Generate plans for queries. Snapshot is already set. */
plan_list = pg_plan_queries(query_list, false);
/* Save the results. */
StorePreparedStatement(stmt->name,
- NULL, /* text form not available */
+ NULL, /* text form not available */
commandTag,
query_list,
plan_list,
@@ -131,8 +131,8 @@ ExecuteQuery(ExecuteStmt *stmt, DestReceiver *dest)
if (entry->argtype_list != NIL)
{
/*
- * Need an EState to evaluate parameters; must not delete it
- * till end of query, in case parameters are pass-by-reference.
+ * Need an EState to evaluate parameters; must not delete it till
+ * end of query, in case parameters are pass-by-reference.
*/
estate = CreateExecutorState();
paramLI = EvaluateParams(estate, stmt->params, entry->argtype_list);
@@ -144,15 +144,15 @@ ExecuteQuery(ExecuteStmt *stmt, DestReceiver *dest)
portal = CreateNewPortal();
/*
- * For CREATE TABLE / AS EXECUTE, make a copy of the stored query
- * so that we can modify its destination (yech, but this has
- * always been ugly). For regular EXECUTE we can just use the
- * stored query where it sits, since the executor is read-only.
+ * For CREATE TABLE / AS EXECUTE, make a copy of the stored query so
+ * that we can modify its destination (yech, but this has always been
+ * ugly). For regular EXECUTE we can just use the stored query where
+ * it sits, since the executor is read-only.
*/
if (stmt->into)
{
MemoryContext oldContext;
- Query *query;
+ Query *query;
oldContext = MemoryContextSwitchTo(PortalGetHeapMemory(portal));
@@ -208,11 +208,11 @@ ExecuteQuery(ExecuteStmt *stmt, DestReceiver *dest)
static ParamListInfo
EvaluateParams(EState *estate, List *params, List *argtypes)
{
- int nargs = length(argtypes);
- ParamListInfo paramLI;
- List *exprstates;
- List *l;
- int i = 0;
+ int nargs = length(argtypes);
+ ParamListInfo paramLI;
+ List *exprstates;
+ List *l;
+ int i = 0;
/* Parser should have caught this error, but check for safety */
if (length(params) != nargs)
@@ -229,7 +229,7 @@ EvaluateParams(EState *estate, List *params, List *argtypes)
bool isNull;
paramLI[i].value = ExecEvalExprSwitchContext(n,
- GetPerTupleExprContext(estate),
+ GetPerTupleExprContext(estate),
&isNull,
NULL);
paramLI[i].kind = PARAM_NUM;
@@ -273,7 +273,7 @@ InitQueryHashTable(void)
* to the hash entry, so the caller can dispose of their copy.
*
* Exception: commandTag is presumed to be a pointer to a constant string,
- * or possibly NULL, so it need not be copied. Note that commandTag should
+ * or possibly NULL, so it need not be copied. Note that commandTag should
* be NULL only if the original query (before rewriting) was empty.
*/
void
@@ -367,9 +367,9 @@ FetchPreparedStatement(const char *stmt_name, bool throwError)
if (prepared_queries)
{
/*
- * We can't just use the statement name as supplied by the user: the
- * hash package is picky enough that it needs to be NULL-padded out to
- * the appropriate length to work correctly.
+ * We can't just use the statement name as supplied by the user:
+ * the hash package is picky enough that it needs to be
+ * NULL-padded out to the appropriate length to work correctly.
*/
MemSet(key, 0, sizeof(key));
strncpy(key, stmt_name, sizeof(key));
@@ -412,9 +412,9 @@ FetchPreparedStatementParams(const char *stmt_name)
* Note: the result is created or copied into current memory context.
*/
TupleDesc
-FetchPreparedStatementResultDesc(PreparedStatement *stmt)
+FetchPreparedStatementResultDesc(PreparedStatement * stmt)
{
- Query *query;
+ Query *query;
switch (ChoosePortalStrategy(stmt->query_list))
{
@@ -476,7 +476,7 @@ DropPreparedStatement(const char *stmt_name, bool showError)
void
ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
{
- ExecuteStmt *execstmt = (ExecuteStmt *) stmt->query->utilityStmt;
+ ExecuteStmt *execstmt = (ExecuteStmt *) stmt->query->utilityStmt;
PreparedStatement *entry;
List *l,
*query_list,
@@ -499,8 +499,8 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
if (entry->argtype_list != NIL)
{
/*
- * Need an EState to evaluate parameters; must not delete it
- * till end of query, in case parameters are pass-by-reference.
+ * Need an EState to evaluate parameters; must not delete it till
+ * end of query, in case parameters are pass-by-reference.
*/
estate = CreateExecutorState();
paramLI = EvaluateParams(estate, execstmt->params,
@@ -510,8 +510,8 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
/* Explain each query */
foreach(l, query_list)
{
- Query *query = (Query *) lfirst(l);
- Plan *plan = (Plan *) lfirst(plan_list);
+ Query *query = (Query *) lfirst(l);
+ Plan *plan = (Plan *) lfirst(plan_list);
bool is_last_query;
plan_list = lnext(plan_list);
@@ -533,7 +533,7 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
if (query->commandType != CMD_SELECT)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("prepared statement is not a SELECT")));
+ errmsg("prepared statement is not a SELECT")));
/* Copy the query so we can modify it */
query = copyObject(query);
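
FetchPreparedStatement's reflowed comment explains why the statement name cannot be used directly as a hash key: the hash package expects fixed-length keys, so the name is zero-padded out to the full key size first. A tiny standalone sketch of that padding (the key length here is made up):

#include <stdio.h>
#include <string.h>

#define KEY_SIZE 32				/* illustrative fixed key length */

int
main(void)
{
	const char *stmt_name = "myplan";
	char		key[KEY_SIZE];

	/* Zero the whole buffer, then copy the name in, so every unused
	 * byte is NUL and two lookups of the same name hash identically. */
	memset(key, 0, sizeof(key));
	strncpy(key, stmt_name, sizeof(key));

	printf("key[0..7] = \"%.8s\", trailing bytes are all zero\n", key);
	return 0;
}
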
diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c
index 69000b29bc7..b0a4702a715 100644
--- a/src/backend/commands/proclang.c
+++ b/src/backend/commands/proclang.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/proclang.c,v 1.47 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/proclang.c,v 1.48 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,7 +60,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to create procedural language")));
+ errmsg("must be superuser to create procedural language")));
/*
* Translate the language name and check that this language doesn't
@@ -85,7 +85,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
if (funcrettype != LANGUAGE_HANDLEROID)
{
/*
- * We allow OPAQUE just so we can load old dump files. When we
+ * We allow OPAQUE just so we can load old dump files. When we
* see a handler function declared OPAQUE, change it to
* LANGUAGE_HANDLER.
*/
@@ -183,7 +183,7 @@ DropProceduralLanguage(DropPLangStmt *stmt)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to drop procedural language")));
+ errmsg("must be superuser to drop procedural language")));
/*
* Translate the language name, check that this language exist and is
@@ -225,7 +225,7 @@ DropProceduralLanguageById(Oid langOid)
langTup = SearchSysCache(LANGOID,
ObjectIdGetDatum(langOid),
0, 0, 0);
- if (!HeapTupleIsValid(langTup)) /* should not happen */
+ if (!HeapTupleIsValid(langTup)) /* should not happen */
elog(ERROR, "cache lookup failed for language %u", langOid);
simple_heap_delete(rel, &langTup->t_self);
@@ -266,7 +266,7 @@ RenameLanguage(const char *oldname, const char *newname)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to rename procedural language")));
+ errmsg("must be superuser to rename procedural language")));
/* rename */
namestrcpy(&(((Form_pg_language) GETSTRUCT(tup))->lanname), newname);
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index 5ad81634f41..4eb285daa33 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/schemacmds.c,v 1.14 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/schemacmds.c,v 1.15 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -98,7 +98,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable schema name \"%s\"", schemaName),
- errdetail("The prefix \"pg_\" is reserved for system schemas.")));
+ errdetail("The prefix \"pg_\" is reserved for system schemas.")));
/* Create the schema's namespace */
namespaceId = NamespaceCreate(schemaName, owner_userid);
@@ -215,7 +215,7 @@ RemoveSchemaById(Oid schemaOid)
tup = SearchSysCache(NAMESPACEOID,
ObjectIdGetDatum(schemaOid),
0, 0, 0);
- if (!HeapTupleIsValid(tup)) /* should not happen */
+ if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "cache lookup failed for namespace %u", schemaOid);
simple_heap_delete(relation, &tup->t_self);
@@ -248,9 +248,9 @@ RenameSchema(const char *oldname, const char *newname)
/* make sure the new name doesn't exist */
if (HeapTupleIsValid(
- SearchSysCache(NAMESPACENAME,
- CStringGetDatum(newname),
- 0, 0, 0)))
+ SearchSysCache(NAMESPACENAME,
+ CStringGetDatum(newname),
+ 0, 0, 0)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_SCHEMA),
errmsg("schema \"%s\" already exists", newname)));
@@ -270,7 +270,7 @@ RenameSchema(const char *oldname, const char *newname)
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable schema name \"%s\"", newname),
- errdetail("The prefix \"pg_\" is reserved for system schemas.")));
+ errdetail("The prefix \"pg_\" is reserved for system schemas.")));
/* rename */
namestrcpy(&(((Form_pg_namespace) GETSTRUCT(tup))->nspname), newname);
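
Both CreateSchemaCommand and RenameSchema above reject names starting with "pg_", since that prefix is reserved for system schemas. The check itself is just a prefix comparison; a self-contained sketch (the helper name is illustrative, not the backend's):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Names beginning with "pg_" are reserved for system schemas. */
static bool
is_reserved_schema_name(const char *name)
{
	return strncmp(name, "pg_", 3) == 0;
}

int
main(void)
{
	printf("%d\n", is_reserved_schema_name("pg_catalog"));	/* 1: rejected */
	printf("%d\n", is_reserved_schema_name("sales"));		/* 0: accepted */
	return 0;
}
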
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 7ce7810fbca..01544a015b3 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.99 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.100 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -68,7 +68,7 @@ static SeqTable seqtab = NULL; /* Head of list of SeqTable items */
static void init_sequence(RangeVar *relation,
- SeqTable *p_elm, Relation *p_rel);
+ SeqTable *p_elm, Relation *p_rel);
static Form_pg_sequence read_info(SeqTable elm, Relation rel, Buffer *buf);
static void init_params(List *options, Form_pg_sequence new);
static void do_setval(RangeVar *sequence, int64 next, bool iscalled);
@@ -97,10 +97,10 @@ DefineSequence(CreateSeqStmt *seq)
/* Values are NULL (or false) by default */
new.last_value = 0;
new.increment_by = 0;
- new.max_value = 0;
+ new.max_value = 0;
new.min_value = 0;
new.cache_value = 0;
- new.is_cycled = false;
+ new.is_cycled = false;
/* Check and set values */
init_params(seq->options, &new);
@@ -299,10 +299,10 @@ DefineSequence(CreateSeqStmt *seq)
/*
* AlterSequence
*
- * Modify the definition of a sequence relation
+ * Modify the definition of a sequence relation
*/
void
-AlterSequence(AlterSeqStmt *stmt)
+AlterSequence(AlterSeqStmt * stmt)
{
SeqTable elm;
Relation seqrel;
@@ -324,7 +324,7 @@ AlterSequence(AlterSeqStmt *stmt)
page = BufferGetPage(buf);
new.increment_by = seq->increment_by;
- new.max_value = seq->max_value;
+ new.max_value = seq->max_value;
new.min_value = seq->min_value;
new.cache_value = seq->cache_value;
new.is_cycled = seq->is_cycled;
@@ -346,9 +346,9 @@ AlterSequence(AlterSeqStmt *stmt)
}
/* save info in local cache */
- elm->last = new.last_value; /* last returned number */
- elm->cached = new.last_value; /* last cached number (forget cached
- * values) */
+ elm->last = new.last_value; /* last returned number */
+ elm->cached = new.last_value; /* last cached number (forget
+ * cached values) */
START_CRIT_SECTION();
@@ -494,9 +494,9 @@ nextval(PG_FUNCTION_ARGS)
snprintf(buf, sizeof(buf), INT64_FORMAT, maxv);
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("%s.nextval: reached MAXVALUE (%s)",
- sequence->relname, buf)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("%s.nextval: reached MAXVALUE (%s)",
+ sequence->relname, buf)));
}
next = minv;
}
@@ -517,9 +517,9 @@ nextval(PG_FUNCTION_ARGS)
snprintf(buf, sizeof(buf), INT64_FORMAT, minv);
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("%s.nextval: reached MINVALUE (%s)",
- sequence->relname, buf)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("%s.nextval: reached MINVALUE (%s)",
+ sequence->relname, buf)));
}
next = maxv;
}
@@ -895,9 +895,9 @@ init_params(List *options, Form_pg_sequence new)
errmsg("conflicting or redundant options")));
increment_by = defel;
}
+
/*
- * start is for a new sequence
- * restart is for alter
+ * start is for a new sequence restart is for alter
*/
else if (strcmp(defel->defname, "start") == 0 ||
strcmp(defel->defname, "restart") == 0)
@@ -963,9 +963,9 @@ init_params(List *options, Form_pg_sequence new)
|| (max_value != (DefElem *) NULL && !max_value->arg))
{
if (new->increment_by > 0)
- new->max_value = SEQ_MAXVALUE; /* ascending seq */
+ new->max_value = SEQ_MAXVALUE; /* ascending seq */
else
- new->max_value = -1; /* descending seq */
+ new->max_value = -1; /* descending seq */
}
else if (max_value != (DefElem *) NULL)
new->max_value = defGetInt64(max_value);
@@ -975,9 +975,9 @@ init_params(List *options, Form_pg_sequence new)
|| (min_value != (DefElem *) NULL && !min_value->arg))
{
if (new->increment_by > 0)
- new->min_value = 1; /* ascending seq */
+ new->min_value = 1; /* ascending seq */
else
- new->min_value = SEQ_MINVALUE; /* descending seq */
+ new->min_value = SEQ_MINVALUE; /* descending seq */
}
else if (min_value != (DefElem *) NULL)
new->min_value = defGetInt64(min_value);
@@ -996,7 +996,7 @@ init_params(List *options, Form_pg_sequence new)
}
/* START WITH */
- if (new->last_value == 0 && last_value == (DefElem *) NULL)
+ if (new->last_value == 0 && last_value == (DefElem *) NULL)
{
if (new->increment_by > 0)
new->last_value = new->min_value; /* ascending seq */
@@ -1015,8 +1015,8 @@ init_params(List *options, Form_pg_sequence new)
snprintf(bufm, sizeof(bufm), INT64_FORMAT, new->min_value);
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("START value (%s) can't be less than MINVALUE (%s)",
- bufs, bufm)));
+ errmsg("START value (%s) can't be less than MINVALUE (%s)",
+ bufs, bufm)));
}
if (new->last_value > new->max_value)
{
@@ -1027,8 +1027,8 @@ init_params(List *options, Form_pg_sequence new)
snprintf(bufm, sizeof(bufm), INT64_FORMAT, new->max_value);
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("START value (%s) can't be greater than MAXVALUE (%s)",
- bufs, bufm)));
+ errmsg("START value (%s) can't be greater than MAXVALUE (%s)",
+ bufs, bufm)));
}
/* CACHE */
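
The sequence.c hunks above touch two related pieces of logic: init_params defaults MAXVALUE/MINVALUE from the sign of INCREMENT BY, and nextval either wraps to the opposite bound or errors when it would pass a bound, depending on CYCLE. A standalone sketch of that arithmetic with plain long long values (the struct is illustrative, not the backend's Form_pg_sequence, and it ignores caching and the first-call case):

#include <stdbool.h>
#include <stdio.h>

typedef struct SeqSketch
{
	long long	last_value;
	long long	increment_by;
	long long	min_value;
	long long	max_value;
	bool		is_cycled;
} SeqSketch;

/* Default the bounds from the increment sign, as init_params does:
 * ascending sequences run 1..MAX, descending ones run MIN..-1. */
static void
default_bounds(SeqSketch *seq, long long seq_min, long long seq_max)
{
	if (seq->increment_by > 0)
	{
		seq->min_value = 1;
		seq->max_value = seq_max;
	}
	else
	{
		seq->min_value = seq_min;
		seq->max_value = -1;
	}
	seq->last_value = (seq->increment_by > 0) ? seq->min_value : seq->max_value;
}

/* One nextval step: advance, and on overshoot either cycle to the
 * opposite bound or report failure, mirroring the checks above. */
static bool
next_value(SeqSketch *seq, long long *result)
{
	long long	next = seq->last_value + seq->increment_by;

	if (seq->increment_by > 0 && next > seq->max_value)
	{
		if (!seq->is_cycled)
			return false;		/* "reached MAXVALUE" error in the backend */
		next = seq->min_value;
	}
	else if (seq->increment_by < 0 && next < seq->min_value)
	{
		if (!seq->is_cycled)
			return false;		/* "reached MINVALUE" error */
		next = seq->max_value;
	}
	seq->last_value = next;
	*result = next;
	return true;
}

int
main(void)
{
	SeqSketch	seq = {0, 1, 0, 0, true};
	long long	v;

	default_bounds(&seq, -100, 3);	/* tiny bounds so the cycle is visible */
	for (int i = 0; i < 5; i++)
		if (next_value(&seq, &v))
			printf("nextval = %lld\n", v);
	return 0;
}
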
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index b3108053d9d..6e503fdac54 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/tablecmds.c,v 1.76 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/tablecmds.c,v 1.77 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,18 +57,19 @@
*/
typedef struct OnCommitItem
{
- Oid relid; /* relid of relation */
- OnCommitAction oncommit; /* what to do at end of xact */
+ Oid relid; /* relid of relation */
+ OnCommitAction oncommit; /* what to do at end of xact */
/*
* If this entry was created during this xact, it should be deleted at
* xact abort. Conversely, if this entry was deleted during this
* xact, it should be removed at xact commit. We leave deleted
- * entries in the list until commit so that we can roll back if needed.
+ * entries in the list until commit so that we can roll back if
+ * needed.
*/
bool created_in_cur_xact;
bool deleted_in_cur_xact;
-} OnCommitItem;
+} OnCommitItem;
static List *on_commits = NIL;
@@ -82,14 +83,14 @@ static void setRelhassubclassInRelation(Oid relationId, bool relhassubclass);
static bool needs_toast_table(Relation rel);
static void AlterTableAddCheckConstraint(Relation rel, Constraint *constr);
static void AlterTableAddForeignKeyConstraint(Relation rel,
- FkConstraint *fkconstraint);
+ FkConstraint *fkconstraint);
static int transformColumnNameList(Oid relId, List *colList,
- int16 *attnums, Oid *atttypids);
+ int16 *attnums, Oid *atttypids);
static int transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
- List **attnamelist,
- int16 *attnums, Oid *atttypids);
-static Oid transformFkeyCheckAttrs(Relation pkrel,
- int numattrs, int16 *attnums);
+ List **attnamelist,
+ int16 *attnums, Oid *atttypids);
+static Oid transformFkeyCheckAttrs(Relation pkrel,
+ int numattrs, int16 *attnums);
static void validateForeignKeyConstraint(FkConstraint *fkconstraint,
Relation rel, Relation pkrel);
static void createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
@@ -206,8 +207,8 @@ DefineRelation(CreateStmt *stmt, char relkind)
if (strcmp(check[i].ccname, cdef->name) == 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("duplicate CHECK constraint name \"%s\"",
- cdef->name)));
+ errmsg("duplicate CHECK constraint name \"%s\"",
+ cdef->name)));
}
check[ncheck].ccname = cdef->name;
}
@@ -399,7 +400,7 @@ TruncateRelation(const RangeVar *relation)
if (isOtherTempNamespace(RelationGetNamespace(rel)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot truncate temp tables of other processes")));
+ errmsg("cannot truncate temp tables of other processes")));
/*
* Don't allow truncate on tables which are referenced by foreign keys
@@ -435,8 +436,8 @@ TruncateRelation(const RangeVar *relation)
heap_close(fkeyRel, AccessShareLock);
/*
- * Do the real work using the same technique as cluster, but
- * without the data-copying portion
+ * Do the real work using the same technique as cluster, but without
+ * the data-copying portion
*/
rebuild_relation(rel, InvalidOid);
@@ -570,8 +571,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
if (!istemp && isTempNamespace(RelationGetNamespace(relation)))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot inherit from temporary relation \"%s\"",
- parent->relname)));
+ errmsg("cannot inherit from temporary relation \"%s\"",
+ parent->relname)));
/*
* We should have an UNDER permission flag for this, but for now,
@@ -652,7 +653,7 @@ MergeAttributes(List *schema, List *supers, bool istemp,
attributeName),
errdetail("%s versus %s",
TypeNameToString(def->typename),
- format_type_be(attribute->atttypid))));
+ format_type_be(attribute->atttypid))));
def->inhcount++;
/* Merge of NOT NULL constraints = OR 'em together */
def->is_not_null |= attribute->attnotnull;
@@ -803,11 +804,11 @@ MergeAttributes(List *schema, List *supers, bool istemp,
def->typename->typmod != newdef->typename->typmod)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("attribute \"%s\" has a type conflict",
- attributeName),
+ errmsg("attribute \"%s\" has a type conflict",
+ attributeName),
errdetail("%s versus %s",
TypeNameToString(def->typename),
- TypeNameToString(newdef->typename))));
+ TypeNameToString(newdef->typename))));
/* Mark the column as locally defined */
def->is_local = true;
/* Merge of NOT NULL constraints = OR 'em together */
@@ -1230,8 +1231,8 @@ renameatt(Oid myrelid,
0, 0))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" already exists",
- newattname, RelationGetRelationName(targetrelation))));
+ errmsg("attribute \"%s\" of relation \"%s\" already exists",
+ newattname, RelationGetRelationName(targetrelation))));
namestrcpy(&(attform->attname), newattname);
@@ -1257,7 +1258,7 @@ renameatt(Oid myrelid,
/*
* Scan through index columns to see if there's any simple index
- * entries for this attribute. We ignore expressional entries.
+ * entries for this attribute. We ignore expressional entries.
*/
indextup = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(indexoid),
@@ -1270,6 +1271,7 @@ renameatt(Oid myrelid,
{
if (attnum != indexform->indkey[i])
continue;
+
/*
* Found one, rename it.
*/
@@ -1279,6 +1281,7 @@ renameatt(Oid myrelid,
0, 0);
if (!HeapTupleIsValid(atttup))
continue; /* should we raise an error? */
+
/*
* Update the (copied) attribute tuple.
*/
@@ -1366,7 +1369,7 @@ renamerel(Oid myrelid, const char *newrelname)
reltup = SearchSysCacheCopy(RELOID,
PointerGetDatum(myrelid),
0, 0, 0);
- if (!HeapTupleIsValid(reltup)) /* shouldn't happen */
+ if (!HeapTupleIsValid(reltup)) /* shouldn't happen */
elog(ERROR, "cache lookup failed for relation %u", myrelid);
if (get_relname_relid(newrelname, namespaceId) != InvalidOid)
@@ -1743,7 +1746,7 @@ AlterTableAddColumn(Oid myrelid,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("child table \"%s\" has different type for column \"%s\"",
- get_rel_name(childrelid), colDef->colname)));
+ get_rel_name(childrelid), colDef->colname)));
/*
* XXX if we supported NOT NULL or defaults, would need to do
@@ -1782,7 +1785,7 @@ AlterTableAddColumn(Oid myrelid,
if (find_inheritance_children(myrelid) != NIL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("attribute must be added to child tables too")));
+ errmsg("attribute must be added to child tables too")));
}
/*
@@ -1801,14 +1804,14 @@ AlterTableAddColumn(Oid myrelid,
if (colDef->raw_default || colDef->cooked_default)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("adding columns with defaults is not implemented"),
- errhint("Add the column, then use ALTER TABLE SET DEFAULT.")));
+ errmsg("adding columns with defaults is not implemented"),
+ errhint("Add the column, then use ALTER TABLE SET DEFAULT.")));
if (colDef->is_not_null)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("adding NOT NULL columns is not implemented"),
- errhint("Add the column, then use ALTER TABLE SET NOT NULL.")));
+ errhint("Add the column, then use ALTER TABLE SET NOT NULL.")));
pgclass = heap_openr(RelationRelationName, RowExclusiveLock);
@@ -1829,8 +1832,8 @@ AlterTableAddColumn(Oid myrelid,
0, 0))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" already exists",
- colDef->colname, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" already exists",
+ colDef->colname, RelationGetRelationName(rel))));
minattnum = ((Form_pg_class) GETSTRUCT(reltup))->relnatts;
maxatts = minattnum + 1;
@@ -2014,8 +2017,8 @@ AlterTableAlterColumnDropNotNull(Oid myrelid, bool recurse,
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
/* Prevent them from altering a system attribute */
if (attnum < 0)
@@ -2057,8 +2060,8 @@ AlterTableAlterColumnDropNotNull(Oid myrelid, bool recurse,
if (indexStruct->indkey[i] == attnum)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("attribute \"%s\" is in a primary key",
- colName)));
+ errmsg("attribute \"%s\" is in a primary key",
+ colName)));
}
}
@@ -2158,8 +2161,8 @@ AlterTableAlterColumnSetNotNull(Oid myrelid, bool recurse,
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
/* Prevent them from altering a system attribute */
if (attnum < 0)
@@ -2286,8 +2289,8 @@ AlterTableAlterColumnDefault(Oid myrelid, bool recurse,
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
/* Prevent them from altering a system attribute */
if (attnum < 0)
@@ -2450,8 +2453,8 @@ AlterTableAlterColumnFlags(Oid myrelid, bool recurse,
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
attrtuple = (Form_pg_attribute) GETSTRUCT(tuple);
if (attrtuple->attnum < 0)
@@ -2476,8 +2479,8 @@ AlterTableAlterColumnFlags(Oid myrelid, bool recurse,
else
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("column datatype %s can only have storage \"plain\"",
- format_type_be(attrtuple->atttypid))));
+ errmsg("column datatype %s can only have storage \"plain\"",
+ format_type_be(attrtuple->atttypid))));
}
simple_heap_update(attrelation, &tuple->t_self, tuple);
@@ -2573,7 +2576,7 @@ AlterTableAlterOids(Oid myrelid, bool recurse, bool setOid)
(errmsg("table \"%s\" is already WITHOUT OIDS",
RelationGetRelationName(rel))));
heap_close(class_rel, RowExclusiveLock);
- heap_close(rel, NoLock); /* close rel, but keep lock! */
+ heap_close(rel, NoLock); /* close rel, but keep lock! */
return;
}
@@ -2601,8 +2604,8 @@ AlterTableAlterOids(Oid myrelid, bool recurse, bool setOid)
attrel = heap_open(RelOid_pg_attribute, RowExclusiveLock);
/*
- * Oids are being removed from the relation, so we need
- * to remove the oid pg_attribute record relating.
+ * Oids are being removed from the relation, so we need to remove
+ * the oid pg_attribute record relating.
*/
atttup = SearchSysCache(ATTNUM,
ObjectIdGetDatum(myrelid),
@@ -2621,7 +2624,7 @@ AlterTableAlterOids(Oid myrelid, bool recurse, bool setOid)
heap_close(class_rel, RowExclusiveLock);
- heap_close(rel, NoLock); /* close rel, but keep lock! */
+ heap_close(rel, NoLock); /* close rel, but keep lock! */
}
/*
@@ -2663,8 +2666,8 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
/* Can't drop a system attribute */
/* XXX perhaps someday allow dropping OID? */
@@ -2712,7 +2715,7 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
colName, childrelid);
childatt = (Form_pg_attribute) GETSTRUCT(tuple);
- if (childatt->attinhcount <= 0) /* shouldn't happen */
+ if (childatt->attinhcount <= 0) /* shouldn't happen */
elog(ERROR, "relation %u has non-inherited attribute \"%s\"",
childrelid, colName);
childatt->attinhcount--;
@@ -2731,9 +2734,9 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
}
/*
- * Propagate to children if desired. Unlike most other ALTER routines,
- * we have to do this one level of recursion at a time; we can't use
- * find_all_inheritors to do it in one pass.
+ * Propagate to children if desired. Unlike most other ALTER
+ * routines, we have to do this one level of recursion at a time; we
+ * can't use find_all_inheritors to do it in one pass.
*/
if (recurse)
{
@@ -2763,7 +2766,7 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
colName, childrelid);
childatt = (Form_pg_attribute) GETSTRUCT(tuple);
- if (childatt->attinhcount <= 0) /* shouldn't happen */
+ if (childatt->attinhcount <= 0) /* shouldn't happen */
elog(ERROR, "relation %u has non-inherited attribute \"%s\"",
childrelid, colName);
@@ -2882,18 +2885,18 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
{
if (ConstraintNameIsUsed(CONSTRAINT_RELATION,
RelationGetRelid(rel),
- RelationGetNamespace(rel),
+ RelationGetNamespace(rel),
constr->name))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("constraint \"%s\" for relation \"%s\" already exists",
constr->name,
- RelationGetRelationName(rel))));
+ RelationGetRelationName(rel))));
}
else
constr->name = GenerateConstraintName(CONSTRAINT_RELATION,
- RelationGetRelid(rel),
- RelationGetNamespace(rel),
+ RelationGetRelid(rel),
+ RelationGetNamespace(rel),
&counter);
/*
@@ -2923,14 +2926,14 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
if (fkconstraint->constr_name)
{
if (ConstraintNameIsUsed(CONSTRAINT_RELATION,
- RelationGetRelid(rel),
+ RelationGetRelid(rel),
RelationGetNamespace(rel),
fkconstraint->constr_name))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("constraint \"%s\" for relation \"%s\" already exists",
fkconstraint->constr_name,
- RelationGetRelationName(rel))));
+ RelationGetRelationName(rel))));
}
else
fkconstraint->constr_name = GenerateConstraintName(CONSTRAINT_RELATION,
@@ -2959,7 +2962,7 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
/*
* Add a check constraint to a single table
*
- * Subroutine for AlterTableAddConstraint. Must already hold exclusive
+ * Subroutine for AlterTableAddConstraint. Must already hold exclusive
* lock on the rel, and have done appropriate validity/permissions checks
* for it.
*/
@@ -2979,13 +2982,13 @@ AlterTableAddCheckConstraint(Relation rel, Constraint *constr)
Node *expr;
/*
- * We need to make a parse state and range
- * table to allow us to do transformExpr()
+ * We need to make a parse state and range table to allow us to do
+ * transformExpr()
*/
pstate = make_parsestate(NULL);
rte = addRangeTableEntryForRelation(pstate,
RelationGetRelid(rel),
- makeAlias(RelationGetRelationName(rel), NIL),
+ makeAlias(RelationGetRelationName(rel), NIL),
false,
true);
addRTEtoQuery(pstate, rte, true, true);
@@ -3006,8 +3009,8 @@ AlterTableAddCheckConstraint(Relation rel, Constraint *constr)
if (length(pstate->p_rtable) != 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("CHECK constraint may only reference relation \"%s\"",
- RelationGetRelationName(rel))));
+ errmsg("CHECK constraint may only reference relation \"%s\"",
+ RelationGetRelationName(rel))));
/*
* No subplans or aggregates, either...
@@ -3070,15 +3073,13 @@ AlterTableAddCheckConstraint(Relation rel, Constraint *constr)
if (!successful)
ereport(ERROR,
(errcode(ERRCODE_CHECK_VIOLATION),
- errmsg("CHECK constraint \"%s\" is violated at some row(s)",
- constr->name)));
+ errmsg("CHECK constraint \"%s\" is violated at some row(s)",
+ constr->name)));
/*
- * Call AddRelationRawConstraints to do
- * the real adding -- It duplicates some
- * of the above, but does not check the
- * validity of the constraint against
- * tuples already in the table.
+ * Call AddRelationRawConstraints to do the real adding -- It
+ * duplicates some of the above, but does not check the validity of
+ * the constraint against tuples already in the table.
*/
AddRelationRawConstraints(rel, NIL, makeList1(constr));
}
@@ -3086,7 +3087,7 @@ AlterTableAddCheckConstraint(Relation rel, Constraint *constr)
/*
* Add a foreign-key constraint to a single table
*
- * Subroutine for AlterTableAddConstraint. Must already hold exclusive
+ * Subroutine for AlterTableAddConstraint. Must already hold exclusive
* lock on the rel, and have done appropriate validity/permissions checks
* for it.
*/
@@ -3106,12 +3107,11 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
Oid constrOid;
/*
- * Grab an exclusive lock on the pk table, so that
- * someone doesn't delete rows out from under us.
- * (Although a lesser lock would do for that purpose,
- * we'll need exclusive lock anyway to add triggers to
- * the pk table; trying to start with a lesser lock
- * will just create a risk of deadlock.)
+ * Grab an exclusive lock on the pk table, so that someone doesn't
+ * delete rows out from under us. (Although a lesser lock would do for
+ * that purpose, we'll need exclusive lock anyway to add triggers to
+ * the pk table; trying to start with a lesser lock will just create a
+ * risk of deadlock.)
*/
pkrel = heap_openrv(fkconstraint->pktable, AccessExclusiveLock);
@@ -3152,8 +3152,8 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
errmsg("cannot reference temporary table from permanent table constraint")));
/*
- * Look up the referencing attributes to make sure they
- * exist, and record their attnums and type OIDs.
+ * Look up the referencing attributes to make sure they exist, and
+ * record their attnums and type OIDs.
*/
for (i = 0; i < INDEX_MAX_KEYS; i++)
{
@@ -3166,10 +3166,10 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
fkattnum, fktypoid);
/*
- * If the attribute list for the referenced table was omitted,
- * lookup the definition of the primary key and use it. Otherwise,
- * validate the supplied attribute list. In either case, discover
- * the index OID and the attnums and type OIDs of the attributes.
+ * If the attribute list for the referenced table was omitted, lookup
+ * the definition of the primary key and use it. Otherwise, validate
+ * the supplied attribute list. In either case, discover the index
+ * OID and the attnums and type OIDs of the attributes.
*/
if (fkconstraint->pk_attrs == NIL)
{
@@ -3208,8 +3208,8 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
}
/*
- * Check that the constraint is satisfied by existing
- * rows (we can skip this during table creation).
+ * Check that the constraint is satisfied by existing rows (we can
+ * skip this during table creation).
*/
if (!fkconstraint->skip_validation)
validateForeignKeyConstraint(fkconstraint, rel, pkrel);
@@ -3225,7 +3225,8 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
RelationGetRelid(rel),
fkattnum,
numfks,
- InvalidOid, /* not a domain constraint */
+ InvalidOid, /* not a domain
+ * constraint */
RelationGetRelid(pkrel),
pkattnum,
numpks,
@@ -3233,7 +3234,7 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
fkconstraint->fk_del_action,
fkconstraint->fk_matchtype,
indexOid,
- NULL, /* no check constraint */
+ NULL, /* no check constraint */
NULL,
NULL);
@@ -3276,8 +3277,8 @@ transformColumnNameList(Oid relId, List *colList,
if (attnum >= INDEX_MAX_KEYS)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_COLUMNS),
- errmsg("cannot have more than %d keys in a foreign key",
- INDEX_MAX_KEYS)));
+ errmsg("cannot have more than %d keys in a foreign key",
+ INDEX_MAX_KEYS)));
attnums[attnum] = ((Form_pg_attribute) GETSTRUCT(atttuple))->attnum;
atttypids[attnum] = ((Form_pg_attribute) GETSTRUCT(atttuple))->atttypid;
ReleaseSysCache(atttuple);
@@ -3291,7 +3292,7 @@ transformColumnNameList(Oid relId, List *colList,
* transformFkeyGetPrimaryKey -
*
* Look up the names, attnums, and types of the primary key attributes
- * for the pkrel. Used when the column list in the REFERENCES specification
+ * for the pkrel. Used when the column list in the REFERENCES specification
* is omitted.
*/
static int
@@ -3339,12 +3340,12 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
if (indexStruct == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("there is no PRIMARY KEY for referenced table \"%s\"",
- RelationGetRelationName(pkrel))));
+ errmsg("there is no PRIMARY KEY for referenced table \"%s\"",
+ RelationGetRelationName(pkrel))));
/*
- * Now build the list of PK attributes from the indkey definition
- * (we assume a primary key cannot have expressional elements)
+ * Now build the list of PK attributes from the indkey definition (we
+ * assume a primary key cannot have expressional elements)
*/
*attnamelist = NIL;
for (i = 0; i < indexStruct->indnatts; i++)
@@ -3389,7 +3390,8 @@ transformFkeyCheckAttrs(Relation pkrel,
{
HeapTuple indexTuple;
Form_pg_index indexStruct;
- int i, j;
+ int i,
+ j;
indexoid = lfirsto(indexoidscan);
indexTuple = SearchSysCache(INDEXRELID,
@@ -3453,7 +3455,7 @@ transformFkeyCheckAttrs(Relation pkrel,
ereport(ERROR,
(errcode(ERRCODE_INVALID_FOREIGN_KEY),
errmsg("there is no UNIQUE constraint matching given keys for referenced table \"%s\"",
- RelationGetRelationName(pkrel))));
+ RelationGetRelationName(pkrel))));
freeList(indexoidlist);
@@ -3969,17 +3971,17 @@ AlterTableOwner(Oid relationOid, int32 newOwnerSysId)
void
AlterTableClusterOn(Oid relOid, const char *indexName)
{
- Relation rel,
- pg_index;
- List *index;
- Oid indexOid;
- HeapTuple indexTuple;
- Form_pg_index indexForm;
-
+ Relation rel,
+ pg_index;
+ List *index;
+ Oid indexOid;
+ HeapTuple indexTuple;
+ Form_pg_index indexForm;
+
rel = heap_open(relOid, AccessExclusiveLock);
indexOid = get_relname_relid(indexName, rel->rd_rel->relnamespace);
-
+
if (!OidIsValid(indexOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
@@ -3994,36 +3996,37 @@ AlterTableClusterOn(Oid relOid, const char *indexName)
indexForm = (Form_pg_index) GETSTRUCT(indexTuple);
/*
- * If this is the same index the relation was previously
- * clustered on, no need to do anything.
+ * If this is the same index the relation was previously clustered on,
+ * no need to do anything.
*/
if (indexForm->indisclustered)
{
ereport(NOTICE,
- (errmsg("table \"%s\" is already being clustered on index \"%s\"",
- NameStr(rel->rd_rel->relname), indexName)));
+ (errmsg("table \"%s\" is already being clustered on index \"%s\"",
+ NameStr(rel->rd_rel->relname), indexName)));
ReleaseSysCache(indexTuple);
heap_close(rel, NoLock);
return;
}
pg_index = heap_openr(IndexRelationName, RowExclusiveLock);
-
+
/*
* Now check each index in the relation and set the bit where needed.
*/
- foreach (index, RelationGetIndexList(rel))
+ foreach(index, RelationGetIndexList(rel))
{
- HeapTuple idxtuple;
- Form_pg_index idxForm;
-
+ HeapTuple idxtuple;
+ Form_pg_index idxForm;
+
indexOid = lfirsto(index);
idxtuple = SearchSysCacheCopy(INDEXRELID,
- ObjectIdGetDatum(indexOid),
+ ObjectIdGetDatum(indexOid),
0, 0, 0);
if (!HeapTupleIsValid(idxtuple))
elog(ERROR, "cache lookup failed for index %u", indexOid);
idxForm = (Form_pg_index) GETSTRUCT(idxtuple);
+
/*
* Unset the bit if set. We know it's wrong because we checked
* this earlier.
@@ -4100,7 +4103,7 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
if (shared_relation && IsUnderPostmaster)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("shared relations cannot be toasted after initdb")));
+ errmsg("shared relations cannot be toasted after initdb")));
/*
* Is it already toasted?
@@ -4331,12 +4334,12 @@ needs_toast_table(Relation rel)
void
register_on_commit_action(Oid relid, OnCommitAction action)
{
- OnCommitItem *oc;
+ OnCommitItem *oc;
MemoryContext oldcxt;
/*
- * We needn't bother registering the relation unless there is an ON COMMIT
- * action we need to take.
+ * We needn't bother registering the relation unless there is an ON
+ * COMMIT action we need to take.
*/
if (action == ONCOMMIT_NOOP || action == ONCOMMIT_PRESERVE_ROWS)
return;
@@ -4366,7 +4369,7 @@ remove_on_commit_action(Oid relid)
foreach(l, on_commits)
{
- OnCommitItem *oc = (OnCommitItem *) lfirst(l);
+ OnCommitItem *oc = (OnCommitItem *) lfirst(l);
if (oc->relid == relid)
{
@@ -4389,7 +4392,7 @@ PreCommit_on_commit_actions(void)
foreach(l, on_commits)
{
- OnCommitItem *oc = (OnCommitItem *) lfirst(l);
+ OnCommitItem *oc = (OnCommitItem *) lfirst(l);
/* Ignore entry if already dropped in this xact */
if (oc->deleted_in_cur_xact)
@@ -4403,23 +4406,25 @@ PreCommit_on_commit_actions(void)
break;
case ONCOMMIT_DELETE_ROWS:
heap_truncate(oc->relid);
- CommandCounterIncrement(); /* XXX needed? */
+ CommandCounterIncrement(); /* XXX needed? */
break;
case ONCOMMIT_DROP:
- {
- ObjectAddress object;
+ {
+ ObjectAddress object;
- object.classId = RelOid_pg_class;
- object.objectId = oc->relid;
- object.objectSubId = 0;
- performDeletion(&object, DROP_CASCADE);
- /*
- * Note that table deletion will call remove_on_commit_action,
- * so the entry should get marked as deleted.
- */
- Assert(oc->deleted_in_cur_xact);
- break;
- }
+ object.classId = RelOid_pg_class;
+ object.objectId = oc->relid;
+ object.objectSubId = 0;
+ performDeletion(&object, DROP_CASCADE);
+
+ /*
+ * Note that table deletion will call
+ * remove_on_commit_action, so the entry should get
+ * marked as deleted.
+ */
+ Assert(oc->deleted_in_cur_xact);
+ break;
+ }
}
}
}
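The hunks above reindent the ON COMMIT bookkeeping: register_on_commit_action queues one item per relation, and PreCommit_on_commit_actions later walks that queue to truncate or drop each relation before commit. Below is a minimal standalone sketch of that register-then-replay pattern; the enum values, struct layout, and printf calls are simplified stand-ins (not the backend's OnCommitItem, heap_truncate, or performDeletion).

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the backend's OnCommitAction/OnCommitItem. */
typedef enum
{
	ONCOMMIT_NOOP,
	ONCOMMIT_DELETE_ROWS,
	ONCOMMIT_DROP
} OnCommitAction;

typedef struct OnCommitItem
{
	unsigned int		 relid;		/* relation the action applies to */
	OnCommitAction		 action;	/* what to do at commit */
	struct OnCommitItem *next;
} OnCommitItem;

static OnCommitItem *on_commits = NULL;

/* Queue an action; no-op actions are never remembered. */
static void
register_on_commit_action(unsigned int relid, OnCommitAction action)
{
	OnCommitItem *oc;

	if (action == ONCOMMIT_NOOP)
		return;
	oc = malloc(sizeof(OnCommitItem));
	if (oc == NULL)
		return;
	oc->relid = relid;
	oc->action = action;
	oc->next = on_commits;
	on_commits = oc;
}

/* Replay the queued actions just before commit. */
static void
precommit_on_commit_actions(void)
{
	OnCommitItem *oc;

	for (oc = on_commits; oc != NULL; oc = oc->next)
	{
		if (oc->action == ONCOMMIT_DELETE_ROWS)
			printf("truncate relation %u\n", oc->relid);	/* heap_truncate() in the backend */
		else if (oc->action == ONCOMMIT_DROP)
			printf("drop relation %u\n", oc->relid);	/* performDeletion() in the backend */
	}
}

int
main(void)
{
	register_on_commit_action(16384, ONCOMMIT_DELETE_ROWS);
	register_on_commit_action(16385, ONCOMMIT_DROP);
	precommit_on_commit_actions();
	return 0;
}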
@@ -4442,7 +4447,7 @@ AtEOXact_on_commit_actions(bool isCommit)
l = on_commits;
while (l != NIL)
{
- OnCommitItem *oc = (OnCommitItem *) lfirst(l);
+ OnCommitItem *oc = (OnCommitItem *) lfirst(l);
if (isCommit ? oc->deleted_in_cur_xact :
oc->created_in_cur_xact)
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 6e5b38804ff..d3e969c7e4f 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.153 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.154 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,17 +41,17 @@
static void InsertTrigger(TriggerDesc *trigdesc, Trigger *trigger, int indx);
static HeapTuple GetTupleForTrigger(EState *estate,
- ResultRelInfo *relinfo,
- ItemPointer tid,
- CommandId cid,
- TupleTableSlot **newSlot);
+ ResultRelInfo *relinfo,
+ ItemPointer tid,
+ CommandId cid,
+ TupleTableSlot **newSlot);
static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
FmgrInfo *finfo,
MemoryContext per_tuple_context);
static void DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event,
- bool row_trigger, HeapTuple oldtup, HeapTuple newtup);
+ bool row_trigger, HeapTuple oldtup, HeapTuple newtup);
static void DeferredTriggerExecute(DeferredTriggerEvent event, int itemno,
- Relation rel, TriggerDesc *trigdesc, FmgrInfo *finfo,
+ Relation rel, TriggerDesc *trigdesc, FmgrInfo *finfo,
MemoryContext per_tuple_context);
@@ -97,18 +97,19 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
else if (stmt->isconstraint)
{
/*
- * If this trigger is a constraint (and a foreign key one)
- * then we really need a constrrelid. Since we don't have one,
- * we'll try to generate one from the argument information.
+ * If this trigger is a constraint (and a foreign key one) then we
+ * really need a constrrelid. Since we don't have one, we'll try
+ * to generate one from the argument information.
*
- * This is really just a workaround for a long-ago pg_dump bug
- * that omitted the FROM clause in dumped CREATE CONSTRAINT TRIGGER
- * commands. We don't want to bomb out completely here if we can't
- * determine the correct relation, because that would prevent loading
- * the dump file. Instead, NOTICE here and ERROR in the trigger.
+ * This is really just a workaround for a long-ago pg_dump bug that
+ * omitted the FROM clause in dumped CREATE CONSTRAINT TRIGGER
+ * commands. We don't want to bomb out completely here if we
+ * can't determine the correct relation, because that would
+ * prevent loading the dump file. Instead, NOTICE here and ERROR
+ * in the trigger.
*/
- bool needconstrrelid = false;
- void *elem = NULL;
+ bool needconstrrelid = false;
+ void *elem = NULL;
if (strncmp(strVal(llast(stmt->funcname)), "RI_FKey_check_", 14) == 0)
{
@@ -265,8 +266,8 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
if (namestrcmp(&(pg_trigger->tgname), trigname) == 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" already exists",
- trigname, stmt->relation->relname)));
+ errmsg("trigger \"%s\" for relation \"%s\" already exists",
+ trigname, stmt->relation->relname)));
found++;
}
systable_endscan(tgscan);
@@ -280,7 +281,7 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
if (funcrettype != TRIGGEROID)
{
/*
- * We allow OPAQUE just so we can load old dump files. When we
+ * We allow OPAQUE just so we can load old dump files. When we
* see a trigger function declared OPAQUE, change it to TRIGGER.
*/
if (funcrettype == OPAQUEOID)
@@ -480,8 +481,8 @@ DropTrigger(Oid relid, const char *trigname, DropBehavior behavior)
if (!HeapTupleIsValid(tup))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" does not exist",
- trigname, get_rel_name(relid))));
+ errmsg("trigger \"%s\" for relation \"%s\" does not exist",
+ trigname, get_rel_name(relid))));
if (!pg_class_ownercheck(relid, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
@@ -576,7 +577,7 @@ RemoveTriggerById(Oid trigOid)
elog(ERROR, "cache lookup failed for relation %u", relid);
classForm = (Form_pg_class) GETSTRUCT(tuple);
- if (classForm->reltriggers == 0) /* should not happen */
+ if (classForm->reltriggers == 0) /* should not happen */
elog(ERROR, "relation \"%s\" has reltriggers = 0",
RelationGetRelationName(rel));
classForm->reltriggers--;
@@ -650,8 +651,8 @@ renametrig(Oid relid,
if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" already exists",
- newname, RelationGetRelationName(targetrel))));
+ errmsg("trigger \"%s\" for relation \"%s\" already exists",
+ newname, RelationGetRelationName(targetrel))));
systable_endscan(tgscan);
/*
@@ -693,8 +694,8 @@ renametrig(Oid relid,
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" does not exist",
- oldname, RelationGetRelationName(targetrel))));
+ errmsg("trigger \"%s\" for relation \"%s\" does not exist",
+ oldname, RelationGetRelationName(targetrel))));
}
systable_endscan(tgscan);
@@ -762,7 +763,7 @@ RelationBuildTriggers(Relation relation)
build->tgoid = HeapTupleGetOid(htup);
build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(&pg_trigger->tgname)));
+ NameGetDatum(&pg_trigger->tgname)));
build->tgfoid = pg_trigger->tgfoid;
build->tgtype = pg_trigger->tgtype;
build->tgenabled = pg_trigger->tgenabled;
@@ -927,8 +928,8 @@ CopyTriggerDesc(TriggerDesc *trigdesc)
trigger->tgname = pstrdup(trigger->tgname);
if (trigger->tgnargs > 0)
{
- char **newargs;
- int16 j;
+ char **newargs;
+ int16 j;
newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
for (j = 0; j < trigger->tgnargs; j++)
@@ -1101,7 +1102,7 @@ equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
return false;
return true;
}
-#endif /* NOT_USED */
+#endif /* NOT_USED */
/*
* Call a trigger function.
@@ -1166,10 +1167,10 @@ ExecCallTriggerFunc(TriggerData *trigdata,
void
ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
{
- TriggerDesc *trigdesc;
- int ntrigs;
- int *tgindx;
- int i;
+ TriggerDesc *trigdesc;
+ int ntrigs;
+ int *tgindx;
+ int i;
TriggerData LocTriggerData;
trigdesc = relinfo->ri_TrigDesc;
@@ -1190,10 +1191,10 @@ ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
- TRIGGER_EVENT_BEFORE;
- LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
- LocTriggerData.tg_newtuple = NULL;
- LocTriggerData.tg_trigtuple = NULL;
+ TRIGGER_EVENT_BEFORE;
+ LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
+ LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_trigtuple = NULL;
for (i = 0; i < ntrigs; i++)
{
Trigger *trigger = &trigdesc->triggers[tgindx[i]];
@@ -1209,7 +1210,7 @@ ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
if (newtuple)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("BEFORE STATEMENT trigger cannot return a value")));
+ errmsg("BEFORE STATEMENT trigger cannot return a value")));
}
}
@@ -1242,8 +1243,8 @@ ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
- TRIGGER_EVENT_ROW |
- TRIGGER_EVENT_BEFORE;
+ TRIGGER_EVENT_ROW |
+ TRIGGER_EVENT_BEFORE;
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
LocTriggerData.tg_newtuple = NULL;
for (i = 0; i < ntrigs; i++)
@@ -1279,10 +1280,10 @@ ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
void
ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
{
- TriggerDesc *trigdesc;
- int ntrigs;
- int *tgindx;
- int i;
+ TriggerDesc *trigdesc;
+ int ntrigs;
+ int *tgindx;
+ int i;
TriggerData LocTriggerData;
trigdesc = relinfo->ri_TrigDesc;
@@ -1303,10 +1304,10 @@ ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
- TRIGGER_EVENT_BEFORE;
- LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
- LocTriggerData.tg_newtuple = NULL;
- LocTriggerData.tg_trigtuple = NULL;
+ TRIGGER_EVENT_BEFORE;
+ LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
+ LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_trigtuple = NULL;
for (i = 0; i < ntrigs; i++)
{
Trigger *trigger = &trigdesc->triggers[tgindx[i]];
@@ -1322,7 +1323,7 @@ ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
if (newtuple)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("BEFORE STATEMENT trigger cannot return a value")));
+ errmsg("BEFORE STATEMENT trigger cannot return a value")));
}
}
@@ -1361,8 +1362,8 @@ ExecBRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
- TRIGGER_EVENT_ROW |
- TRIGGER_EVENT_BEFORE;
+ TRIGGER_EVENT_ROW |
+ TRIGGER_EVENT_BEFORE;
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
LocTriggerData.tg_newtuple = NULL;
for (i = 0; i < ntrigs; i++)
@@ -1408,10 +1409,10 @@ ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
void
ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
{
- TriggerDesc *trigdesc;
- int ntrigs;
- int *tgindx;
- int i;
+ TriggerDesc *trigdesc;
+ int ntrigs;
+ int *tgindx;
+ int i;
TriggerData LocTriggerData;
trigdesc = relinfo->ri_TrigDesc;
@@ -1432,10 +1433,10 @@ ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
- TRIGGER_EVENT_BEFORE;
- LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
- LocTriggerData.tg_newtuple = NULL;
- LocTriggerData.tg_trigtuple = NULL;
+ TRIGGER_EVENT_BEFORE;
+ LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
+ LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_trigtuple = NULL;
for (i = 0; i < ntrigs; i++)
{
Trigger *trigger = &trigdesc->triggers[tgindx[i]];
@@ -1451,7 +1452,7 @@ ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
if (newtuple)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("BEFORE STATEMENT trigger cannot return a value")));
+ errmsg("BEFORE STATEMENT trigger cannot return a value")));
}
}
@@ -1498,8 +1499,8 @@ ExecBRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
- TRIGGER_EVENT_ROW |
- TRIGGER_EVENT_BEFORE;
+ TRIGGER_EVENT_ROW |
+ TRIGGER_EVENT_BEFORE;
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
for (i = 0; i < ntrigs; i++)
{
@@ -1639,19 +1640,20 @@ ltrmark:;
* ----------
*/
-typedef struct DeferredTriggersData {
- /* Internal data is held in a per-transaction memory context */
- MemoryContext deftrig_cxt;
- /* ALL DEFERRED or ALL IMMEDIATE */
- bool deftrig_all_isset;
- bool deftrig_all_isdeferred;
- /* Per trigger state */
- List *deftrig_trigstates;
- /* List of pending deferred triggers. Previous comment below */
- DeferredTriggerEvent deftrig_events;
- DeferredTriggerEvent deftrig_events_imm;
- DeferredTriggerEvent deftrig_event_tail;
-} DeferredTriggersData;
+typedef struct DeferredTriggersData
+{
+ /* Internal data is held in a per-transaction memory context */
+ MemoryContext deftrig_cxt;
+ /* ALL DEFERRED or ALL IMMEDIATE */
+ bool deftrig_all_isset;
+ bool deftrig_all_isdeferred;
+ /* Per trigger state */
+ List *deftrig_trigstates;
+ /* List of pending deferred triggers. Previous comment below */
+ DeferredTriggerEvent deftrig_events;
+ DeferredTriggerEvent deftrig_events_imm;
+ DeferredTriggerEvent deftrig_event_tail;
+} DeferredTriggersData;
/* ----------
* deftrig_events, deftrig_event_tail:
@@ -1661,8 +1663,8 @@ typedef struct DeferredTriggersData {
* Because this can grow pretty large, we don't use separate List nodes,
* but instead thread the list through the dte_next fields of the member
* nodes. Saves just a few bytes per entry, but that adds up.
- *
- * deftrig_events_imm holds the tail pointer as of the last
+ *
+ * deftrig_events_imm holds the tail pointer as of the last
* deferredTriggerInvokeEvents call; we can use this to avoid rescanning
* entries unnecessarily. It is NULL if deferredTriggerInvokeEvents
* hasn't run since the last state change.
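The comment rewrapped above describes the deferred-trigger event queue: entries are threaded through their own next fields instead of separate List cells, and deftrig_events_imm remembers how far the last immediate-only scan got so later scans need not revisit already-fired events. A toy sketch of that idea follows, with simplified names and no claim to match the real DeferredTriggerEvent layout.

#include <stdio.h>

/* Event node; the queue is threaded through the nodes themselves. */
typedef struct Event
{
	int				id;
	int				fired;
	struct Event   *next;
} Event;

static Event   *events_head = NULL;
static Event   *events_tail = NULL;
static Event   *events_imm = NULL;	/* tail as of the last immediate-only pass */

/* Append an event without allocating a separate list cell. */
static void
add_event(Event *ev)
{
	ev->next = NULL;
	if (events_tail)
		events_tail->next = ev;
	else
		events_head = ev;
	events_tail = ev;
}

/* Fire pending events; if immediate_only, resume after the saved position. */
static void
invoke_events(int immediate_only)
{
	Event  *ev;

	ev = (immediate_only && events_imm != NULL) ? events_imm->next : events_head;
	for (; ev != NULL; ev = ev->next)
		ev->fired = 1;
	events_imm = events_tail;	/* remember how far this pass went */
}

int
main(void)
{
	Event	a = {1, 0, NULL};
	Event	b = {2, 0, NULL};

	add_event(&a);
	invoke_events(1);			/* fires a */
	add_event(&b);
	invoke_events(1);			/* scans only b, not a again */
	printf("%d %d\n", a.fired, b.fired);
	return 0;
}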
@@ -1674,7 +1676,7 @@ typedef struct DeferredTriggersData {
typedef DeferredTriggersData *DeferredTriggers;
-static DeferredTriggers deferredTriggers;
+static DeferredTriggers deferredTriggers;
/* ----------
* deferredTriggerCheckState()
@@ -1783,7 +1785,7 @@ deferredTriggerAddEvent(DeferredTriggerEvent event)
*/
static void
DeferredTriggerExecute(DeferredTriggerEvent event, int itemno,
- Relation rel, TriggerDesc *trigdesc, FmgrInfo *finfo,
+ Relation rel, TriggerDesc *trigdesc, FmgrInfo *finfo,
MemoryContext per_tuple_context)
{
Oid tgoid = event->dte_item[itemno].dti_tgoid;
@@ -1817,7 +1819,7 @@ DeferredTriggerExecute(DeferredTriggerEvent event, int itemno,
*/
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = (event->dte_event & TRIGGER_EVENT_OPMASK) |
- (event->dte_event & TRIGGER_EVENT_ROW);
+ (event->dte_event & TRIGGER_EVENT_ROW);
LocTriggerData.tg_relation = rel;
LocTriggerData.tg_trigger = NULL;
@@ -1899,12 +1901,12 @@ deferredTriggerInvokeEvents(bool immediate_only)
* are going to discard the whole event queue on return anyway, so no
* need to bother with "retail" pfree's.
*
- * If immediate_only is true, we need only scan from where the end of
- * the queue was at the previous deferredTriggerInvokeEvents call;
- * any non-deferred events before that point are already fired.
- * (But if the deferral state changes, we must reset the saved position
- * to the beginning of the queue, so as to process all events once with
- * the new states. See DeferredTriggerSetState.)
+ * If immediate_only is true, we need only scan from where the end of the
+ * queue was at the previous deferredTriggerInvokeEvents call; any
+ * non-deferred events before that point are already fired. (But if
+ * the deferral state changes, we must reset the saved position to the
+ * beginning of the queue, so as to process all events once with the
+ * new states. See DeferredTriggerSetState.)
*/
/* Make a per-tuple memory context for trigger function calls */
@@ -1916,9 +1918,9 @@ deferredTriggerInvokeEvents(bool immediate_only)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * If immediate_only is true, then the only events that could need firing
- * are those since deftrig_events_imm. (But if deftrig_events_imm is
- * NULL, we must scan the entire list.)
+ * If immediate_only is true, then the only events that could need
+ * firing are those since deftrig_events_imm. (But if
+ * deftrig_events_imm is NULL, we must scan the entire list.)
*/
if (immediate_only && deferredTriggers->deftrig_events_imm != NULL)
{
@@ -1984,17 +1986,18 @@ deferredTriggerInvokeEvents(bool immediate_only)
rel = heap_open(event->dte_relid, NoLock);
/*
- * Copy relation's trigger info so that we have a stable
- * copy no matter what the called triggers do.
+ * Copy relation's trigger info so that we have a
+ * stable copy no matter what the called triggers do.
*/
trigdesc = CopyTriggerDesc(rel->trigdesc);
- if (trigdesc == NULL) /* should not happen */
+ if (trigdesc == NULL) /* should not happen */
elog(ERROR, "relation %u has no triggers",
event->dte_relid);
/*
- * Allocate space to cache fmgr lookup info for triggers.
+ * Allocate space to cache fmgr lookup info for
+ * triggers.
*/
finfo = (FmgrInfo *)
palloc0(trigdesc->numtriggers * sizeof(FmgrInfo));
@@ -2089,21 +2092,23 @@ void
DeferredTriggerBeginXact(void)
{
/*
- * This will be changed to a special context when
- * the nested transactions project moves forward.
+ * This will be changed to a special context when the nested
+ * transactions project moves forward.
*/
MemoryContext cxt = TopTransactionContext;
+
deferredTriggers = (DeferredTriggers) MemoryContextAlloc(TopTransactionContext,
- sizeof(DeferredTriggersData));
+ sizeof(DeferredTriggersData));
/*
* Create the per transaction memory context
*/
deferredTriggers->deftrig_cxt = AllocSetContextCreate(cxt,
- "DeferredTriggerXact",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ "DeferredTriggerXact",
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
+
/*
* If unspecified, constraints default to IMMEDIATE, per SQL
*/
@@ -2174,7 +2179,7 @@ DeferredTriggerAbortXact(void)
* Ignore call if we aren't in a transaction.
*/
if (deferredTriggers == NULL)
- return;
+ return;
/*
* Forget everything we know about deferred triggers.
@@ -2255,7 +2260,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
if (strlen(cname) == 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("unnamed constraints cannot be set explicitly")));
+ errmsg("unnamed constraints cannot be set explicitly")));
/*
* Setup to scan pg_trigger by tgconstrname ...
@@ -2304,7 +2309,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
if (!found)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("constraint \"%s\" does not exist", cname)));
+ errmsg("constraint \"%s\" does not exist", cname)));
}
heap_close(tgrel, AccessShareLock);
@@ -2349,9 +2354,10 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
* CONSTRAINTS command applies retroactively. This happens "for free"
* since we have already made the necessary modifications to the
* constraints, and deferredTriggerEndQuery() is called by
- * finish_xact_command(). But we must reset deferredTriggerInvokeEvents'
- * tail pointer to make it rescan the entire list, in case some deferred
- * events are now immediately invokable.
+ * finish_xact_command(). But we must reset
+ * deferredTriggerInvokeEvents' tail pointer to make it rescan the
+ * entire list, in case some deferred events are now immediately
+ * invokable.
*/
deferredTriggers->deftrig_events_imm = NULL;
}
@@ -2416,7 +2422,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger,
*/
for (i = 0; i < ntriggers; i++)
{
- Trigger *trigger = &trigdesc->triggers[tgindx[i]];
+ Trigger *trigger = &trigdesc->triggers[tgindx[i]];
if (trigger->tgenabled)
n_enabled_triggers++;
@@ -2455,7 +2461,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger,
ev_item = &(new_event->dte_item[i]);
ev_item->dti_tgoid = trigger->tgoid;
- ev_item->dti_state =
+ ev_item->dti_state =
((trigger->tgdeferrable) ?
TRIGGER_DEFERRED_DEFERRABLE : 0) |
((trigger->tginitdeferred) ?
@@ -2464,9 +2470,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger,
if (row_trigger && (trigdesc->n_before_row[event] > 0))
ev_item->dti_state |= TRIGGER_DEFERRED_HAS_BEFORE;
else if (!row_trigger && (trigdesc->n_before_statement[event] > 0))
- {
ev_item->dti_state |= TRIGGER_DEFERRED_HAS_BEFORE;
- }
}
MemoryContextSwitchTo(oldcxt);
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index 275143c1517..57bc7c5f71f 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/typecmds.c,v 1.40 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/typecmds.c,v 1.41 2003/08/04 00:43:17 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -66,11 +66,11 @@
/* result structure for get_rels_with_domain() */
typedef struct
{
- Relation rel; /* opened and locked relation */
- int natts; /* number of attributes of interest */
- int *atts; /* attribute numbers */
+ Relation rel; /* opened and locked relation */
+ int natts; /* number of attributes of interest */
+ int *atts; /* attribute numbers */
/* atts[] is of allocated length RelationGetNumberOfAttributes(rel) */
-} RelToCheck;
+} RelToCheck;
static Oid findTypeInputFunction(List *procname, Oid typeOid);
@@ -80,9 +80,9 @@ static Oid findTypeSendFunction(List *procname, Oid typeOid);
static List *get_rels_with_domain(Oid domainOid, LOCKMODE lockmode);
static void domainOwnerCheck(HeapTuple tup, TypeName *typename);
static char *domainAddConstraint(Oid domainOid, Oid domainNamespace,
- Oid baseTypeOid,
- int typMod, Constraint *constr,
- int *counter, char *domainName);
+ Oid baseTypeOid,
+ int typMod, Constraint *constr,
+ int *counter, char *domainName);
/*
@@ -105,7 +105,7 @@ DefineType(List *names, List *parameters)
bool byValue = false;
char delimiter = DEFAULT_TYPDELIM;
char alignment = 'i'; /* default alignment */
- char storage = 'p'; /* default TOAST storage method */
+ char storage = 'p'; /* default TOAST storage method */
Oid inputOid;
Oid outputOid;
Oid receiveOid = InvalidOid;
@@ -237,8 +237,8 @@ DefineType(List *names, List *parameters)
/*
* Look to see if type already exists (presumably as a shell; if not,
- * TypeCreate will complain). If it doesn't, create it as a shell,
- * so that the OID is known for use in the I/O function definitions.
+ * TypeCreate will complain). If it doesn't, create it as a shell, so
+ * that the OID is known for use in the I/O function definitions.
*/
typoid = GetSysCacheOid(TYPENAMENSP,
CStringGetDatum(typeName),
@@ -492,7 +492,7 @@ DefineDomain(CreateDomainStmt *stmt)
List *listptr;
Oid basetypeoid;
Oid domainoid;
- Form_pg_type baseType;
+ Form_pg_type baseType;
int counter = 0;
/* Convert list of names to a name and namespace */
@@ -508,10 +508,11 @@ DefineDomain(CreateDomainStmt *stmt)
/*
* Domainnames, unlike typenames don't need to account for the '_'
- * prefix. So they can be one character longer. (This test is presently
- * useless since the parser will have truncated the name to fit. But
- * leave it here since we may someday support arrays of domains, in
- * which case we'll be back to needing to enforce NAMEDATALEN-2.)
+ * prefix. So they can be one character longer. (This test is
+ * presently useless since the parser will have truncated the name to
+ * fit. But leave it here since we may someday support arrays of
+ * domains, in which case we'll be back to needing to enforce
+ * NAMEDATALEN-2.)
*/
if (strlen(domainName) > (NAMEDATALEN - 1))
ereport(ERROR,
@@ -581,8 +582,8 @@ DefineDomain(CreateDomainStmt *stmt)
basetypelem = baseType->typelem;
/*
- * Run through constraints manually to avoid the additional
- * processing conducted by DefineRelation() and friends.
+ * Run through constraints manually to avoid the additional processing
+ * conducted by DefineRelation() and friends.
*/
foreach(listptr, schema)
{
@@ -594,7 +595,7 @@ DefineDomain(CreateDomainStmt *stmt)
if (IsA(newConstraint, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("FOREIGN KEY constraints not supported for domains")));
+ errmsg("FOREIGN KEY constraints not supported for domains")));
/* otherwise it should be a plain Constraint */
if (!IsA(newConstraint, Constraint))
@@ -606,6 +607,7 @@ DefineDomain(CreateDomainStmt *stmt)
switch (constr->contype)
{
case CONSTR_DEFAULT:
+
/*
* The inherited default value may be overridden by the
* user with the DEFAULT <expr> statement.
@@ -643,7 +645,7 @@ DefineDomain(CreateDomainStmt *stmt)
if (nullDefined && !typNotNull)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("conflicting NULL/NOT NULL constraints")));
+ errmsg("conflicting NULL/NOT NULL constraints")));
typNotNull = true;
nullDefined = true;
break;
@@ -652,41 +654,42 @@ DefineDomain(CreateDomainStmt *stmt)
if (nullDefined && typNotNull)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("conflicting NULL/NOT NULL constraints")));
+ errmsg("conflicting NULL/NOT NULL constraints")));
typNotNull = false;
nullDefined = true;
- break;
+ break;
+
+ case CONSTR_CHECK:
- case CONSTR_CHECK:
/*
- * Check constraints are handled after domain creation, as they
- * require the Oid of the domain
+ * Check constraints are handled after domain creation, as
+ * they require the Oid of the domain
*/
- break;
+ break;
/*
* All else are error cases
*/
- case CONSTR_UNIQUE:
- ereport(ERROR,
+ case CONSTR_UNIQUE:
+ ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("UNIQUE constraints not supported for domains")));
- break;
+ errmsg("UNIQUE constraints not supported for domains")));
+ break;
- case CONSTR_PRIMARY:
- ereport(ERROR,
+ case CONSTR_PRIMARY:
+ ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("PRIMARY KEY constraints not supported for domains")));
- break;
+ break;
- case CONSTR_ATTR_DEFERRABLE:
- case CONSTR_ATTR_NOT_DEFERRABLE:
- case CONSTR_ATTR_DEFERRED:
- case CONSTR_ATTR_IMMEDIATE:
- ereport(ERROR,
+ case CONSTR_ATTR_DEFERRABLE:
+ case CONSTR_ATTR_NOT_DEFERRABLE:
+ case CONSTR_ATTR_DEFERRED:
+ case CONSTR_ATTR_IMMEDIATE:
+ ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("deferrability constraints not supported for domains")));
- break;
+ break;
default:
elog(ERROR, "unrecognized constraint subtype: %d",
@@ -715,15 +718,16 @@ DefineDomain(CreateDomainStmt *stmt)
basetypeoid, /* base type ID */
defaultValue, /* default type value (text) */
defaultValueBin, /* default type value (binary) */
- byValue, /* passed by value */
- alignment, /* required alignment */
- storage, /* TOAST strategy */
- stmt->typename->typmod, /* typeMod value */
- typNDims, /* Array dimensions for base type */
- typNotNull); /* Type NOT NULL */
+ byValue, /* passed by value */
+ alignment, /* required alignment */
+ storage, /* TOAST strategy */
+ stmt->typename->typmod, /* typeMod value */
+ typNDims, /* Array dimensions for base type */
+ typNotNull); /* Type NOT NULL */
/*
- * Process constraints which refer to the domain ID returned by TypeCreate
+ * Process constraints which refer to the domain ID returned by
+ * TypeCreate
*/
foreach(listptr, schema)
{
@@ -733,16 +737,16 @@ DefineDomain(CreateDomainStmt *stmt)
switch (constr->contype)
{
- case CONSTR_CHECK:
+ case CONSTR_CHECK:
domainAddConstraint(domainoid, domainNamespace,
basetypeoid, stmt->typename->typmod,
constr, &counter, domainName);
- break;
+ break;
- /* Other constraint types were fully processed above */
+ /* Other constraint types were fully processed above */
default:
- break;
+ break;
}
}
@@ -834,8 +838,8 @@ findTypeInputFunction(List *procname, Oid typeOid)
* Input functions can take a single argument of type CSTRING, or
* three arguments (string, element OID, typmod).
*
- * For backwards compatibility we allow OPAQUE in place of CSTRING;
- * if we see this, we issue a NOTICE and fix up the pg_proc entry.
+ * For backwards compatibility we allow OPAQUE in place of CSTRING; if we
+ * see this, we issue a NOTICE and fix up the pg_proc entry.
*/
MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid));
@@ -874,9 +878,10 @@ findTypeInputFunction(List *procname, Oid typeOid)
(errmsg("changing argument type of function %s from OPAQUE to CSTRING",
NameListToString(procname))));
SetFunctionArgType(procOid, 0, CSTRINGOID);
+
/*
- * Need CommandCounterIncrement since DefineType will likely
- * try to alter the pg_proc tuple again.
+ * Need CommandCounterIncrement since DefineType will likely try
+ * to alter the pg_proc tuple again.
*/
CommandCounterIncrement();
@@ -905,8 +910,8 @@ findTypeOutputFunction(List *procname, Oid typeOid)
* arguments (data value, element OID).
*
* For backwards compatibility we allow OPAQUE in place of the actual
- * type name; if we see this, we issue a NOTICE and fix up the
- * pg_proc entry.
+ * type name; if we see this, we issue a NOTICE and fix up the pg_proc
+ * entry.
*/
MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid));
@@ -940,12 +945,13 @@ findTypeOutputFunction(List *procname, Oid typeOid)
{
/* Found, but must complain and fix the pg_proc entry */
ereport(NOTICE,
- (errmsg("changing argument type of function %s from OPAQUE to %s",
- NameListToString(procname), format_type_be(typeOid))));
+ (errmsg("changing argument type of function %s from OPAQUE to %s",
+ NameListToString(procname), format_type_be(typeOid))));
SetFunctionArgType(procOid, 0, typeOid);
+
/*
- * Need CommandCounterIncrement since DefineType will likely
- * try to alter the pg_proc tuple again.
+ * Need CommandCounterIncrement since DefineType will likely try
+ * to alter the pg_proc tuple again.
*/
CommandCounterIncrement();
@@ -1050,7 +1056,7 @@ DefineCompositeType(const RangeVar *typevar, List *coldeflist)
if (coldeflist == NIL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("composite type must have at least one attribute")));
+ errmsg("composite type must have at least one attribute")));
/*
* now create the parameters for keys/inheritance etc. All of them are
@@ -1072,7 +1078,7 @@ DefineCompositeType(const RangeVar *typevar, List *coldeflist)
/*
* AlterDomainDefault
*
- * Routine implementing ALTER DOMAIN SET/DROP DEFAULT statements.
+ * Routine implementing ALTER DOMAIN SET/DROP DEFAULT statements.
*/
void
AlterDomainDefault(List *names, Node *defaultRaw)
@@ -1083,12 +1089,12 @@ AlterDomainDefault(List *names, Node *defaultRaw)
ParseState *pstate;
Relation rel;
char *defaultValue;
- Node *defaultExpr = NULL; /* NULL if no default specified */
+ Node *defaultExpr = NULL; /* NULL if no default specified */
Datum new_record[Natts_pg_type];
char new_record_nulls[Natts_pg_type];
char new_record_repl[Natts_pg_type];
HeapTuple newtuple;
- Form_pg_type typTup;
+ Form_pg_type typTup;
/* Make a TypeName so we can use standard type lookup machinery */
typename = makeNode(TypeName);
@@ -1113,7 +1119,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
if (!HeapTupleIsValid(tup))
elog(ERROR, "cache lookup failed for type %u", domainoid);
- /* Doesn't return if user isn't allowed to alter the domain */
+ /* Doesn't return if user isn't allowed to alter the domain */
domainOwnerCheck(tup, typename);
/* Setup new tuple */
@@ -1129,9 +1135,10 @@ AlterDomainDefault(List *names, Node *defaultRaw)
{
/* Create a dummy ParseState for transformExpr */
pstate = make_parsestate(NULL);
+
/*
- * Cook the colDef->raw_expr into an expression. Note:
- * Name is strictly for error message
+ * Cook the colDef->raw_expr into an expression. Note: Name is
+ * strictly for error message
*/
defaultExpr = cookDefault(pstate, defaultRaw,
typTup->typbasetype,
@@ -1139,27 +1146,29 @@ AlterDomainDefault(List *names, Node *defaultRaw)
NameStr(typTup->typname));
/*
- * Expression must be stored as a nodeToString result, but
- * we also require a valid textual representation (mainly
- * to make life easier for pg_dump).
+ * Expression must be stored as a nodeToString result, but we also
+ * require a valid textual representation (mainly to make life
+ * easier for pg_dump).
*/
defaultValue = deparse_expression(defaultExpr,
- deparse_context_for(NameStr(typTup->typname),
- InvalidOid),
+ deparse_context_for(NameStr(typTup->typname),
+ InvalidOid),
false, false);
+
/*
* Form an updated tuple with the new default and write it back.
*/
new_record[Anum_pg_type_typdefaultbin - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(
- nodeToString(defaultExpr)));
+ CStringGetDatum(
+ nodeToString(defaultExpr)));
new_record_repl[Anum_pg_type_typdefaultbin - 1] = 'r';
new_record[Anum_pg_type_typdefault - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(defaultValue));
+ CStringGetDatum(defaultValue));
new_record_repl[Anum_pg_type_typdefault - 1] = 'r';
}
- else /* Default is NULL, drop it */
+ else
+/* Default is NULL, drop it */
{
new_record_nulls[Anum_pg_type_typdefaultbin - 1] = 'n';
new_record_repl[Anum_pg_type_typdefaultbin - 1] = 'r';
@@ -1168,7 +1177,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
}
newtuple = heap_modifytuple(tup, rel,
- new_record, new_record_nulls, new_record_repl);
+ new_record, new_record_nulls, new_record_repl);
simple_heap_update(rel, &tup->t_self, newtuple);
@@ -1178,7 +1187,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
GenerateTypeDependencies(typTup->typnamespace,
domainoid,
typTup->typrelid,
- 0, /* relation kind is n/a */
+ 0, /* relation kind is n/a */
typTup->typinput,
typTup->typoutput,
typTup->typreceive,
@@ -1186,7 +1195,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
typTup->typelem,
typTup->typbasetype,
defaultExpr,
- true); /* Rebuild is true */
+ true); /* Rebuild is true */
/* Clean up */
heap_close(rel, NoLock);
@@ -1196,7 +1205,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
/*
* AlterDomainNotNull
*
- * Routine implementing ALTER DOMAIN SET/DROP NOT NULL statements.
+ * Routine implementing ALTER DOMAIN SET/DROP NOT NULL statements.
*/
void
AlterDomainNotNull(List *names, bool notNull)
@@ -1205,7 +1214,7 @@ AlterDomainNotNull(List *names, bool notNull)
Oid domainoid;
Relation typrel;
HeapTuple tup;
- Form_pg_type typTup;
+ Form_pg_type typTup;
/* Make a TypeName so we can use standard type lookup machinery */
typename = makeNode(TypeName);
@@ -1231,7 +1240,7 @@ AlterDomainNotNull(List *names, bool notNull)
elog(ERROR, "cache lookup failed for type %u", domainoid);
typTup = (Form_pg_type) GETSTRUCT(tup);
- /* Doesn't return if user isn't allowed to alter the domain */
+ /* Doesn't return if user isn't allowed to alter the domain */
domainOwnerCheck(tup, typename);
/* Is the domain already set to the desired constraint? */
@@ -1248,15 +1257,15 @@ AlterDomainNotNull(List *names, bool notNull)
/* Adding a NOT NULL constraint requires checking existing columns */
if (notNull)
{
- List *rels;
- List *rt;
+ List *rels;
+ List *rt;
/* Fetch relation list with attributes based on this domain */
/* ShareLock is sufficient to prevent concurrent data changes */
rels = get_rels_with_domain(domainoid, ShareLock);
- foreach (rt, rels)
+ foreach(rt, rels)
{
RelToCheck *rtc = (RelToCheck *) lfirst(rt);
Relation testrel = rtc->rel;
@@ -1268,14 +1277,14 @@ AlterDomainNotNull(List *names, bool notNull)
scan = heap_beginscan(testrel, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- int i;
+ int i;
/* Test attributes that are of the domain */
for (i = 0; i < rtc->natts; i++)
{
- int attnum = rtc->atts[i];
- Datum d;
- bool isNull;
+ int attnum = rtc->atts[i];
+ Datum d;
+ bool isNull;
d = heap_getattr(tuple, attnum, tupdesc, &isNull);
@@ -1284,7 +1293,7 @@ AlterDomainNotNull(List *names, bool notNull)
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("relation \"%s\" attribute \"%s\" contains NULL values",
RelationGetRelationName(testrel),
- NameStr(tupdesc->attrs[attnum - 1]->attname))));
+ NameStr(tupdesc->attrs[attnum - 1]->attname))));
}
}
heap_endscan(scan);
@@ -1295,7 +1304,7 @@ AlterDomainNotNull(List *names, bool notNull)
}
/*
- * Okay to update pg_type row. We can scribble on typTup because it's
+ * Okay to update pg_type row. We can scribble on typTup because it's
* a copy.
*/
typTup->typnotnull = notNull;
@@ -1321,7 +1330,7 @@ AlterDomainDropConstraint(List *names, const char *constrName, DropBehavior beha
Oid domainoid;
HeapTuple tup;
Relation rel;
- Form_pg_type typTup;
+ Form_pg_type typTup;
Relation conrel;
SysScanDesc conscan;
ScanKeyData key[1];
@@ -1350,7 +1359,7 @@ AlterDomainDropConstraint(List *names, const char *constrName, DropBehavior beha
if (!HeapTupleIsValid(tup))
elog(ERROR, "cache lookup failed for type %u", domainoid);
- /* Doesn't return if user isn't allowed to alter the domain */
+ /* Doesn't return if user isn't allowed to alter the domain */
domainOwnerCheck(tup, typename);
/* Grab an appropriate lock on the pg_constraint relation */
@@ -1403,15 +1412,15 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
Oid domainoid;
Relation typrel;
HeapTuple tup;
- Form_pg_type typTup;
- List *rels;
- List *rt;
- EState *estate;
+ Form_pg_type typTup;
+ List *rels;
+ List *rt;
+ EState *estate;
ExprContext *econtext;
- char *ccbin;
- Expr *expr;
- ExprState *exprstate;
- int counter = 0;
+ char *ccbin;
+ Expr *expr;
+ ExprState *exprstate;
+ int counter = 0;
Constraint *constr;
/* Make a TypeName so we can use standard type lookup machinery */
@@ -1438,14 +1447,14 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
elog(ERROR, "cache lookup failed for type %u", domainoid);
typTup = (Form_pg_type) GETSTRUCT(tup);
- /* Doesn't return if user isn't allowed to alter the domain */
+ /* Doesn't return if user isn't allowed to alter the domain */
domainOwnerCheck(tup, typename);
/* Check for unsupported constraint types */
if (IsA(newConstraint, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("FOREIGN KEY constraints not supported for domains")));
+ errmsg("FOREIGN KEY constraints not supported for domains")));
/* otherwise it should be a plain Constraint */
if (!IsA(newConstraint, Constraint))
@@ -1469,20 +1478,20 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
errmsg("use ALTER DOMAIN .. [ SET | DROP ] NOT NULL instead")));
break;
- case CONSTR_CHECK:
+ case CONSTR_CHECK:
/* processed below */
- break;
+ break;
case CONSTR_UNIQUE:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("UNIQUE constraints not supported for domains")));
+ errmsg("UNIQUE constraints not supported for domains")));
break;
case CONSTR_PRIMARY:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("PRIMARY KEY constraints not supported for domains")));
+ errmsg("PRIMARY KEY constraints not supported for domains")));
break;
case CONSTR_ATTR_DEFERRABLE:
@@ -1501,18 +1510,18 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
}
/*
- * Since all other constraint types throw errors, this must be
- * a check constraint. First, process the constraint expression
- * and add an entry to pg_constraint.
+ * Since all other constraint types throw errors, this must be a check
+ * constraint. First, process the constraint expression and add an
+ * entry to pg_constraint.
*/
ccbin = domainAddConstraint(HeapTupleGetOid(tup), typTup->typnamespace,
typTup->typbasetype, typTup->typtypmod,
- constr, &counter, NameStr(typTup->typname));
+ constr, &counter, NameStr(typTup->typname));
/*
- * Test all values stored in the attributes based on the domain
- * the constraint is being added to.
+ * Test all values stored in the attributes based on the domain the
+ * constraint is being added to.
*/
expr = (Expr *) stringToNode(ccbin);
@@ -1528,7 +1537,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
rels = get_rels_with_domain(domainoid, ShareLock);
- foreach (rt, rels)
+ foreach(rt, rels)
{
RelToCheck *rtc = (RelToCheck *) lfirst(rt);
Relation testrel = rtc->rel;
@@ -1540,15 +1549,15 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
scan = heap_beginscan(testrel, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- int i;
+ int i;
/* Test attributes that are of the domain */
for (i = 0; i < rtc->natts; i++)
{
- int attnum = rtc->atts[i];
- Datum d;
- bool isNull;
- Datum conResult;
+ int attnum = rtc->atts[i];
+ Datum d;
+ bool isNull;
+ Datum conResult;
d = heap_getattr(tuple, attnum, tupdesc, &isNull);
@@ -1564,7 +1573,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
(errcode(ERRCODE_CHECK_VIOLATION),
errmsg("relation \"%s\" attribute \"%s\" contains values that violate the new constraint",
RelationGetRelationName(testrel),
- NameStr(tupdesc->attrs[attnum - 1]->attname))));
+ NameStr(tupdesc->attrs[attnum - 1]->attname))));
}
ResetExprContext(econtext);
@@ -1610,7 +1619,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
static List *
get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
{
- List *result = NIL;
+ List *result = NIL;
Relation depRel;
ScanKeyData key[2];
SysScanDesc depScan;
@@ -1634,10 +1643,10 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
while (HeapTupleIsValid(depTup = systable_getnext(depScan)))
{
- Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup);
+ Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup);
RelToCheck *rtc = NULL;
List *rellist;
- Form_pg_attribute pg_att;
+ Form_pg_attribute pg_att;
int ptr;
/* Ignore dependees that aren't user columns of tables */
@@ -1675,10 +1684,10 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
}
/*
- * Confirm column has not been dropped, and is of the expected type.
- * This defends against an ALTER DROP COLUMN occuring just before
- * we acquired lock ... but if the whole table were dropped, we'd
- * still have a problem.
+ * Confirm column has not been dropped, and is of the expected
+ * type. This defends against an ALTER DROP COLUMN occuring just
+ * before we acquired lock ... but if the whole table were
+ * dropped, we'd still have a problem.
*/
if (pg_depend->objsubid > RelationGetNumberOfAttributes(rtc->rel))
continue;
@@ -1687,16 +1696,16 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
continue;
/*
- * Okay, add column to result. We store the columns in column-number
- * order; this is just a hack to improve predictability of regression
- * test output ...
+ * Okay, add column to result. We store the columns in
+ * column-number order; this is just a hack to improve
+ * predictability of regression test output ...
*/
Assert(rtc->natts < RelationGetNumberOfAttributes(rtc->rel));
ptr = rtc->natts++;
- while (ptr > 0 && rtc->atts[ptr-1] > pg_depend->objsubid)
+ while (ptr > 0 && rtc->atts[ptr - 1] > pg_depend->objsubid)
{
- rtc->atts[ptr] = rtc->atts[ptr-1];
+ rtc->atts[ptr] = rtc->atts[ptr - 1];
ptr--;
}
rtc->atts[ptr] = pg_depend->objsubid;
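The loop above keeps rtc->atts[] sorted by shifting larger column numbers right before dropping the new one in, purely so regression-test output is predictable, as the rewrapped comment notes. The same insertion step in isolation, as a compilable sketch (the array size and values are made up):

#include <stdio.h>

/*
 * Insert "value" into the first "n" sorted slots of atts[], keeping the
 * array ordered.  Illustration only; the backend version works on
 * rtc->atts[] and pg_depend->objsubid.
 */
static int
insert_sorted(int *atts, int n, int value)
{
	int		ptr = n;

	while (ptr > 0 && atts[ptr - 1] > value)
	{
		atts[ptr] = atts[ptr - 1];	/* shift larger entries right */
		ptr--;
	}
	atts[ptr] = value;
	return n + 1;					/* new element count */
}

int
main(void)
{
	int		atts[8];
	int		n = 0;
	int		i;

	n = insert_sorted(atts, n, 5);
	n = insert_sorted(atts, n, 2);
	n = insert_sorted(atts, n, 7);
	for (i = 0; i < n; i++)
		printf("%d ", atts[i]);		/* prints 2 5 7 */
	printf("\n");
	return 0;
}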
@@ -1719,7 +1728,7 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
static void
domainOwnerCheck(HeapTuple tup, TypeName *typename)
{
- Form_pg_type typTup = (Form_pg_type) GETSTRUCT(tup);
+ Form_pg_type typTup = (Form_pg_type) GETSTRUCT(tup);
/* Check that this is actually a domain */
if (typTup->typtype != 'd')
@@ -1746,7 +1755,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
char *ccsrc;
char *ccbin;
ParseState *pstate;
- CoerceToDomainValue *domVal;
+ CoerceToDomainValue *domVal;
/*
* Assign or validate constraint name
@@ -1759,8 +1768,8 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
constr->name))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("constraint \"%s\" for domain \"%s\" already exists",
- constr->name, domainName)));
+ errmsg("constraint \"%s\" for domain \"%s\" already exists",
+ constr->name, domainName)));
}
else
constr->name = GenerateConstraintName(CONSTRAINT_DOMAIN,
@@ -1775,10 +1784,10 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
/*
* Set up a CoerceToDomainValue to represent the occurrence of VALUE
- * in the expression. Note that it will appear to have the type of the
- * base type, not the domain. This seems correct since within the
- * check expression, we should not assume the input value can be considered
- * a member of the domain.
+ * in the expression. Note that it will appear to have the type of
+ * the base type, not the domain. This seems correct since within the
+ * check expression, we should not assume the input value can be
+ * considered a member of the domain.
*/
domVal = makeNode(CoerceToDomainValue);
domVal->typeId = baseTypeOid;
@@ -1841,13 +1850,13 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
/*
* Store the constraint in pg_constraint
*/
- CreateConstraintEntry(constr->name, /* Constraint Name */
- domainNamespace, /* namespace */
+ CreateConstraintEntry(constr->name, /* Constraint Name */
+ domainNamespace, /* namespace */
CONSTRAINT_CHECK, /* Constraint Type */
false, /* Is Deferrable */
false, /* Is Deferred */
- InvalidOid, /* not a relation constraint */
- NULL,
+ InvalidOid, /* not a relation constraint */
+ NULL,
0,
domainOid, /* domain constraint */
InvalidOid, /* Foreign key fields */
@@ -1857,13 +1866,13 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
' ',
' ',
InvalidOid,
- expr, /* Tree form check constraint */
+ expr, /* Tree form check constraint */
ccbin, /* Binary form check constraint */
- ccsrc); /* Source form check constraint */
+ ccsrc); /* Source form check constraint */
/*
- * Return the compiled constraint expression so the calling routine can
- * perform any additional required tests.
+ * Return the compiled constraint expression so the calling routine
+ * can perform any additional required tests.
*/
return ccbin;
}
@@ -1893,7 +1902,7 @@ GetDomainConstraints(Oid typeOid)
Form_pg_type typTup;
ScanKeyData key[1];
SysScanDesc scan;
-
+
tup = SearchSysCache(TYPEOID,
ObjectIdGetDatum(typeOid),
0, 0, 0);
@@ -1915,17 +1924,20 @@ GetDomainConstraints(Oid typeOid)
while (HeapTupleIsValid(conTup = systable_getnext(scan)))
{
- Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
- Datum val;
- bool isNull;
- Expr *check_expr;
+ Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
+ Datum val;
+ bool isNull;
+ Expr *check_expr;
DomainConstraintState *r;
/* Ignore non-CHECK constraints (presently, shouldn't be any) */
if (c->contype != CONSTRAINT_CHECK)
continue;
- /* Not expecting conbin to be NULL, but we'll test for it anyway */
+ /*
+ * Not expecting conbin to be NULL, but we'll test for it
+ * anyway
+ */
val = fastgetattr(conTup, Anum_pg_constraint_conbin,
conRel->rd_att, &isNull);
if (isNull)
@@ -1945,8 +1957,8 @@ GetDomainConstraints(Oid typeOid)
r->check_expr = ExecInitExpr(check_expr, NULL);
/*
- * use lcons() here because constraints of lower domains should
- * be applied earlier.
+ * use lcons() here because constraints of lower domains
+ * should be applied earlier.
*/
result = lcons(r, result);
}
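The comment above notes that the result is built with lcons() so that constraints found later in the scan (those of lower domains) end up at the front of the list and are applied first. A standalone sketch of that prepend-to-control-ordering trick, using a plain linked list rather than the backend's List type:

#include <stdio.h>
#include <stdlib.h>

typedef struct Cell
{
	int			 value;
	struct Cell *next;
} Cell;

/* Push onto the front, like lcons(): items added later come out first. */
static Cell *
prepend(Cell *list, int value)
{
	Cell   *c = malloc(sizeof(Cell));

	if (c == NULL)
		return list;
	c->value = value;
	c->next = list;
	return c;
}

int
main(void)
{
	Cell   *list = NULL;
	Cell   *c;
	int		scanned[] = {1, 2, 3};	/* order in which items are encountered */
	int		i;

	for (i = 0; i < 3; i++)
		list = prepend(list, scanned[i]);

	/* Iteration order is 3 2 1: the last item added is applied first. */
	for (c = list; c != NULL; c = c->next)
		printf("%d ", c->value);
	printf("\n");
	return 0;
}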
@@ -2003,7 +2015,7 @@ AlterTypeOwner(List *names, AclId newOwnerSysId)
Oid typeOid;
Relation rel;
HeapTuple tup;
- Form_pg_type typTup;
+ Form_pg_type typTup;
/* Make a TypeName so we can use standard type lookup machinery */
typename = makeNode(TypeName);
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 36416a5232f..117eef1e750 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.122 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.123 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -146,12 +146,12 @@ write_group_file(Relation grel)
if (fp == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write temp file \"%s\": %m", tempname)));
+ errmsg("could not write temp file \"%s\": %m", tempname)));
/*
- * Read pg_group and write the file. Note we use SnapshotSelf to ensure
- * we see all effects of current transaction. (Perhaps could do a
- * CommandCounterIncrement beforehand, instead?)
+ * Read pg_group and write the file. Note we use SnapshotSelf to
+ * ensure we see all effects of current transaction. (Perhaps could
+ * do a CommandCounterIncrement beforehand, instead?)
*/
scan = heap_beginscan(grel, SnapshotSelf, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
@@ -212,7 +212,7 @@ write_group_file(Relation grel)
if (usename[j] != '\0')
{
ereport(LOG,
- (errmsg("invalid user name \"%s\"", usename)));
+ (errmsg("invalid user name \"%s\"", usename)));
continue;
}
@@ -245,7 +245,7 @@ write_group_file(Relation grel)
if (ferror(fp))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write temp file \"%s\": %m", tempname)));
+ errmsg("could not write temp file \"%s\": %m", tempname)));
FreeFile(fp);
/*
@@ -294,12 +294,12 @@ write_user_file(Relation urel)
if (fp == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write temp file \"%s\": %m", tempname)));
+ errmsg("could not write temp file \"%s\": %m", tempname)));
/*
- * Read pg_shadow and write the file. Note we use SnapshotSelf to ensure
- * we see all effects of current transaction. (Perhaps could do a
- * CommandCounterIncrement beforehand, instead?)
+ * Read pg_shadow and write the file. Note we use SnapshotSelf to
+ * ensure we see all effects of current transaction. (Perhaps could
+ * do a CommandCounterIncrement beforehand, instead?)
*/
scan = heap_beginscan(urel, SnapshotSelf, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
@@ -376,7 +376,7 @@ write_user_file(Relation urel)
if (ferror(fp))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write temp file \"%s\": %m", tempname)));
+ errmsg("could not write temp file \"%s\": %m", tempname)));
FreeFile(fp);
/*
@@ -430,10 +430,10 @@ AtEOXact_UpdatePasswordFile(bool isCommit)
Relation urel = NULL;
Relation grel = NULL;
- if (! (user_file_update_needed || group_file_update_needed))
+ if (!(user_file_update_needed || group_file_update_needed))
return;
- if (! isCommit)
+ if (!isCommit)
{
user_file_update_needed = false;
group_file_update_needed = false;
@@ -441,12 +441,12 @@ AtEOXact_UpdatePasswordFile(bool isCommit)
}
/*
- * We use ExclusiveLock to ensure that only one backend writes the flat
- * file(s) at a time. That's sufficient because it's okay to allow plain
- * reads of the tables in parallel. There is some chance of a deadlock
- * here (if we were triggered by a user update of pg_shadow or pg_group,
- * which likely won't have gotten a strong enough lock), so get the locks
- * we need before writing anything.
+ * We use ExclusiveLock to ensure that only one backend writes the
+ * flat file(s) at a time. That's sufficient because it's okay to
+ * allow plain reads of the tables in parallel. There is some chance
+ * of a deadlock here (if we were triggered by a user update of
+ * pg_shadow or pg_group, which likely won't have gotten a strong
+ * enough lock), so get the locks we need before writing anything.
*/
if (user_file_update_needed)
urel = heap_openr(ShadowRelationName, ExclusiveLock);
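These hunks rewrap the comments around the flat password/group file rewrite: writers are serialized with ExclusiveLock, the catalog is read with SnapshotSelf, and the output goes to a temp file whose ferror() status is checked before it is kept. A rough sketch of the write-temp-then-install part in plain stdio follows; the file names are invented, the final rename is only one plausible way to install the result, and none of this is the backend's AllocateFile/FreeFile code.

#include <stdio.h>

/*
 * Write new contents to a temporary file and install it over the real one
 * only if every write succeeded.  Returns 0 on success, -1 on failure.
 */
static int
rewrite_flat_file(const char *finalname, const char *tempname)
{
	FILE   *fp = fopen(tempname, "w");

	if (fp == NULL)
		return -1;

	fprintf(fp, "\"someuser\"\n");	/* placeholder payload */

	if (ferror(fp))					/* detect any failed write */
	{
		fclose(fp);
		remove(tempname);
		return -1;
	}
	if (fclose(fp) != 0)
	{
		remove(tempname);
		return -1;
	}
	return rename(tempname, finalname);	/* install the finished file */
}

int
main(void)
{
	return rewrite_flat_file("pg_group.flat", "pg_group.flat.tmp") == 0 ? 0 : 1;
}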
@@ -1088,7 +1088,7 @@ DropUser(DropUserStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
errmsg("user \"%s\" cannot be dropped", user),
- errdetail("The user owns database \"%s\".", dbname)));
+ errdetail("The user owns database \"%s\".", dbname)));
}
heap_endscan(scan);
@@ -1172,10 +1172,10 @@ RenameUser(const char *oldname, const char *newname)
errmsg("user \"%s\" does not exist", oldname)));
/*
- * XXX Client applications probably store the session user
- * somewhere, so renaming it could cause confusion. On the other
- * hand, there may not be an actual problem besides a little
- * confusion, so think about this and decide.
+ * XXX Client applications probably store the session user somewhere,
+ * so renaming it could cause confusion. On the other hand, there may
+ * not be an actual problem besides a little confusion, so think about
+ * this and decide.
*/
if (((Form_pg_shadow) GETSTRUCT(tup))->usesysid == GetSessionUserId())
ereport(ERROR,
@@ -1221,14 +1221,14 @@ CheckPgUserAclNotNull(void)
htup = SearchSysCache(RELOID,
ObjectIdGetDatum(RelOid_pg_shadow),
0, 0, 0);
- if (!HeapTupleIsValid(htup)) /* should not happen, we hope */
+ if (!HeapTupleIsValid(htup)) /* should not happen, we hope */
elog(ERROR, "cache lookup failed for relation %u", RelOid_pg_shadow);
if (heap_attisnull(htup, Anum_pg_class_relacl))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("before using passwords you must revoke permissions on %s",
- ShadowRelationName),
+ errmsg("before using passwords you must revoke permissions on %s",
+ ShadowRelationName),
errdetail("This restriction is to prevent unprivileged users from reading the passwords."),
errhint("Try 'REVOKE ALL ON \"%s\" FROM PUBLIC'.",
ShadowRelationName)));
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index e73ace27c27..9dc0d9a8996 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.257 2003/07/20 21:56:34 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.258 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -287,24 +287,25 @@ vacuum(VacuumStmt *vacstmt)
if (vacstmt->vacuum)
{
- if (! vacuum_rel(relid, vacstmt, RELKIND_RELATION))
- all_rels = false; /* forget about updating dbstats */
+ if (!vacuum_rel(relid, vacstmt, RELKIND_RELATION))
+ all_rels = false; /* forget about updating dbstats */
}
if (vacstmt->analyze)
{
MemoryContext old_context = NULL;
/*
- * If we vacuumed, use new transaction for analyze.
- * Otherwise, we can use the outer transaction, but we still
- * need to call analyze_rel in a memory context that will be
- * cleaned up on return (else we leak memory while processing
- * multiple tables).
+ * If we vacuumed, use new transaction for analyze. Otherwise,
+ * we can use the outer transaction, but we still need to call
+ * analyze_rel in a memory context that will be cleaned up on
+ * return (else we leak memory while processing multiple
+ * tables).
*/
if (vacstmt->vacuum)
{
StartTransactionCommand();
- SetQuerySnapshot(); /* might be needed for functions in indexes */
+ SetQuerySnapshot(); /* might be needed for functions
+ * in indexes */
}
else
old_context = MemoryContextSwitchTo(anl_context);
@@ -734,7 +735,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
/* Begin a transaction for vacuuming this relation */
StartTransactionCommand();
- SetQuerySnapshot(); /* might be needed for functions in indexes */
+ SetQuerySnapshot(); /* might be needed for functions in
+ * indexes */
/*
* Check for user-requested abort. Note we want this to be inside a
@@ -812,7 +814,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
{
relation_close(onerel, lmode);
CommitTransactionCommand();
- return true; /* assume no long-lived data in temp tables */
+ return true; /* assume no long-lived data in temp
+ * tables */
}
/*
@@ -860,7 +863,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
*/
if (toast_relid != InvalidOid)
{
- if (! vacuum_rel(toast_relid, vacstmt, RELKIND_TOASTVALUE))
+ if (!vacuum_rel(toast_relid, vacstmt, RELKIND_TOASTVALUE))
result = false; /* failed to vacuum the TOAST table? */
}
@@ -1087,8 +1090,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
if (PageIsNew(page))
{
ereport(WARNING,
- (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
- relname, blkno)));
+ (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
+ relname, blkno)));
PageInit(page, BufferGetPageSize(buf), 0);
vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
free_space += vacpage->free;
@@ -1314,7 +1317,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
/*
* Include the page in empty_end_pages if it will be empty after
- * vacuuming; this is to keep us from using it as a move destination.
+ * vacuuming; this is to keep us from using it as a move
+ * destination.
*/
if (notup)
{
@@ -1382,9 +1386,9 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
RelationGetRelationName(onerel),
tups_vacuumed, num_tuples, nblocks),
errdetail("%.0f dead tuples cannot be removed yet.\n"
- "Nonremovable tuples range from %lu to %lu bytes long.\n"
+ "Nonremovable tuples range from %lu to %lu bytes long.\n"
"There were %.0f unused item pointers.\n"
- "Total free space (including removable tuples) is %.0f bytes.\n"
+ "Total free space (including removable tuples) is %.0f bytes.\n"
"%u pages are or will become empty, including %u at the end of the table.\n"
"%u pages containing %.0f free bytes are potential move destinations.\n"
"%s",
@@ -2380,8 +2384,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* It'd be cleaner to make this report at the bottom of this routine,
* but then the rusage would double-count the second pass of index
- * vacuuming. So do it here and ignore the relatively small amount
- * of processing that occurs below.
+ * vacuuming. So do it here and ignore the relatively small amount of
+ * processing that occurs below.
*/
ereport(elevel,
(errmsg("\"%s\": moved %u tuples, truncated %u to %u pages",
@@ -2735,7 +2739,7 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
stats->num_index_tuples,
stats->num_pages),
errdetail("%.0f index tuples were removed.\n"
- "%u index pages have been deleted, %u are currently reusable.\n"
+ "%u index pages have been deleted, %u are currently reusable.\n"
"%s",
stats->tuples_removed,
stats->pages_deleted, stats->pages_free,
@@ -2752,7 +2756,7 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
ereport(WARNING,
(errmsg("index \"%s\" contains %.0f tuples, but table contains %.0f tuples",
RelationGetRelationName(indrel),
- stats->num_index_tuples, num_tuples + keep_tuples),
+ stats->num_index_tuples, num_tuples + keep_tuples),
errhint("Rebuild the index with REINDEX.")));
}
@@ -2837,13 +2841,14 @@ vac_update_fsm(Relation onerel, VacPageList fraged_pages,
/*
* We only report pages with free space at least equal to the average
- * request size --- this avoids cluttering FSM with uselessly-small bits
- * of space. Although FSM would discard pages with little free space
- * anyway, it's important to do this prefiltering because (a) it reduces
- * the time spent holding the FSM lock in RecordRelationFreeSpace, and
- * (b) FSM uses the number of pages reported as a statistic for guiding
- * space management. If we didn't threshold our reports the same way
- * vacuumlazy.c does, we'd be skewing that statistic.
+ * request size --- this avoids cluttering FSM with uselessly-small
+ * bits of space. Although FSM would discard pages with little free
+ * space anyway, it's important to do this prefiltering because (a) it
+ * reduces the time spent holding the FSM lock in
+ * RecordRelationFreeSpace, and (b) FSM uses the number of pages
+ * reported as a statistic for guiding space management. If we didn't
+ * threshold our reports the same way vacuumlazy.c does, we'd be
+ * skewing that statistic.
*/
threshold = GetAvgFSMRequestSize(&onerel->rd_node);
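Aside on the prefiltering described in the vac_update_fsm comment above: only pages whose free space meets the average request size are reported to the FSM. A minimal standalone sketch of that filtering step, using a hypothetical PageFreeInfo struct rather than the real PostgreSQL structures:

#include <stddef.h>

/* Hypothetical stand-in for the real free-space bookkeeping entry. */
typedef struct
{
	unsigned int blkno;			/* block number */
	size_t		avail;			/* free bytes on the page */
} PageFreeInfo;

/*
 * Keep only pages whose free space is at least 'threshold' (the average
 * request size in the real code).  Compacts the array in place and
 * returns the new entry count, mirroring the prefiltering described above.
 */
static int
filter_free_pages(PageFreeInfo *pages, int npages, size_t threshold)
{
	int			i,
				nkept = 0;

	for (i = 0; i < npages; i++)
	{
		if (pages[i].avail >= threshold)
			pages[nkept++] = pages[i];
	}
	return nkept;
}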
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index f0be98a23ed..65af960be8e 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -31,7 +31,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.29 2003/07/20 21:56:34 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.30 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,7 +79,7 @@ typedef struct LVRelStats
bool fs_is_heap; /* are we using heap organization? */
int num_free_pages; /* current # of entries */
int max_free_pages; /* # slots allocated in array */
- PageFreeSpaceInfo *free_pages; /* array or heap of blkno/avail */
+ PageFreeSpaceInfo *free_pages; /* array or heap of blkno/avail */
} LVRelStats;
@@ -162,7 +162,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
*/
possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
if (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
- possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION)
+ possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION)
lazy_truncate_heap(onerel, vacrelstats);
/* Update shared free space map with final free space info */
@@ -659,7 +659,7 @@ lazy_vacuum_index(Relation indrel, LVRelStats *vacrelstats)
stats->num_index_tuples,
stats->num_pages),
errdetail("%.0f index tuples were removed.\n"
- "%u index pages have been deleted, %u are currently reusable.\n"
+ "%u index pages have been deleted, %u are currently reusable.\n"
"%s",
stats->tuples_removed,
stats->pages_deleted, stats->pages_free,
@@ -966,16 +966,18 @@ lazy_record_free_space(LVRelStats *vacrelstats,
/*
* A page with less than stats->threshold free space will be forgotten
* immediately, and never passed to the free space map. Removing the
- * uselessly small entries early saves cycles, and in particular reduces
- * the amount of time we spend holding the FSM lock when we finally call
- * RecordRelationFreeSpace. Since the FSM will probably drop pages with
- * little free space anyway, there's no point in making this really small.
+ * uselessly small entries early saves cycles, and in particular
+ * reduces the amount of time we spend holding the FSM lock when we
+ * finally call RecordRelationFreeSpace. Since the FSM will probably
+ * drop pages with little free space anyway, there's no point in
+ * making this really small.
*
- * XXX Is it worth trying to measure average tuple size, and using that to
- * adjust the threshold? Would be worthwhile if FSM has no stats yet
- * for this relation. But changing the threshold as we scan the rel
- * might lead to bizarre behavior, too. Also, it's probably better if
- * vacuum.c has the same thresholding behavior as we do here.
+ * XXX Is it worth trying to measure average tuple size, and using that
+ * to adjust the threshold? Would be worthwhile if FSM has no stats
+ * yet for this relation. But changing the threshold as we scan the
+ * rel might lead to bizarre behavior, too. Also, it's probably
+ * better if vacuum.c has the same thresholding behavior as we do
+ * here.
*/
if (avail < vacrelstats->threshold)
return;
@@ -996,7 +998,7 @@ lazy_record_free_space(LVRelStats *vacrelstats,
/*----------
* The rest of this routine works with "heap" organization of the
* free space arrays, wherein we maintain the heap property
- * avail[(j-1) div 2] <= avail[j] for 0 < j < n.
+ * avail[(j-1) div 2] <= avail[j] for 0 < j < n.
* In particular, the zero'th element always has the smallest available
* space and can be discarded to make room for a new page with more space.
* See Knuth's discussion of heap-based priority queues, sec 5.2.3;
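Aside on the heap organization described in the comment above: once the free-space array fills up, it is kept as a min-heap on avail, so the page with the least free space sits at index 0 and can be evicted when a page with more space arrives. A small self-contained sketch of such a bounded min-heap (hypothetical names, not the lazy-vacuum code itself):

#include <stddef.h>

typedef struct
{
	unsigned int blkno;
	size_t		avail;
} FreePage;

/*
 * Restore the heap property avail[(j-1)/2] <= avail[j] after the root
 * has been replaced; the smallest avail ends up back at index 0.
 */
static void
sift_down(FreePage *heap, int n)
{
	int			i = 0;

	for (;;)
	{
		int			child = 2 * i + 1;
		FreePage	tmp;

		if (child >= n)
			break;
		if (child + 1 < n && heap[child + 1].avail < heap[child].avail)
			child++;
		if (heap[i].avail <= heap[child].avail)
			break;
		tmp = heap[i];
		heap[i] = heap[child];
		heap[child] = tmp;
		i = child;
	}
}

/*
 * Record a page while keeping at most 'max' entries: while there is room,
 * append and bubble up; once full, evict the current minimum (heap[0])
 * only if the new page offers more free space.  Returns the new count.
 */
static int
record_page(FreePage *heap, int n, int max, FreePage page)
{
	if (n < max)
	{
		int			j = n++;
		FreePage	tmp;

		heap[j] = page;
		while (j > 0 && heap[(j - 1) / 2].avail > heap[j].avail)
		{
			tmp = heap[j];
			heap[j] = heap[(j - 1) / 2];
			heap[(j - 1) / 2] = tmp;
			j = (j - 1) / 2;
		}
	}
	else if (page.avail > heap[0].avail)
	{
		heap[0] = page;
		sift_down(heap, n);
	}
	return n;
}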
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index e0b041636e6..07dfca13c84 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.85 2003/07/29 00:03:18 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.86 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -34,7 +34,7 @@
* to duplicate the test in AC_STRUCT_TIMEZONE.
*/
#ifdef HAVE_TZNAME
-#ifndef tzname /* For SGI. */
+#ifndef tzname /* For SGI. */
extern char *tzname[];
#endif
#endif
@@ -273,12 +273,11 @@ static void
clear_tz(void)
{
/*
- * unsetenv() works fine, but is BSD, not POSIX, and is not
- * available under Solaris, among others. Apparently putenv()
- * called as below clears the process-specific environment
- * variables. Other reasonable arguments to putenv() (e.g.
- * "TZ=", "TZ", "") result in a core dump (under Linux
- * anyway). - thomas 1998-01-26
+ * unsetenv() works fine, but is BSD, not POSIX, and is not available
+ * under Solaris, among others. Apparently putenv() called as below
+ * clears the process-specific environment variables. Other
+ * reasonable arguments to putenv() (e.g. "TZ=", "TZ", "") result in a
+ * core dump (under Linux anyway). - thomas 1998-01-26
*/
if (tzbuf[0] == 'T')
{
@@ -298,14 +297,14 @@ clear_tz(void)
*
* If tzname[1] is a nonempty string, *or* the global timezone variable is
* not zero, then tzset must have recognized the TZ value as something
- * different from UTC. Return true.
+ * different from UTC. Return true.
*
* Otherwise, check to see if the TZ name is a known spelling of "UTC"
* (ie, appears in our internal tables as a timezone equivalent to UTC).
* If so, accept it.
*
* This will reject nonstandard spellings of UTC unless tzset() chose to
- * set tzname[1] as well as tzname[0]. The glibc version of tzset() will
+ * set tzname[1] as well as tzname[0]. The glibc version of tzset() will
* do so, but on other systems we may be tightening the spec a little.
*
* Another problem is that on some platforms (eg HPUX), if tzset thinks the
@@ -337,8 +336,8 @@ tzset_succeeded(const char *tz)
return true;
/*
- * Check for known spellings of "UTC". Note we must downcase the input
- * before passing it to DecodePosixTimezone().
+ * Check for known spellings of "UTC". Note we must downcase the
+ * input before passing it to DecodePosixTimezone().
*/
StrNCpy(tztmp, tz, sizeof(tztmp));
for (cp = tztmp; *cp; cp++)
@@ -368,7 +367,7 @@ tz_acceptable(void)
/*
* To detect leap-second timekeeping, compute the time_t value for
- * local midnight, 2000-01-01. Insist that this be a multiple of 60;
+ * local midnight, 2000-01-01. Insist that this be a multiple of 60;
* any partial-minute offset has to be due to leap seconds.
*/
MemSet(&tt, 0, sizeof(tt));
@@ -399,7 +398,7 @@ assign_timezone(const char *value, bool doit, bool interactive)
*/
if (!have_saved_tz)
{
- char *orig_tz = getenv("TZ");
+ char *orig_tz = getenv("TZ");
if (orig_tz)
StrNCpy(orig_tzbuf, orig_tz, sizeof(orig_tzbuf));
@@ -434,9 +433,9 @@ assign_timezone(const char *value, bool doit, bool interactive)
/*
* Try to parse it. XXX an invalid interval format will result in
- * ereport, which is not desirable for GUC. We did what we could to
- * guard against this in flatten_set_variable_args, but a string
- * coming in from postgresql.conf might contain anything.
+ * ereport, which is not desirable for GUC. We did what we could
+ * to guard against this in flatten_set_variable_args, but a
+ * string coming in from postgresql.conf might contain anything.
*/
interval = DatumGetIntervalP(DirectFunctionCall3(interval_in,
CStringGetDatum(val),
@@ -455,7 +454,7 @@ assign_timezone(const char *value, bool doit, bool interactive)
if (doit)
{
/* Here we change from SQL to Unix sign convention */
- CTimeZone = - interval->time;
+ CTimeZone = -interval->time;
HasCTZSet = true;
}
pfree(interval);
@@ -471,22 +470,22 @@ assign_timezone(const char *value, bool doit, bool interactive)
if (doit)
{
/* Here we change from SQL to Unix sign convention */
- CTimeZone = - hours * 3600;
+ CTimeZone = -hours * 3600;
HasCTZSet = true;
}
}
else if (strcasecmp(value, "UNKNOWN") == 0)
{
/*
- * UNKNOWN is the value shown as the "default" for TimeZone
- * in guc.c. We interpret it as meaning the original TZ
- * inherited from the environment. Note that if there is an
- * original TZ setting, we will return that rather than UNKNOWN
- * as the canonical spelling.
+ * UNKNOWN is the value shown as the "default" for TimeZone in
+ * guc.c. We interpret it as meaning the original TZ
+ * inherited from the environment. Note that if there is an
+ * original TZ setting, we will return that rather than
+ * UNKNOWN as the canonical spelling.
*/
if (doit)
{
- bool ok;
+ bool ok;
/* Revert to original setting of TZ, whatever it was */
if (orig_tzbuf[0])
@@ -516,14 +515,14 @@ assign_timezone(const char *value, bool doit, bool interactive)
* Otherwise assume it is a timezone name.
*
* We have to actually apply the change before we can have any
- * hope of checking it. So, save the old value in case we have
- * to back out. Note that it's possible the old setting is in
- * tzbuf, so we'd better copy it.
+ * hope of checking it. So, save the old value in case we
+ * have to back out. Note that it's possible the old setting
+ * is in tzbuf, so we'd better copy it.
*/
- char save_tzbuf[TZBUF_LEN];
- char *save_tz;
- bool known,
- acceptable;
+ char save_tzbuf[TZBUF_LEN];
+ char *save_tz;
+ bool known,
+ acceptable;
save_tz = getenv("TZ");
if (save_tz)
@@ -563,8 +562,8 @@ assign_timezone(const char *value, bool doit, bool interactive)
{
ereport(interactive ? ERROR : LOG,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("timezone \"%s\" appears to use leap seconds",
- value),
+ errmsg("timezone \"%s\" appears to use leap seconds",
+ value),
errdetail("PostgreSQL does not support leap seconds")));
return NULL;
}
@@ -609,7 +608,7 @@ show_timezone(void)
Interval interval;
interval.month = 0;
- interval.time = - CTimeZone;
+ interval.time = -CTimeZone;
tzn = DatumGetCString(DirectFunctionCall1(interval_out,
IntervalPGetDatum(&interval)));
@@ -703,16 +702,16 @@ assign_client_encoding(const char *value, bool doit, bool interactive)
/*
* Note: if we are in startup phase then SetClientEncoding may not be
* able to really set the encoding. In this case we will assume that
- * the encoding is okay, and InitializeClientEncoding() will fix things
- * once initialization is complete.
+ * the encoding is okay, and InitializeClientEncoding() will fix
+ * things once initialization is complete.
*/
if (SetClientEncoding(encoding, doit) < 0)
{
if (interactive)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("conversion between %s and %s is not supported",
- value, GetDatabaseEncodingName())));
+ errmsg("conversion between %s and %s is not supported",
+ value, GetDatabaseEncodingName())));
return NULL;
}
return value;
@@ -758,12 +757,12 @@ assign_session_authorization(const char *value, bool doit, bool interactive)
/* not a saved ID, so look it up */
HeapTuple userTup;
- if (! IsTransactionState())
+ if (!IsTransactionState())
{
/*
* Can't do catalog lookups, so fail. The upshot of this is
- * that session_authorization cannot be set in postgresql.conf,
- * which seems like a good thing anyway.
+ * that session_authorization cannot be set in
+ * postgresql.conf, which seems like a good thing anyway.
*/
return NULL;
}
@@ -782,7 +781,7 @@ assign_session_authorization(const char *value, bool doit, bool interactive)
usesysid = ((Form_pg_shadow) GETSTRUCT(userTup))->usesysid;
is_superuser = ((Form_pg_shadow) GETSTRUCT(userTup))->usesuper;
-
+
ReleaseSysCache(userTup);
}
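Aside on the leap-second check described in the tz_acceptable comment earlier in this file: compute the time_t for local midnight, 2000-01-01 and insist it be a multiple of 60, since any partial-minute offset would have to come from leap-second timekeeping. A standalone sketch of that check using only the standard C library (an illustration, not the function from the patch):

#include <stdbool.h>
#include <string.h>
#include <time.h>

/*
 * Return true if the current timezone appears to count time in ordinary
 * 60-second minutes: local midnight 2000-01-01 then falls on a multiple
 * of 60 seconds since the epoch.
 */
static bool
tz_is_acceptable(void)
{
	struct tm	tt;
	time_t		time2000;

	memset(&tt, 0, sizeof(tt));
	tt.tm_year = 2000 - 1900;
	tt.tm_mon = 0;				/* January */
	tt.tm_mday = 1;
	tt.tm_isdst = -1;			/* let mktime work out DST */

	time2000 = mktime(&tt);
	if (time2000 == (time_t) -1)
		return false;			/* mktime could not resolve the date */

	return ((long) time2000 % 60) == 0;
}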
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index e12ae0af686..9c3b372b3f7 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/view.c,v 1.75 2003/08/01 00:15:20 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/view.c,v 1.76 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -190,8 +190,8 @@ checkViewTupleDesc(TupleDesc newdesc, TupleDesc olddesc)
newattr->atttypmod != oldattr->atttypmod)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("cannot change datatype of view column \"%s\"",
- NameStr(oldattr->attname))));
+ errmsg("cannot change datatype of view column \"%s\"",
+ NameStr(oldattr->attname))));
/* We can ignore the remaining attributes of an attribute... */
}