Diffstat (limited to 'src/backend/commands')
-rw-r--r--  src/backend/commands/aggregatecmds.c      21
-rw-r--r--  src/backend/commands/alter.c               6
-rw-r--r--  src/backend/commands/analyze.c           157
-rw-r--r--  src/backend/commands/async.c              43
-rw-r--r--  src/backend/commands/cluster.c            38
-rw-r--r--  src/backend/commands/comment.c            50
-rw-r--r--  src/backend/commands/conversioncmds.c     11
-rw-r--r--  src/backend/commands/copy.c              219
-rw-r--r--  src/backend/commands/dbcommands.c         73
-rw-r--r--  src/backend/commands/define.c              8
-rw-r--r--  src/backend/commands/explain.c             4
-rw-r--r--  src/backend/commands/functioncmds.c       32
-rw-r--r--  src/backend/commands/indexcmds.c          79
-rw-r--r--  src/backend/commands/opclasscmds.c        39
-rw-r--r--  src/backend/commands/operatorcmds.c       17
-rw-r--r--  src/backend/commands/portalcmds.c         34
-rw-r--r--  src/backend/commands/prepare.c             7
-rw-r--r--  src/backend/commands/proclang.c           18
-rw-r--r--  src/backend/commands/schemacmds.c         24
-rw-r--r--  src/backend/commands/sequence.c           14
-rw-r--r--  src/backend/commands/tablecmds.c         928
-rw-r--r--  src/backend/commands/tablespace.c        175
-rw-r--r--  src/backend/commands/trigger.c           106
-rw-r--r--  src/backend/commands/typecmds.c           65
-rw-r--r--  src/backend/commands/user.c               28
-rw-r--r--  src/backend/commands/vacuum.c            364
-rw-r--r--  src/backend/commands/vacuumlazy.c         36
-rw-r--r--  src/backend/commands/variable.c           69
-rw-r--r--  src/backend/commands/view.c                6
29 files changed, 1367 insertions, 1304 deletions
diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c
index bc3affcf4bf..fcbd1df98dc 100644
--- a/src/backend/commands/aggregatecmds.c
+++ b/src/backend/commands/aggregatecmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.20 2004/08/29 04:12:29 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.21 2004/08/29 05:06:41 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -223,9 +223,9 @@ RenameAggregate(List *name, TypeName *basetype, const char *newname)
/*
* if a basetype is passed in, then attempt to find an aggregate for
- * that specific type; else attempt to find an aggregate with a basetype
- * of ANYOID. This means that the aggregate applies to all basetypes
- * (eg, COUNT).
+ * that specific type; else attempt to find an aggregate with a
+ * basetype of ANYOID. This means that the aggregate applies to all
+ * basetypes (eg, COUNT).
*/
if (basetype)
basetypeOid = typenameTypeId(basetype);
@@ -302,9 +302,9 @@ AlterAggregateOwner(List *name, TypeName *basetype, AclId newOwnerSysId)
/*
* if a basetype is passed in, then attempt to find an aggregate for
- * that specific type; else attempt to find an aggregate with a basetype
- * of ANYOID. This means that the aggregate applies to all basetypes
- * (eg, COUNT).
+ * that specific type; else attempt to find an aggregate with a
+ * basetype of ANYOID. This means that the aggregate applies to all
+ * basetypes (eg, COUNT).
*/
if (basetype)
basetypeOid = typenameTypeId(basetype);
@@ -322,7 +322,7 @@ AlterAggregateOwner(List *name, TypeName *basetype, AclId newOwnerSysId)
elog(ERROR, "cache lookup failed for function %u", procOid);
procForm = (Form_pg_proc) GETSTRUCT(tup);
- /*
+ /*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
@@ -334,7 +334,10 @@ AlterAggregateOwner(List *name, TypeName *basetype, AclId newOwnerSysId)
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to change owner")));
- /* Modify the owner --- okay to scribble on tup because it's a copy */
+ /*
+ * Modify the owner --- okay to scribble on tup because it's a
+ * copy
+ */
procForm->proowner = newOwnerSysId;
simple_heap_update(rel, &tup->t_self, tup);
diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c
index 11d9c828f69..3e08a551f65 100644
--- a/src/backend/commands/alter.c
+++ b/src/backend/commands/alter.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/alter.c,v 1.10 2004/08/29 04:12:29 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/alter.c,v 1.11 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -38,7 +38,7 @@
/*
- * Executes an ALTER OBJECT / RENAME TO statement. Based on the object
+ * Executes an ALTER OBJECT / RENAME TO statement. Based on the object
* type, the function appropriate to that type is executed.
*/
void
@@ -153,7 +153,7 @@ ExecRenameStmt(RenameStmt *stmt)
void
ExecAlterOwnerStmt(AlterOwnerStmt *stmt)
{
- AclId newowner = get_usesysid(stmt->newowner);
+ AclId newowner = get_usesysid(stmt->newowner);
switch (stmt->objectType)
{
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 45aff53796d..ce7db272113 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.75 2004/08/29 04:12:29 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.76 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -42,9 +42,9 @@
/* Data structure for Algorithm S from Knuth 3.4.2 */
typedef struct
{
- BlockNumber N; /* number of blocks, known in advance */
+ BlockNumber N; /* number of blocks, known in advance */
int n; /* desired sample size */
- BlockNumber t; /* current block number */
+ BlockNumber t; /* current block number */
int m; /* blocks selected so far */
} BlockSamplerData;
typedef BlockSamplerData *BlockSampler;
@@ -68,13 +68,13 @@ static MemoryContext anl_context = NULL;
static void BlockSampler_Init(BlockSampler bs, BlockNumber nblocks,
- int samplesize);
+ int samplesize);
static bool BlockSampler_HasMore(BlockSampler bs);
static BlockNumber BlockSampler_Next(BlockSampler bs);
static void compute_index_stats(Relation onerel, double totalrows,
- AnlIndexData *indexdata, int nindexes,
- HeapTuple *rows, int numrows,
- MemoryContext col_context);
+ AnlIndexData *indexdata, int nindexes,
+ HeapTuple *rows, int numrows,
+ MemoryContext col_context);
static VacAttrStats *examine_attribute(Relation onerel, int attnum);
static int acquire_sample_rows(Relation onerel, HeapTuple *rows,
int targrows, double *totalrows);
@@ -157,9 +157,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
}
/*
- * Check that it's a plain table; we used to do this in
- * get_rel_oids() but seems safer to check after we've locked the
- * relation.
+ * Check that it's a plain table; we used to do this in get_rel_oids()
+ * but seems safer to check after we've locked the relation.
*/
if (onerel->rd_rel->relkind != RELKIND_RELATION)
{
@@ -239,9 +238,10 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
}
/*
- * Open all indexes of the relation, and see if there are any analyzable
- * columns in the indexes. We do not analyze index columns if there was
- * an explicit column list in the ANALYZE command, however.
+ * Open all indexes of the relation, and see if there are any
+ * analyzable columns in the indexes. We do not analyze index columns
+ * if there was an explicit column list in the ANALYZE command,
+ * however.
*/
vac_open_indexes(onerel, &nindexes, &Irel);
hasindex = (nindexes > 0);
@@ -253,10 +253,10 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
for (ind = 0; ind < nindexes; ind++)
{
AnlIndexData *thisdata = &indexdata[ind];
- IndexInfo *indexInfo;
+ IndexInfo *indexInfo;
thisdata->indexInfo = indexInfo = BuildIndexInfo(Irel[ind]);
- thisdata->tupleFract = 1.0; /* fix later if partial */
+ thisdata->tupleFract = 1.0; /* fix later if partial */
if (indexInfo->ii_Expressions != NIL && vacstmt->va_cols == NIL)
{
ListCell *indexpr_item = list_head(indexInfo->ii_Expressions);
@@ -273,25 +273,26 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
/* Found an index expression */
Node *indexkey;
- if (indexpr_item == NULL) /* shouldn't happen */
+ if (indexpr_item == NULL) /* shouldn't happen */
elog(ERROR, "too few entries in indexprs list");
indexkey = (Node *) lfirst(indexpr_item);
indexpr_item = lnext(indexpr_item);
/*
- * Can't analyze if the opclass uses a storage type
- * different from the expression result type. We'd
- * get confused because the type shown in pg_attribute
- * for the index column doesn't match what we are
- * getting from the expression. Perhaps this can be
- * fixed someday, but for now, punt.
+ * Can't analyze if the opclass uses a storage
+ * type different from the expression result type.
+ * We'd get confused because the type shown in
+ * pg_attribute for the index column doesn't match
+ * what we are getting from the expression.
+ * Perhaps this can be fixed someday, but for now,
+ * punt.
*/
if (exprType(indexkey) !=
Irel[ind]->rd_att->attrs[i]->atttypid)
continue;
thisdata->vacattrstats[tcnt] =
- examine_attribute(Irel[ind], i+1);
+ examine_attribute(Irel[ind], i + 1);
if (thisdata->vacattrstats[tcnt] != NULL)
{
tcnt++;
@@ -401,10 +402,10 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
/*
* If we are running a standalone ANALYZE, update pages/tuples stats
- * in pg_class. We know the accurate page count from the smgr,
- * but only an approximate number of tuples; therefore, if we are part
- * of VACUUM ANALYZE do *not* overwrite the accurate count already
- * inserted by VACUUM. The same consideration applies to indexes.
+ * in pg_class. We know the accurate page count from the smgr, but
+ * only an approximate number of tuples; therefore, if we are part of
+ * VACUUM ANALYZE do *not* overwrite the accurate count already
+ * inserted by VACUUM. The same consideration applies to indexes.
*/
if (!vacstmt->vacuum)
{
@@ -446,7 +447,7 @@ compute_index_stats(Relation onerel, double totalrows,
MemoryContext col_context)
{
MemoryContext ind_context,
- old_context;
+ old_context;
TupleDesc heapDescriptor;
Datum attdata[INDEX_MAX_KEYS];
char nulls[INDEX_MAX_KEYS];
@@ -465,7 +466,7 @@ compute_index_stats(Relation onerel, double totalrows,
for (ind = 0; ind < nindexes; ind++)
{
AnlIndexData *thisdata = &indexdata[ind];
- IndexInfo *indexInfo = thisdata->indexInfo;
+ IndexInfo *indexInfo = thisdata->indexInfo;
int attr_cnt = thisdata->attr_cnt;
TupleTable tupleTable;
TupleTableSlot *slot;
@@ -526,8 +527,9 @@ compute_index_stats(Relation onerel, double totalrows,
if (attr_cnt > 0)
{
/*
- * Evaluate the index row to compute expression values.
- * We could do this by hand, but FormIndexDatum is convenient.
+ * Evaluate the index row to compute expression values. We
+ * could do this by hand, but FormIndexDatum is
+ * convenient.
*/
FormIndexDatum(indexInfo,
heapTuple,
@@ -535,16 +537,17 @@ compute_index_stats(Relation onerel, double totalrows,
estate,
attdata,
nulls);
+
/*
* Save just the columns we care about.
*/
for (i = 0; i < attr_cnt; i++)
{
VacAttrStats *stats = thisdata->vacattrstats[i];
- int attnum = stats->attr->attnum;
+ int attnum = stats->attr->attnum;
- exprvals[tcnt] = attdata[attnum-1];
- exprnulls[tcnt] = (nulls[attnum-1] == 'n');
+ exprvals[tcnt] = attdata[attnum - 1];
+ exprnulls[tcnt] = (nulls[attnum - 1] == 'n');
tcnt++;
}
}
@@ -552,7 +555,8 @@ compute_index_stats(Relation onerel, double totalrows,
/*
* Having counted the number of rows that pass the predicate in
- * the sample, we can estimate the total number of rows in the index.
+ * the sample, we can estimate the total number of rows in the
+ * index.
*/
thisdata->tupleFract = (double) numindexrows / (double) numrows;
totalindexrows = ceil(thisdata->tupleFract * totalrows);
@@ -630,7 +634,7 @@ examine_attribute(Relation onerel, int attnum)
stats->tupattnum = attnum;
/*
- * Call the type-specific typanalyze function. If none is specified,
+ * Call the type-specific typanalyze function. If none is specified,
* use std_typanalyze().
*/
if (OidIsValid(stats->attrtype->typanalyze))
@@ -667,10 +671,10 @@ static void
BlockSampler_Init(BlockSampler bs, BlockNumber nblocks, int samplesize)
{
bs->N = nblocks; /* measured table size */
+
/*
- * If we decide to reduce samplesize for tables that have less or
- * not much more than samplesize blocks, here is the place to do
- * it.
+ * If we decide to reduce samplesize for tables that have less or not
+ * much more than samplesize blocks, here is the place to do it.
*/
bs->n = samplesize;
bs->t = 0; /* blocks scanned so far */
@@ -686,10 +690,10 @@ BlockSampler_HasMore(BlockSampler bs)
static BlockNumber
BlockSampler_Next(BlockSampler bs)
{
- BlockNumber K = bs->N - bs->t; /* remaining blocks */
+ BlockNumber K = bs->N - bs->t; /* remaining blocks */
int k = bs->n - bs->m; /* blocks still to sample */
- double p; /* probability to skip block */
- double V; /* random */
+ double p; /* probability to skip block */
+ double V; /* random */
Assert(BlockSampler_HasMore(bs)); /* hence K > 0 and k > 0 */
@@ -706,7 +710,7 @@ BlockSampler_Next(BlockSampler bs)
* If we are to skip, we should advance t (hence decrease K), and
* repeat the same probabilistic test for the next block. The naive
* implementation thus requires a random_fract() call for each block
- * number. But we can reduce this to one random_fract() call per
+ * number. But we can reduce this to one random_fract() call per
* selected block, by noting that each time the while-test succeeds,
* we can reinterpret V as a uniform random number in the range 0 to p.
* Therefore, instead of choosing a new V, we just adjust p to be
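
The reindented comment above describes the skip step of Knuth's Algorithm S as used by the block sampler. As a minimal standalone sketch of that selection step (not the patched source itself; the BlockSampler fields are passed as plain variables and random_fract() is defined here with rand() purely for illustration), the loop looks roughly like this:

    #include <stdlib.h>

    /* uniform random value in (0,1); stands in for the backend's random_fract() */
    static double
    random_fract(void)
    {
        return ((double) rand() + 0.5) / ((double) RAND_MAX + 1.0);
    }

    /*
     * One Algorithm S selection step: N = total blocks, n = sample size,
     * *t = blocks scanned so far, *m = blocks selected so far.  Returns the
     * next selected block number, drawing one random value per selection.
     */
    static unsigned
    block_sampler_next(unsigned N, int n, unsigned *t, int *m)
    {
        unsigned    K = N - *t;     /* remaining blocks */
        int         k = n - *m;     /* blocks still to sample */
        double      p;              /* probability to skip the current block */
        double      V;              /* uniform random number */

        if ((unsigned) k >= K)
        {
            /* need all the remaining blocks */
            (*m)++;
            return (*t)++;
        }

        V = random_fract();
        p = 1.0 - (double) k / (double) K;
        while (V < p)
        {
            (*t)++;                 /* skip this block */
            K--;
            /* reinterpret V as uniform in 0..p: shrink the cutoff instead */
            p *= 1.0 - (double) k / (double) K;
        }

        (*m)++;                     /* select the current block */
        return (*t)++;
    }

Each time the while-test succeeds a block is skipped, and multiplying p by the next skip probability is what lets a single random_fract() call cover the whole run of skips before a selection.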
@@ -770,11 +774,11 @@ static int
acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
double *totalrows)
{
- int numrows = 0; /* # rows collected */
- double liverows = 0; /* # rows seen */
+ int numrows = 0; /* # rows collected */
+ double liverows = 0; /* # rows seen */
double deadrows = 0;
- double rowstoskip = -1; /* -1 means not set yet */
- BlockNumber totalblocks;
+ double rowstoskip = -1; /* -1 means not set yet */
+ BlockNumber totalblocks;
BlockSamplerData bs;
double rstate;
@@ -826,14 +830,13 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
{
/*
* The first targrows live rows are simply copied into the
- * reservoir.
- * Then we start replacing tuples in the sample until
- * we reach the end of the relation. This algorithm is
- * from Jeff Vitter's paper (see full citation below).
+ * reservoir. Then we start replacing tuples in the sample
+ * until we reach the end of the relation. This algorithm
+ * is from Jeff Vitter's paper (see full citation below).
* It works by repeatedly computing the number of tuples
* to skip before selecting a tuple, which replaces a
- * randomly chosen element of the reservoir (current
- * set of tuples). At all times the reservoir is a true
+ * randomly chosen element of the reservoir (current set
+ * of tuples). At all times the reservoir is a true
* random sample of the tuples we've passed over so far,
* so when we fall off the end of the relation we're done.
*/
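
The reservoir behaviour that comment describes can be illustrated apart from the heap-scan machinery. The following is a simplified sketch over an int array (this is the plain Algorithm R form; the Vitter variant that acquire_sample_rows actually uses only reduces how many random numbers are drawn, by computing a skip count, without changing the resulting distribution):

    #include <stdlib.h>

    /* uniform random value in [0,1) */
    static double
    uniform01(void)
    {
        return (double) rand() / ((double) RAND_MAX + 1.0);
    }

    /*
     * Fill reservoir[] (capacity targrows) with a random sample of stream[].
     * Returns the number of rows actually placed in the reservoir.
     */
    static int
    sample_rows(const int *stream, int nstream, int *reservoir, int targrows)
    {
        int     numrows = 0;        /* rows currently in the reservoir */
        double  seen = 0;           /* rows processed so far */
        int     i;

        for (i = 0; i < nstream; i++)
        {
            seen += 1;
            if (numrows < targrows)
                reservoir[numrows++] = stream[i];   /* first targrows rows: copy */
            else if (uniform01() < (double) targrows / seen)
            {
                /* replace a randomly chosen element of the reservoir */
                int     j = (int) (targrows * uniform01());

                reservoir[j] = stream[i];
            }
        }
        return numrows;             /* may be < targrows for a short stream */
    }

At every step the reservoir is a true random sample of the rows seen so far, which is why the scan can simply stop when it falls off the end of the relation.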
@@ -842,10 +845,10 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
else
{
/*
- * t in Vitter's paper is the number of records already
- * processed. If we need to compute a new S value, we
- * must use the not-yet-incremented value of liverows
- * as t.
+ * t in Vitter's paper is the number of records
+ * already processed. If we need to compute a new S
+ * value, we must use the not-yet-incremented value of
+ * liverows as t.
*/
if (rowstoskip < 0)
rowstoskip = get_next_S(liverows, targrows, &rstate);
@@ -853,10 +856,10 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
if (rowstoskip <= 0)
{
/*
- * Found a suitable tuple, so save it,
- * replacing one old tuple at random
+ * Found a suitable tuple, so save it, replacing
+ * one old tuple at random
*/
- int k = (int) (targrows * random_fract());
+ int k = (int) (targrows * random_fract());
Assert(k >= 0 && k < targrows);
heap_freetuple(rows[k]);
@@ -874,9 +877,9 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
else
{
/*
- * Count dead rows, but not empty slots. This information is
- * currently not used, but it seems likely we'll want it
- * someday.
+ * Count dead rows, but not empty slots. This information
+ * is currently not used, but it seems likely we'll want
+ * it someday.
*/
if (targtuple.t_data != NULL)
deadrows += 1;
@@ -888,12 +891,12 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
}
/*
- * If we didn't find as many tuples as we wanted then we're done.
- * No sort is needed, since they're already in order.
+ * If we didn't find as many tuples as we wanted then we're done. No
+ * sort is needed, since they're already in order.
*
* Otherwise we need to sort the collected tuples by position
- * (itempointer). It's not worth worrying about corner cases
- * where the tuples are already sorted.
+ * (itempointer). It's not worth worrying about corner cases where
+ * the tuples are already sorted.
*/
if (numrows == targrows)
qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
@@ -907,7 +910,7 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
*totalrows = 0.0;
/*
- * Emit some interesting relation info
+ * Emit some interesting relation info
*/
ereport(elevel,
(errmsg("\"%s\": scanned %d of %u pages, "
@@ -1128,10 +1131,10 @@ update_attstats(Oid relid, int natts, VacAttrStats **vacattrstats)
i = 0;
values[i++] = ObjectIdGetDatum(relid); /* starelid */
- values[i++] = Int16GetDatum(stats->attr->attnum); /* staattnum */
- values[i++] = Float4GetDatum(stats->stanullfrac); /* stanullfrac */
+ values[i++] = Int16GetDatum(stats->attr->attnum); /* staattnum */
+ values[i++] = Float4GetDatum(stats->stanullfrac); /* stanullfrac */
values[i++] = Int32GetDatum(stats->stawidth); /* stawidth */
- values[i++] = Float4GetDatum(stats->stadistinct); /* stadistinct */
+ values[i++] = Float4GetDatum(stats->stadistinct); /* stadistinct */
for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
{
values[i++] = Int16GetDatum(stats->stakind[k]); /* stakindN */
@@ -1305,13 +1308,13 @@ static int *datumCmpTupnoLink;
static void compute_minimal_stats(VacAttrStatsP stats,
- AnalyzeAttrFetchFunc fetchfunc,
- int samplerows,
- double totalrows);
+ AnalyzeAttrFetchFunc fetchfunc,
+ int samplerows,
+ double totalrows);
static void compute_scalar_stats(VacAttrStatsP stats,
- AnalyzeAttrFetchFunc fetchfunc,
- int samplerows,
- double totalrows);
+ AnalyzeAttrFetchFunc fetchfunc,
+ int samplerows,
+ double totalrows);
static int compare_scalars(const void *a, const void *b);
static int compare_mcvs(const void *a, const void *b);
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index a0cb1cc393a..f9d257d1a11 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.114 2004/08/29 04:12:29 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.115 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -106,7 +106,8 @@
*/
static List *pendingNotifies = NIL;
-static List *upperPendingNotifies = NIL; /* list of upper-xact lists */
+static List *upperPendingNotifies = NIL; /* list of upper-xact
+ * lists */
/*
* State for inbound notifies consists of two flags: one saying whether
@@ -524,25 +525,27 @@ AtCommit_Notify(void)
rTuple = heap_modifytuple(lTuple, lRel,
value, nulls, repl);
+
/*
* We cannot use simple_heap_update here because the tuple
* could have been modified by an uncommitted transaction;
* specifically, since UNLISTEN releases exclusive lock on
- * the table before commit, the other guy could already have
- * tried to unlisten. There are no other cases where we
- * should be able to see an uncommitted update or delete.
- * Therefore, our response to a HeapTupleBeingUpdated result
- * is just to ignore it. We do *not* wait for the other
- * guy to commit --- that would risk deadlock, and we don't
- * want to block while holding the table lock anyway for
- * performance reasons. We also ignore HeapTupleUpdated,
- * which could occur if the other guy commits between our
- * heap_getnext and heap_update calls.
+ * the table before commit, the other guy could already
+ * have tried to unlisten. There are no other cases where
+ * we should be able to see an uncommitted update or
+ * delete. Therefore, our response to a
+ * HeapTupleBeingUpdated result is just to ignore it. We
+ * do *not* wait for the other guy to commit --- that
+ * would risk deadlock, and we don't want to block while
+ * holding the table lock anyway for performance reasons.
+ * We also ignore HeapTupleUpdated, which could occur if
+ * the other guy commits between our heap_getnext and
+ * heap_update calls.
*/
result = heap_update(lRel, &lTuple->t_self, rTuple,
&ctid,
GetCurrentCommandId(), SnapshotAny,
- false /* no wait for commit */);
+ false /* no wait for commit */ );
switch (result)
{
case HeapTupleSelfUpdated:
@@ -620,7 +623,7 @@ AtAbort_Notify(void)
void
AtSubStart_Notify(void)
{
- MemoryContext old_cxt;
+ MemoryContext old_cxt;
/* Keep the list-of-lists in TopTransactionContext for simplicity */
old_cxt = MemoryContextSwitchTo(TopTransactionContext);
@@ -640,13 +643,14 @@ AtSubStart_Notify(void)
void
AtSubCommit_Notify(void)
{
- List *parentPendingNotifies;
+ List *parentPendingNotifies;
parentPendingNotifies = (List *) linitial(upperPendingNotifies);
upperPendingNotifies = list_delete_first(upperPendingNotifies);
/*
- * We could try to eliminate duplicates here, but it seems not worthwhile.
+ * We could try to eliminate duplicates here, but it seems not
+ * worthwhile.
*/
pendingNotifies = list_concat(parentPendingNotifies, pendingNotifies);
}
@@ -836,7 +840,7 @@ EnableNotifyInterrupt(void)
bool
DisableNotifyInterrupt(void)
{
- bool result = (notifyInterruptEnabled != 0);
+ bool result = (notifyInterruptEnabled != 0);
notifyInterruptEnabled = 0;
@@ -914,11 +918,12 @@ ProcessIncomingNotify(void)
relname, (int) sourcePID);
NotifyMyFrontEnd(relname, sourcePID);
+
/*
* Rewrite the tuple with 0 in notification column.
*
- * simple_heap_update is safe here because no one else would
- * have tried to UNLISTEN us, so there can be no uncommitted
+ * simple_heap_update is safe here because no one else would have
+ * tried to UNLISTEN us, so there can be no uncommitted
* changes.
*/
rTuple = heap_modifytuple(lTuple, lRel, value, nulls, repl);
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 7b618db0727..0bce21ffb96 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.128 2004/08/29 04:12:29 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.129 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -286,8 +286,8 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
/*
* We grab exclusive access to the target rel and index for the
* duration of the transaction. (This is redundant for the single-
- * transaction case, since cluster() already did it.) The index
- * lock is taken inside check_index_is_clusterable.
+ * transaction case, since cluster() already did it.) The index lock
+ * is taken inside check_index_is_clusterable.
*/
OldHeap = heap_open(rvtc->tableOid, AccessExclusiveLock);
@@ -391,7 +391,7 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid)
if (isOtherTempNamespace(RelationGetNamespace(OldHeap)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot cluster temporary tables of other sessions")));
+ errmsg("cannot cluster temporary tables of other sessions")));
/* Drop relcache refcnt on OldIndex, but keep lock */
index_close(OldIndex);
@@ -438,7 +438,7 @@ mark_index_clustered(Relation rel, Oid indexOid)
foreach(index, RelationGetIndexList(rel))
{
- Oid thisIndexOid = lfirst_oid(index);
+ Oid thisIndexOid = lfirst_oid(index);
indexTuple = SearchSysCacheCopy(INDEXRELID,
ObjectIdGetDatum(thisIndexOid),
@@ -540,8 +540,8 @@ rebuild_relation(Relation OldHeap, Oid indexOid)
/* performDeletion does CommandCounterIncrement at end */
/*
- * Rebuild each index on the relation (but not the toast table,
- * which is all-new at this point). We do not need
+ * Rebuild each index on the relation (but not the toast table, which
+ * is all-new at this point). We do not need
* CommandCounterIncrement() because reindex_relation does it.
*/
reindex_relation(tableOid, false);
@@ -569,7 +569,7 @@ make_new_heap(Oid OIDOldHeap, const char *NewName, Oid NewTableSpace)
OIDNewHeap = heap_create_with_catalog(NewName,
RelationGetNamespace(OldHeap),
- NewTableSpace,
+ NewTableSpace,
tupdesc,
OldHeap->rd_rel->relkind,
OldHeap->rd_rel->relisshared,
@@ -745,8 +745,8 @@ swap_relation_files(Oid r1, Oid r2)
* their new owning relations. Otherwise the wrong one will get
* dropped ...
*
- * NOTE: it is possible that only one table has a toast table; this
- * can happen in CLUSTER if there were dropped columns in the old table,
+ * NOTE: it is possible that only one table has a toast table; this can
+ * happen in CLUSTER if there were dropped columns in the old table,
* and in ALTER TABLE when adding or changing type of columns.
*
* NOTE: at present, a TOAST table's only dependency is the one on its
@@ -802,15 +802,15 @@ swap_relation_files(Oid r1, Oid r2)
/*
* Blow away the old relcache entries now. We need this kluge because
* relcache.c keeps a link to the smgr relation for the physical file,
- * and that will be out of date as soon as we do CommandCounterIncrement.
- * Whichever of the rels is the second to be cleared during cache
- * invalidation will have a dangling reference to an already-deleted smgr
- * relation. Rather than trying to avoid this by ordering operations
- * just so, it's easiest to not have the relcache entries there at all.
- * (Fortunately, since one of the entries is local in our transaction,
- * it's sufficient to clear out our own relcache this way; the problem
- * cannot arise for other backends when they see our update on the
- * non-local relation.)
+ * and that will be out of date as soon as we do
+ * CommandCounterIncrement. Whichever of the rels is the second to be
+ * cleared during cache invalidation will have a dangling reference to
+ * an already-deleted smgr relation. Rather than trying to avoid this
+ * by ordering operations just so, it's easiest to not have the
+ * relcache entries there at all. (Fortunately, since one of the
+ * entries is local in our transaction, it's sufficient to clear out
+ * our own relcache this way; the problem cannot arise for other
+ * backends when they see our update on the non-local relation.)
*/
RelationForgetRelation(r1);
RelationForgetRelation(r2);
diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c
index 8e3e3a8cafe..8a1b2e0c4c8 100644
--- a/src/backend/commands/comment.c
+++ b/src/backend/commands/comment.c
@@ -7,7 +7,7 @@
* Copyright (c) 1996-2004, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.78 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.79 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -123,10 +123,10 @@ CommentObject(CommentStmt *stmt)
CommentOpClass(stmt->objname, stmt->objargs, stmt->comment);
break;
case OBJECT_LARGEOBJECT:
- CommentLargeObject(stmt->objname, stmt->comment);
+ CommentLargeObject(stmt->objname, stmt->comment);
break;
case OBJECT_CAST:
- CommentCast(stmt->objname, stmt->objargs, stmt->comment);
+ CommentCast(stmt->objname, stmt->objargs, stmt->comment);
break;
default:
elog(ERROR, "unrecognized object type: %d",
@@ -401,8 +401,8 @@ CommentAttribute(List *qualname, char *comment)
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- attrname, RelationGetRelationName(relation))));
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ attrname, RelationGetRelationName(relation))));
/* Create the comment using the relation's oid */
@@ -462,7 +462,8 @@ CommentDatabase(List *qualname, char *comment)
/* Only allow comments on the current database */
if (oid != MyDatabaseId)
{
- ereport(WARNING, /* throw just a warning so pg_restore doesn't fail */
+ ereport(WARNING, /* throw just a warning so pg_restore
+ * doesn't fail */
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("database comments may only be applied to the current database")));
return;
@@ -586,7 +587,7 @@ CommentRule(List *qualname, char *comment)
ForwardScanDirection)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("there are multiple rules named \"%s\"", rulename),
+ errmsg("there are multiple rules named \"%s\"", rulename),
errhint("Specify a relation name as well as a rule name.")));
heap_endscan(scanDesc);
@@ -615,8 +616,8 @@ CommentRule(List *qualname, char *comment)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("rule \"%s\" for relation \"%s\" does not exist",
- rulename, RelationGetRelationName(relation))));
+ errmsg("rule \"%s\" for relation \"%s\" does not exist",
+ rulename, RelationGetRelationName(relation))));
Assert(reloid == ((Form_pg_rewrite) GETSTRUCT(tuple))->ev_class);
ruleoid = HeapTupleGetOid(tuple);
ReleaseSysCache(tuple);
@@ -832,8 +833,8 @@ CommentTrigger(List *qualname, char *comment)
if (!HeapTupleIsValid(triggertuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("trigger \"%s\" for table \"%s\" does not exist",
- trigname, RelationGetRelationName(relation))));
+ errmsg("trigger \"%s\" for table \"%s\" does not exist",
+ trigname, RelationGetRelationName(relation))));
oid = HeapTupleGetOid(triggertuple);
@@ -924,8 +925,8 @@ CommentConstraint(List *qualname, char *comment)
if (!OidIsValid(conOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("constraint \"%s\" for table \"%s\" does not exist",
- conName, RelationGetRelationName(relation))));
+ errmsg("constraint \"%s\" for table \"%s\" does not exist",
+ conName, RelationGetRelationName(relation))));
/* Create the comment with the pg_constraint oid */
CreateComments(conOid, RelationGetRelid(pg_constraint), 0, comment);
@@ -1003,7 +1004,7 @@ CommentLanguage(List *qualname, char *comment)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to comment on procedural language")));
+ errmsg("must be superuser to comment on procedural language")));
/* pg_language doesn't have a hard-coded OID, so must look it up */
classoid = get_system_catalog_relid(LanguageRelationName);
@@ -1084,7 +1085,7 @@ CommentOpClass(List *qualname, List *arguments, char *comment)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator class \"%s\" does not exist for access method \"%s\"",
- NameListToString(qualname), amname)));
+ NameListToString(qualname), amname)));
opcID = HeapTupleGetOid(tuple);
@@ -1116,7 +1117,7 @@ CommentLargeObject(List *qualname, char *comment)
{
Oid loid;
Oid classoid;
- Node *node;
+ Node *node;
Assert(list_length(qualname) == 1);
node = (Node *) linitial(qualname);
@@ -1127,19 +1128,20 @@ CommentLargeObject(List *qualname, char *comment)
loid = intVal(node);
break;
case T_Float:
+
/*
* Values too large for int4 will be represented as Float
- * constants by the lexer. Accept these if they are valid
- * OID strings.
+ * constants by the lexer. Accept these if they are valid OID
+ * strings.
*/
loid = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(strVal(node))));
+ CStringGetDatum(strVal(node))));
break;
default:
elog(ERROR, "unrecognized node type: %d",
(int) nodeTag(node));
/* keep compiler quiet */
- loid = InvalidOid;
+ loid = InvalidOid;
}
/* check that the large object exists */
@@ -1152,7 +1154,7 @@ CommentLargeObject(List *qualname, char *comment)
classoid = get_system_catalog_relid(LargeObjectRelationName);
/* Call CreateComments() to create/drop the comments */
- CreateComments(loid, classoid, 0, comment);
+ CreateComments(loid, classoid, 0, comment);
}
/*
@@ -1182,7 +1184,7 @@ CommentCast(List *qualname, List *arguments, char *comment)
Assert(list_length(arguments) == 1);
targettype = (TypeName *) linitial(arguments);
Assert(IsA(targettype, TypeName));
-
+
sourcetypeid = typenameTypeId(sourcetype);
if (!OidIsValid(sourcetypeid))
ereport(ERROR,
@@ -1210,7 +1212,7 @@ CommentCast(List *qualname, List *arguments, char *comment)
/* Get the OID of the cast */
castOid = HeapTupleGetOid(tuple);
-
+
/* Permission check */
if (!pg_type_ownercheck(sourcetypeid, GetUserId())
&& !pg_type_ownercheck(targettypeid, GetUserId()))
@@ -1226,5 +1228,5 @@ CommentCast(List *qualname, List *arguments, char *comment)
classoid = get_system_catalog_relid(CastRelationName);
/* Call CreateComments() to create/drop the comments */
- CreateComments(castOid, classoid, 0, comment);
+ CreateComments(castOid, classoid, 0, comment);
}
diff --git a/src/backend/commands/conversioncmds.c b/src/backend/commands/conversioncmds.c
index 44b2ef266f7..751e0b9152e 100644
--- a/src/backend/commands/conversioncmds.c
+++ b/src/backend/commands/conversioncmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.14 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.15 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -181,7 +181,7 @@ AlterConversionOwner(List *name, AclId newOwnerSysId)
Oid conversionOid;
HeapTuple tup;
Relation rel;
- Form_pg_conversion convForm;
+ Form_pg_conversion convForm;
rel = heap_openr(ConversionRelationName, RowExclusiveLock);
@@ -200,7 +200,7 @@ AlterConversionOwner(List *name, AclId newOwnerSysId)
convForm = (Form_pg_conversion) GETSTRUCT(tup);
- /*
+ /*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
@@ -212,7 +212,10 @@ AlterConversionOwner(List *name, AclId newOwnerSysId)
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to change owner")));
- /* Modify the owner --- okay to scribble on tup because it's a copy */
+ /*
+ * Modify the owner --- okay to scribble on tup because it's a
+ * copy
+ */
convForm->conowner = newOwnerSysId;
simple_heap_update(rel, &tup->t_self, tup);
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index d060785d8d6..5793c0b2bbb 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.229 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.230 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -133,22 +133,22 @@ static void DoCopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
char *delim, char *null_print, bool csv_mode, char *quote,
char *escape, List *force_quote_atts, bool fe_copy);
static void CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
- char *delim, char *null_print, bool csv_mode, char *quote, char *escape,
+ char *delim, char *null_print, bool csv_mode, char *quote, char *escape,
List *force_quote_atts);
static void CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
- char *delim, char *null_print, bool csv_mode, char *quote, char *escape,
+ char *delim, char *null_print, bool csv_mode, char *quote, char *escape,
List *force_notnull_atts);
static bool CopyReadLine(void);
static char *CopyReadAttribute(const char *delim, const char *null_print,
- CopyReadResult *result, bool *isnull);
+ CopyReadResult *result, bool *isnull);
static char *CopyReadAttributeCSV(const char *delim, const char *null_print,
- char *quote, char *escape,
- CopyReadResult *result, bool *isnull);
+ char *quote, char *escape,
+ CopyReadResult *result, bool *isnull);
static Datum CopyReadBinaryAttribute(int column_no, FmgrInfo *flinfo,
Oid typioparam, bool *isnull);
static void CopyAttributeOut(char *string, char *delim);
static void CopyAttributeOutCSV(char *string, char *delim, char *quote,
- char *escape, bool force_quote);
+ char *escape, bool force_quote);
static List *CopyGetAttnums(Relation rel, List *attnamelist);
static void limit_printout_length(StringInfo buf);
@@ -413,7 +413,7 @@ CopyGetData(void *databuf, int datasize)
/* Try to receive another message */
int mtype;
- readmessage:
+ readmessage:
mtype = pq_getbyte();
if (mtype == EOF)
ereport(ERROR,
@@ -439,11 +439,12 @@ CopyGetData(void *databuf, int datasize)
break;
case 'H': /* Flush */
case 'S': /* Sync */
+
/*
* Ignore Flush/Sync for the convenience of
* client libraries (such as libpq) that may
- * send those without noticing that the command
- * they just sent was COPY.
+ * send those without noticing that the
+ * command they just sent was COPY.
*/
goto readmessage;
default:
@@ -693,7 +694,7 @@ DoCopy(const CopyStmt *stmt)
bool fe_copy = false;
bool binary = false;
bool oids = false;
- bool csv_mode = false;
+ bool csv_mode = false;
char *delim = NULL;
char *quote = NULL;
char *escape = NULL;
@@ -773,7 +774,7 @@ DoCopy(const CopyStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("conflicting or redundant options")));
- force_quote = (List *)defel->arg;
+ force_quote = (List *) defel->arg;
}
else if (strcmp(defel->defname, "force_notnull") == 0)
{
@@ -781,7 +782,7 @@ DoCopy(const CopyStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("conflicting or redundant options")));
- force_notnull = (List *)defel->arg;
+ force_notnull = (List *) defel->arg;
}
else
elog(ERROR, "option \"%s\" not recognized",
@@ -806,7 +807,7 @@ DoCopy(const CopyStmt *stmt)
/* Set defaults */
if (!delim)
delim = csv_mode ? "," : "\t";
-
+
if (!null_print)
null_print = csv_mode ? "" : "\\N";
@@ -817,7 +818,7 @@ DoCopy(const CopyStmt *stmt)
if (!escape)
escape = quote;
}
-
+
/*
* Only single-character delimiter strings are supported.
*/
@@ -862,7 +863,7 @@ DoCopy(const CopyStmt *stmt)
if (force_quote != NIL && is_from)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY force quote only available using COPY TO")));
+ errmsg("COPY force quote only available using COPY TO")));
/*
* Check force_notnull
@@ -870,11 +871,11 @@ DoCopy(const CopyStmt *stmt)
if (!csv_mode && force_notnull != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY force not null available only in CSV mode")));
+ errmsg("COPY force not null available only in CSV mode")));
if (force_notnull != NIL && !is_from)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY force not null only available using COPY FROM")));
+ errmsg("COPY force not null only available using COPY FROM")));
/*
* Don't allow the delimiter to appear in the null string.
@@ -948,11 +949,11 @@ DoCopy(const CopyStmt *stmt)
if (!list_member_int(attnumlist, attnum))
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("FORCE QUOTE column \"%s\" not referenced by COPY",
- NameStr(attr[attnum - 1]->attname))));
+ errmsg("FORCE QUOTE column \"%s\" not referenced by COPY",
+ NameStr(attr[attnum - 1]->attname))));
}
}
-
+
/*
* Check that FORCE NOT NULL references valid COPY columns
*/
@@ -975,7 +976,7 @@ DoCopy(const CopyStmt *stmt)
NameStr(attr[attnum - 1]->attname))));
}
}
-
+
/*
* Set up variables to avoid per-attribute overhead.
*/
@@ -1152,9 +1153,9 @@ DoCopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
PG_CATCH();
{
/*
- * Make sure we turn off old-style COPY OUT mode upon error.
- * It is okay to do this in all cases, since it does nothing
- * if the mode is not on.
+ * Make sure we turn off old-style COPY OUT mode upon error. It is
+ * okay to do this in all cases, since it does nothing if the mode
+ * is not on.
*/
pq_endcopyout(true);
PG_RE_THROW();
@@ -1202,10 +1203,10 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
{
int attnum = lfirst_int(cur);
Oid out_func_oid;
-
+
if (binary)
getTypeBinaryOutputInfo(attr[attnum - 1]->atttypid,
- &out_func_oid, &typioparams[attnum - 1],
+ &out_func_oid, &typioparams[attnum - 1],
&isvarlena[attnum - 1]);
else
getTypeOutputInfo(attr[attnum - 1]->atttypid,
@@ -1266,6 +1267,7 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
while ((tuple = heap_getnext(scandesc, ForwardScanDirection)) != NULL)
{
bool need_delim = false;
+
CHECK_FOR_INTERRUPTS();
MemoryContextReset(mycontext);
@@ -1325,13 +1327,13 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
{
string = DatumGetCString(FunctionCall3(&out_functions[attnum - 1],
value,
- ObjectIdGetDatum(typioparams[attnum - 1]),
+ ObjectIdGetDatum(typioparams[attnum - 1]),
Int32GetDatum(attr[attnum - 1]->atttypmod)));
if (csv_mode)
{
CopyAttributeOutCSV(string, delim, quote, escape,
- (strcmp(string, null_print) == 0 ||
- force_quote[attnum - 1]));
+ (strcmp(string, null_print) == 0 ||
+ force_quote[attnum - 1]));
}
else
CopyAttributeOut(string, delim);
@@ -1343,7 +1345,7 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
outputbytes = DatumGetByteaP(FunctionCall2(&out_functions[attnum - 1],
value,
- ObjectIdGetDatum(typioparams[attnum - 1])));
+ ObjectIdGetDatum(typioparams[attnum - 1])));
/* We assume the result will not have been toasted */
CopySendInt32(VARSIZE(outputbytes) - VARHDRSZ);
CopySendData(VARDATA(outputbytes),
@@ -1444,7 +1446,7 @@ limit_printout_length(StringInfo buf)
{
#define MAX_COPY_DATA_DISPLAY 100
- int len;
+ int len;
/* Fast path if definitely okay */
if (buf->len <= MAX_COPY_DATA_DISPLAY)
@@ -1551,7 +1553,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
/* Fetch the input function and typioparam info */
if (binary)
getTypeBinaryInputInfo(attr[attnum - 1]->atttypid,
- &in_func_oid, &typioparams[attnum - 1]);
+ &in_func_oid, &typioparams[attnum - 1]);
else
getTypeInputInfo(attr[attnum - 1]->atttypid,
&in_func_oid, &typioparams[attnum - 1]);
@@ -1561,7 +1563,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
force_notnull[attnum - 1] = true;
else
force_notnull[attnum - 1] = false;
-
+
/* Get default info if needed */
if (!list_member_int(attnumlist, attnum))
{
@@ -1603,7 +1605,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
COERCE_IMPLICIT_CAST, false);
constraintexprs[attnum - 1] = ExecPrepareExpr((Expr *) node,
- estate);
+ estate);
hasConstraints = true;
}
}
@@ -1718,10 +1720,10 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
done = CopyReadLine();
/*
- * EOF at start of line means we're done. If we see EOF
- * after some characters, we act as though it was newline
- * followed by EOF, ie, process the line and then exit loop
- * on next iteration.
+ * EOF at start of line means we're done. If we see EOF after
+ * some characters, we act as though it was newline followed
+ * by EOF, ie, process the line and then exit loop on next
+ * iteration.
*/
if (done && line_buf.len == 0)
break;
@@ -1770,29 +1772,29 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
if (csv_mode)
{
string = CopyReadAttributeCSV(delim, null_print, quote,
- escape, &result, &isnull);
+ escape, &result, &isnull);
if (result == UNTERMINATED_FIELD)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("unterminated CSV quoted field")));
+ errmsg("unterminated CSV quoted field")));
}
else
- string = CopyReadAttribute(delim, null_print,
+ string = CopyReadAttribute(delim, null_print,
&result, &isnull);
if (csv_mode && isnull && force_notnull[m])
{
- string = null_print; /* set to NULL string */
+ string = null_print; /* set to NULL string */
isnull = false;
}
- /* we read an SQL NULL, no need to do anything */
+ /* we read an SQL NULL, no need to do anything */
if (!isnull)
{
copy_attname = NameStr(attr[m]->attname);
values[m] = FunctionCall3(&in_functions[m],
CStringGetDatum(string),
- ObjectIdGetDatum(typioparams[m]),
+ ObjectIdGetDatum(typioparams[m]),
Int32GetDatum(attr[m]->atttypmod));
nulls[m] = ' ';
copy_attname = NULL;
@@ -1809,7 +1811,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
if (result == NORMAL_ATTR && line_buf.len != 0)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("extra data after last expected column")));
+ errmsg("extra data after last expected column")));
}
else
{
@@ -1835,8 +1837,8 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
copy_attname = "oid";
loaded_oid =
DatumGetObjectId(CopyReadBinaryAttribute(0,
- &oid_in_function,
- oid_typioparam,
+ &oid_in_function,
+ oid_typioparam,
&isnull));
if (isnull || loaded_oid == InvalidOid)
ereport(ERROR,
@@ -2022,14 +2024,14 @@ CopyReadLine(void)
result = false;
/*
- * In this loop we only care for detecting newlines (\r and/or \n)
- * and the end-of-copy marker (\.). For backwards compatibility
- * we allow backslashes to escape newline characters. Backslashes
- * other than the end marker get put into the line_buf, since
- * CopyReadAttribute does its own escape processing. These four
- * characters, and only these four, are assumed the same in frontend
- * and backend encodings. We do not assume that second and later bytes
- * of a frontend multibyte character couldn't look like ASCII characters.
+ * In this loop we only care for detecting newlines (\r and/or \n) and
+ * the end-of-copy marker (\.). For backwards compatibility we allow
+ * backslashes to escape newline characters. Backslashes other than
+ * the end marker get put into the line_buf, since CopyReadAttribute
+ * does its own escape processing. These four characters, and only
+ * these four, are assumed the same in frontend and backend encodings.
+ * We do not assume that second and later bytes of a frontend
+ * multibyte character couldn't look like ASCII characters.
*/
for (;;)
{
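
A minimal standalone sketch of the line-reading rule that comment spells out — \r and/or \n terminate a line, a backslash keeps the following character (including a newline) as data for later de-escaping, and a line consisting of just "\." is the end-of-copy marker — might look like the following (assumptions: plain stdio input, single-byte encoding, no wire-protocol framing or newline-style consistency checks):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Read one COPY text line into buf.  Returns false at end of input or at
     * the end-of-copy marker, true if an ordinary data line was read.
     */
    static bool
    read_copy_line(FILE *fp, char *buf, size_t bufsize)
    {
        size_t  len = 0;
        int     c;

        while ((c = getc(fp)) != EOF)
        {
            if (c == '\r')
            {
                int     c2 = getc(fp);  /* fold \r\n into a single newline */

                if (c2 != '\n' && c2 != EOF)
                    ungetc(c2, fp);
                break;
            }
            if (c == '\n')
                break;
            if (c == '\\')
            {
                int     c2 = getc(fp);

                /* keep backslash and next char for later de-escaping */
                if (len + 2 < bufsize)
                {
                    buf[len++] = '\\';
                    if (c2 != EOF)
                        buf[len++] = (char) c2;
                }
                continue;
            }
            if (len + 1 < bufsize)
                buf[len++] = (char) c;
        }
        buf[len] = '\0';

        if (len == 0 && c == EOF)
            return false;               /* true end of input */
        return strcmp(buf, "\\.") != 0; /* "\." alone ends the copy */
    }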
@@ -2120,9 +2122,9 @@ CopyReadLine(void)
errmsg("end-of-copy marker does not match previous newline style")));
/*
- * In protocol version 3, we should ignore anything
- * after \. up to the protocol end of copy data. (XXX
- * maybe better not to treat \. as special?)
+ * In protocol version 3, we should ignore anything after
+ * \. up to the protocol end of copy data. (XXX maybe
+ * better not to treat \. as special?)
*/
if (copy_dest == COPY_NEW_FE)
{
@@ -2140,10 +2142,10 @@ CopyReadLine(void)
/*
* When client encoding != server, must be careful to read the
- * extra bytes of a multibyte character exactly, since the encoding
- * might not ensure they don't look like ASCII. When the encodings
- * are the same, we need not do this, since no server encoding we
- * use has ASCII-like following bytes.
+ * extra bytes of a multibyte character exactly, since the
+ * encoding might not ensure they don't look like ASCII. When the
+ * encodings are the same, we need not do this, since no server
+ * encoding we use has ASCII-like following bytes.
*/
if (change_encoding)
{
@@ -2162,7 +2164,7 @@ CopyReadLine(void)
if (result)
break; /* out of outer loop */
}
- } /* end of outer loop */
+ } /* end of outer loop */
/*
* Done reading the line. Convert it to server encoding.
@@ -2170,8 +2172,9 @@ CopyReadLine(void)
* Note: set line_buf_converted to true *before* attempting conversion;
* this prevents infinite recursion during error reporting should
* pg_client_to_server() issue an error, due to copy_in_error_callback
- * again attempting the same conversion. We'll end up issuing the message
- * without conversion, which is bad but better than nothing ...
+ * again attempting the same conversion. We'll end up issuing the
+ * message without conversion, which is bad but better than nothing
+ * ...
*/
line_buf_converted = true;
@@ -2295,9 +2298,11 @@ CopyReadAttribute(const char *delim, const char *null_print,
case 'v':
c = '\v';
break;
- /*
- * in all other cases, take the char after '\' literally
- */
+
+ /*
+ * in all other cases, take the char after '\'
+ * literally
+ */
}
}
appendStringInfoCharMacro(&attribute_buf, c);
@@ -2316,7 +2321,7 @@ CopyReadAttribute(const char *delim, const char *null_print,
/*
- * Read the value of a single attribute in CSV mode,
+ * Read the value of a single attribute in CSV mode,
* performing de-escaping as needed. Escaping does not follow the normal
* PostgreSQL text mode, but instead "standard" (i.e. common) CSV usage.
*
@@ -2329,7 +2334,7 @@ CopyReadAttribute(const char *delim, const char *null_print,
* *result is set to indicate what terminated the read:
* NORMAL_ATTR: column delimiter
* END_OF_LINE: end of line
- * UNTERMINATED_FIELD no quote detected at end of a quoted field
+ * UNTERMINATED_FIELD no quote detected at end of a quoted field
*
* In any case, the string read up to the terminator (or end of file)
* is returned.
@@ -2345,15 +2350,15 @@ static char *
CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote,
char *escape, CopyReadResult *result, bool *isnull)
{
- char delimc = delim[0];
- char quotec = quote[0];
- char escapec = escape[0];
+ char delimc = delim[0];
+ char quotec = quote[0];
+ char escapec = escape[0];
char c;
int start_cursor = line_buf.cursor;
int end_cursor = start_cursor;
int input_len;
- bool in_quote = false;
- bool saw_quote = false;
+ bool in_quote = false;
+ bool saw_quote = false;
/* reset attribute_buf to empty */
attribute_buf.len = 0;
@@ -2367,18 +2372,18 @@ CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote,
/* handle multiline quoted fields */
if (in_quote && line_buf.cursor >= line_buf.len)
{
- bool done;
+ bool done;
- switch(eol_type)
+ switch (eol_type)
{
case EOL_NL:
- appendStringInfoString(&attribute_buf,"\n");
+ appendStringInfoString(&attribute_buf, "\n");
break;
case EOL_CR:
- appendStringInfoString(&attribute_buf,"\r");
+ appendStringInfoString(&attribute_buf, "\r");
break;
case EOL_CRNL:
- appendStringInfoString(&attribute_buf,"\r\n");
+ appendStringInfoString(&attribute_buf, "\r\n");
break;
case EOL_UNKNOWN:
/* shouldn't happen - just keep going */
@@ -2396,16 +2401,18 @@ CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote,
if (line_buf.cursor >= line_buf.len)
break;
c = line_buf.data[line_buf.cursor++];
- /*
- * unquoted field delimiter
+
+ /*
+ * unquoted field delimiter
*/
if (!in_quote && c == delimc)
{
*result = NORMAL_ATTR;
break;
}
- /*
- * start of quoted field (or part of field)
+
+ /*
+ * start of quoted field (or part of field)
*/
if (!in_quote && c == quotec)
{
@@ -2413,18 +2420,20 @@ CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote,
in_quote = true;
continue;
}
- /*
+
+ /*
* escape within a quoted field
*/
if (in_quote && c == escapec)
{
- /*
- * peek at the next char if available, and escape it if it
- * is an escape char or a quote char
+ /*
+ * peek at the next char if available, and escape it if it is
+ * an escape char or a quote char
*/
if (line_buf.cursor <= line_buf.len)
{
- char nextc = line_buf.data[line_buf.cursor];
+ char nextc = line_buf.data[line_buf.cursor];
+
if (nextc == escapec || nextc == quotec)
{
appendStringInfoCharMacro(&attribute_buf, nextc);
@@ -2433,10 +2442,11 @@ CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote,
}
}
}
+
/*
- * end of quoted field.
- * Must do this test after testing for escape in case quote char
- * and escape char are the same (which is the common case).
+ * end of quoted field. Must do this test after testing for escape
+ * in case quote char and escape char are the same (which is the
+ * common case).
*/
if (in_quote && c == quotec)
{
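
Taken together, the quote, escape, and end-of-quote rules in this routine can be sketched as a small standalone field reader (a simplified illustration, not the patched function: the record is assumed to be a NUL-terminated string already in memory, and multiline quoted fields are ignored):

    #include <stdbool.h>

    /*
     * Read one CSV field from line starting at *cursor into out[].
     * Returns true if the field ended at a delimiter, false at end of line.
     */
    static bool
    read_csv_field(const char *line, int *cursor, char delimc, char quotec,
                   char escapec, char *out, int outsize)
    {
        int     len = 0;
        bool    in_quote = false;
        char    c;

        while ((c = line[*cursor]) != '\0')
        {
            (*cursor)++;

            if (!in_quote && c == delimc)
            {
                out[len] = '\0';
                return true;            /* unquoted field delimiter */
            }
            if (!in_quote && c == quotec)
            {
                in_quote = true;        /* start of a quoted section */
                continue;
            }
            if (in_quote && c == escapec)
            {
                char    nextc = line[*cursor];

                /* escape only applies to a following escape or quote char */
                if (nextc == escapec || nextc == quotec)
                {
                    if (len + 1 < outsize)
                        out[len++] = nextc;
                    (*cursor)++;
                    continue;
                }
            }

            /*
             * End-of-quote test must come after the escape test, since the
             * quote char and escape char are usually the same character.
             */
            if (in_quote && c == quotec)
            {
                in_quote = false;
                continue;
            }

            if (len + 1 < outsize)
                out[len++] = c;
        }
        out[len] = '\0';
        return false;                   /* hit end of line */
    }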
@@ -2586,7 +2596,7 @@ CopyAttributeOut(char *server_string, char *delim)
}
/*
- * Send CSV representation of one attribute, with conversion and
+ * Send CSV representation of one attribute, with conversion and
* CSV type escaping
*/
static void
@@ -2596,9 +2606,9 @@ CopyAttributeOutCSV(char *server_string, char *delim, char *quote,
char *string;
char c;
char delimc = delim[0];
- char quotec = quote[0];
- char escapec = escape[0];
- char *test_string;
+ char quotec = quote[0];
+ char escapec = escape[0];
+ char *test_string;
bool same_encoding;
int mblen;
int i;
@@ -2610,13 +2620,14 @@ CopyAttributeOutCSV(char *server_string, char *delim, char *quote,
else
string = server_string;
- /* have to run through the string twice,
- * first time to see if it needs quoting, second to actually send it
+ /*
+ * have to run through the string twice, first time to see if it needs
+ * quoting, second to actually send it
*/
- for(test_string = string;
- !use_quote && (c = *test_string) != '\0';
- test_string += mblen)
+ for (test_string = string;
+ !use_quote && (c = *test_string) != '\0';
+ test_string += mblen)
{
if (c == delimc || c == quotec || c == '\n' || c == '\r')
use_quote = true;
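
The two-pass approach that comment describes — scan once to decide whether quoting is needed, then emit — can be sketched on its own (a simplified illustration writing to stdout; forced quoting, NULL-string matching, and multibyte handling are left out):

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Emit one CSV field.  Pass 1 only looks for characters that force
     * quoting; pass 2 writes the value, escaping embedded quote/escape chars.
     */
    static void
    write_csv_field(const char *string, char delimc, char quotec, char escapec)
    {
        bool        use_quote = false;
        const char *p;

        for (p = string; !use_quote && *p != '\0'; p++)
        {
            char    c = *p;

            if (c == delimc || c == quotec || c == '\n' || c == '\r')
                use_quote = true;
        }

        if (use_quote)
            putchar(quotec);

        for (p = string; *p != '\0'; p++)
        {
            char    c = *p;

            if (use_quote && (c == quotec || c == escapec))
                putchar(escapec);       /* doubles the quote in the "" style */
            putchar(c);
        }

        if (use_quote)
            putchar(quotec);
    }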
@@ -2695,8 +2706,8 @@ CopyGetAttnums(Relation rel, List *attnamelist)
if (list_member_int(attnums, attnum))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("column \"%s\" specified more than once",
- name)));
+ errmsg("column \"%s\" specified more than once",
+ name)));
attnums = lappend_int(attnums, attnum);
}
}
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 055b7be7eb3..f7ef440b02e 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.140 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.141 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -78,7 +78,7 @@ createdb(const CreatedbStmt *stmt)
Oid dboid;
AclId datdba;
ListCell *option;
- DefElem *dtablespacename = NULL;
+ DefElem *dtablespacename = NULL;
DefElem *downer = NULL;
DefElem *dtemplate = NULL;
DefElem *dencoding = NULL;
@@ -86,6 +86,7 @@ createdb(const CreatedbStmt *stmt)
char *dbowner = NULL;
char *dbtemplate = NULL;
int encoding = -1;
+
#ifndef WIN32
char buf[2 * MAXPGPATH + 100];
#endif
@@ -224,7 +225,7 @@ createdb(const CreatedbStmt *stmt)
&src_vacuumxid, &src_frozenxid, &src_deftablespace))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_DATABASE),
- errmsg("template database \"%s\" does not exist", dbtemplate)));
+ errmsg("template database \"%s\" does not exist", dbtemplate)));
/*
* Permission check: to copy a DB that's not marked datistemplate, you
@@ -265,7 +266,7 @@ createdb(const CreatedbStmt *stmt)
if (dtablespacename && dtablespacename->arg)
{
char *tablespacename;
- AclResult aclresult;
+ AclResult aclresult;
tablespacename = strVal(dtablespacename->arg);
dst_deftablespace = get_tablespace_oid(tablespacename);
@@ -275,11 +276,11 @@ createdb(const CreatedbStmt *stmt)
errmsg("tablespace \"%s\" does not exist",
tablespacename)));
/* check permissions */
- aclresult = pg_tablespace_aclcheck(dst_deftablespace, GetUserId(),
+ aclresult = pg_tablespace_aclcheck(dst_deftablespace, GetUserId(),
ACL_CREATE);
- if (aclresult != ACLCHECK_OK)
- aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
- tablespacename);
+ if (aclresult != ACLCHECK_OK)
+ aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
+ tablespacename);
}
else
{
@@ -308,22 +309,22 @@ createdb(const CreatedbStmt *stmt)
closeAllVfds();
/*
- * Iterate through all tablespaces of the template database, and
- * copy each one to the new database.
+ * Iterate through all tablespaces of the template database, and copy
+ * each one to the new database.
*
- * If we are trying to change the default tablespace of the template,
- * we require that the template not have any files in the new default
- * tablespace. This avoids the need to merge two subdirectories.
- * This could probably be improved later.
+ * If we are trying to change the default tablespace of the template, we
+ * require that the template not have any files in the new default
+ * tablespace. This avoids the need to merge two subdirectories. This
+ * could probably be improved later.
*/
rel = heap_openr(TableSpaceRelationName, AccessShareLock);
scan = heap_beginscan(rel, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- Oid srctablespace = HeapTupleGetOid(tuple);
- Oid dsttablespace;
- char *srcpath;
- char *dstpath;
+ Oid srctablespace = HeapTupleGetOid(tuple);
+ Oid dsttablespace;
+ char *srcpath;
+ char *dstpath;
struct stat st;
/* No need to copy global tablespace */
@@ -351,10 +352,11 @@ createdb(const CreatedbStmt *stmt)
remove_dbtablespaces(dboid);
ereport(ERROR,
(errmsg("could not initialize database directory"),
- errdetail("Directory \"%s\" already exists.", dstpath)));
+ errdetail("Directory \"%s\" already exists.", dstpath)));
}
#ifndef WIN32
+
/*
* Copy this subdirectory to the new location
*
@@ -374,7 +376,7 @@ createdb(const CreatedbStmt *stmt)
errdetail("Failing system command was: %s", buf),
errhint("Look in the postmaster's stderr log for more information.")));
}
-#else /* WIN32 */
+#else /* WIN32 */
if (copydir(srcpath, dstpath) != 0)
{
/* copydir should already have given details of its troubles */
@@ -382,7 +384,7 @@ createdb(const CreatedbStmt *stmt)
ereport(ERROR,
(errmsg("could not initialize database directory")));
}
-#endif /* WIN32 */
+#endif /* WIN32 */
}
heap_endscan(scan);
heap_close(rel, AccessShareLock);
@@ -772,7 +774,7 @@ AlterDatabaseOwner(const char *dbname, AclId newOwnerSysId)
Relation rel;
ScanKeyData scankey;
SysScanDesc scan;
- Form_pg_database datForm;
+ Form_pg_database datForm;
rel = heap_openr(DatabaseRelationName, RowExclusiveLock);
ScanKeyInit(&scankey,
@@ -789,16 +791,17 @@ AlterDatabaseOwner(const char *dbname, AclId newOwnerSysId)
datForm = (Form_pg_database) GETSTRUCT(tuple);
- /*
+ /*
* If the new owner is the same as the existing owner, consider the
- * command to have succeeded. This is to be consistent with other objects.
+ * command to have succeeded. This is to be consistent with other
+ * objects.
*/
if (datForm->datdba != newOwnerSysId)
{
Datum repl_val[Natts_pg_database];
char repl_null[Natts_pg_database];
char repl_repl[Natts_pg_database];
- Acl *newAcl;
+ Acl *newAcl;
Datum aclDatum;
bool isNull;
HeapTuple newtuple;
@@ -821,9 +824,9 @@ AlterDatabaseOwner(const char *dbname, AclId newOwnerSysId)
* necessary when the ACL is non-null.
*/
aclDatum = heap_getattr(tuple,
- Anum_pg_database_datacl,
- RelationGetDescr(rel),
- &isNull);
+ Anum_pg_database_datacl,
+ RelationGetDescr(rel),
+ &isNull);
if (!isNull)
{
newAcl = aclnewowner(DatumGetAclP(aclDatum),
@@ -941,16 +944,16 @@ have_createdb_privilege(void)
static void
remove_dbtablespaces(Oid db_id)
{
- Relation rel;
+ Relation rel;
HeapScanDesc scan;
- HeapTuple tuple;
+ HeapTuple tuple;
rel = heap_openr(TableSpaceRelationName, AccessShareLock);
scan = heap_beginscan(rel, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- Oid dsttablespace = HeapTupleGetOid(tuple);
- char *dstpath;
+ Oid dsttablespace = HeapTupleGetOid(tuple);
+ char *dstpath;
struct stat st;
/* Don't mess with the global tablespace */
@@ -969,9 +972,9 @@ remove_dbtablespaces(Oid db_id)
if (!rmtree(dstpath, true))
{
ereport(WARNING,
- (errmsg("could not remove database directory \"%s\"",
- dstpath),
- errhint("Look in the postmaster's stderr log for more information.")));
+ (errmsg("could not remove database directory \"%s\"",
+ dstpath),
+ errhint("Look in the postmaster's stderr log for more information.")));
}
pfree(dstpath);
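
The dbcommands.c hunks above (and the matching ones in functioncmds.c, opclasscmds.c, operatorcmds.c and schemacmds.c later in this diff) keep reflowing the same comment: if the requested new owner already owns the object, the ALTER ... OWNER command is treated as having succeeded so that dump restoration can re-issue it harmlessly. A minimal standalone sketch of that pattern, with invented types and ids rather than the real backend structures, might look like this:

/*
 * Hypothetical stand-ins: FakeObject, AclId and the superuser flag are not
 * PostgreSQL code; only the "same owner => silent success" flow mirrors the
 * comments reindented in this commit.
 */
#include <stdio.h>

typedef int AclId;

typedef struct FakeObject
{
    AclId   owner;
    char    name[64];
} FakeObject;

static void
alter_fake_owner(FakeObject *obj, AclId new_owner, int caller_is_superuser)
{
    /* No-op when ownership would not actually change (dump restore case). */
    if (obj->owner == new_owner)
        return;

    /* Only a superuser may reassign ownership in this sketch. */
    if (!caller_is_superuser)
    {
        fprintf(stderr, "must be superuser to change owner\n");
        return;
    }

    obj->owner = new_owner;     /* okay to scribble on our copy */
}

int
main(void)
{
    FakeObject db = {10, "mydb"};

    alter_fake_owner(&db, 10, 0);   /* same owner: silently succeeds */
    alter_fake_owner(&db, 20, 1);   /* real change, made by a superuser */
    printf("owner is now %d\n", db.owner);
    return 0;
}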
diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c
index cc2643d6373..dc2ea2974a7 100644
--- a/src/backend/commands/define.c
+++ b/src/backend/commands/define.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.90 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.91 2004/08/29 05:06:41 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -126,8 +126,8 @@ bool
defGetBoolean(DefElem *def)
{
/*
- * Presently, boolean flags must simply be present or absent.
- * Later we could allow 'flag = t', 'flag = f', etc.
+ * Presently, boolean flags must simply be present or absent. Later we
+ * could allow 'flag = t', 'flag = f', etc.
*/
if (def->arg == NULL)
return true;
@@ -265,7 +265,7 @@ defGetTypeLength(DefElem *def)
case T_TypeName:
/* cope if grammar chooses to believe "variable" is a typename */
if (pg_strcasecmp(TypeNameToString((TypeName *) def->arg),
- "variable") == 0)
+ "variable") == 0)
return -1; /* variable length */
break;
case T_List:
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 29b4f30fce7..7ad3596fac6 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.123 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.124 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -317,7 +317,7 @@ explain_outNode(StringInfo str,
Plan *outer_plan,
int indent, ExplainState *es)
{
- ListCell *l;
+ ListCell *l;
char *pname;
int i;
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index 8a139e56012..7bce0b9b9d2 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -3,14 +3,14 @@
* functioncmds.c
*
* Routines for CREATE and DROP FUNCTION commands and CREATE and DROP
- * CAST commands.
+ * CAST commands.
*
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.51 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.52 2004/08/29 05:06:41 momjian Exp $
*
* DESCRIPTION
* These routines take the parse tree and pick out the
@@ -449,14 +449,14 @@ CreateFunction(CreateFunctionStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("language \"%s\" does not exist", languageName),
- (strcmp(languageName, "plperl") == 0 ||
- strcmp(languageName, "plperlu") == 0 ||
- strcmp(languageName, "plpgsql") == 0 ||
- strcmp(languageName, "plpythonu") == 0 ||
- strcmp(languageName, "pltcl") == 0 ||
- strcmp(languageName, "pltclu") == 0) ?
+ (strcmp(languageName, "plperl") == 0 ||
+ strcmp(languageName, "plperlu") == 0 ||
+ strcmp(languageName, "plpgsql") == 0 ||
+ strcmp(languageName, "plpythonu") == 0 ||
+ strcmp(languageName, "pltcl") == 0 ||
+ strcmp(languageName, "pltclu") == 0) ?
errhint("You need to use \"createlang\" to load the language into the database.") : 0));
-
+
languageOid = HeapTupleGetOid(languageTuple);
languageStruct = (Form_pg_language) GETSTRUCT(languageTuple);
@@ -490,7 +490,7 @@ CreateFunction(CreateFunctionStmt *stmt)
&prorettype, &returnsSet);
parameterCount = examine_parameter_list(stmt->parameters, languageOid,
- parameterTypes, parameterNames);
+ parameterTypes, parameterNames);
compute_attributes_with_style(stmt->withClause, &isStrict, &volatility);
@@ -739,8 +739,8 @@ AlterFunctionOwner(List *name, List *argtypes, AclId newOwnerSysId)
procOid = LookupFuncNameTypeNames(name, argtypes, false);
tup = SearchSysCache(PROCOID,
- ObjectIdGetDatum(procOid),
- 0, 0, 0);
+ ObjectIdGetDatum(procOid),
+ 0, 0, 0);
if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "cache lookup failed for function %u", procOid);
procForm = (Form_pg_proc) GETSTRUCT(tup);
@@ -750,9 +750,9 @@ AlterFunctionOwner(List *name, List *argtypes, AclId newOwnerSysId)
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is an aggregate function",
NameListToString(name)),
- errhint("Use ALTER AGGREGATE to change owner of aggregate functions.")));
+ errhint("Use ALTER AGGREGATE to change owner of aggregate functions.")));
- /*
+ /*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
@@ -761,7 +761,7 @@ AlterFunctionOwner(List *name, List *argtypes, AclId newOwnerSysId)
Datum repl_val[Natts_pg_proc];
char repl_null[Natts_pg_proc];
char repl_repl[Natts_pg_proc];
- Acl *newAcl;
+ Acl *newAcl;
Datum aclDatum;
bool isNull;
HeapTuple newtuple;
@@ -968,7 +968,7 @@ CreateCast(CreateCastStmt *stmt)
if (nargs < 1 || nargs > 3)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("cast function must take one to three arguments")));
+ errmsg("cast function must take one to three arguments")));
if (procstruct->proargtypes[0] != sourcetypeid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 04761fac688..6e550e67c68 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.124 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.125 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -47,10 +47,10 @@
/* non-export function prototypes */
static void CheckPredicate(Expr *predicate);
static void ComputeIndexAttrs(IndexInfo *indexInfo, Oid *classOidP,
- List *attList,
- Oid relId,
- char *accessMethodName, Oid accessMethodId,
- bool isconstraint);
+ List *attList,
+ Oid relId,
+ char *accessMethodName, Oid accessMethodId,
+ bool isconstraint);
static Oid GetIndexOpClass(List *opclass, Oid attrType,
char *accessMethodName, Oid accessMethodId);
static Oid GetDefaultOpClass(Oid attrType, Oid accessMethodId);
@@ -143,7 +143,8 @@ DefineIndex(RangeVar *heapRelation,
* Verify we (still) have CREATE rights in the rel's namespace.
* (Presumably we did when the rel was created, but maybe not
* anymore.) Skip check if caller doesn't want it. Also skip check
- * if bootstrapping, since permissions machinery may not be working yet.
+ * if bootstrapping, since permissions machinery may not be working
+ * yet.
*/
if (check_rights && !IsBootstrapProcessingMode())
{
@@ -159,7 +160,7 @@ DefineIndex(RangeVar *heapRelation,
/* Determine tablespace to use */
if (tableSpaceName)
{
- AclResult aclresult;
+ AclResult aclresult;
tablespaceId = get_tablespace_oid(tableSpaceName);
if (!OidIsValid(tablespaceId))
@@ -173,7 +174,9 @@ DefineIndex(RangeVar *heapRelation,
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
tableSpaceName);
- } else {
+ }
+ else
+ {
/* Use the parent rel's tablespace */
tablespaceId = get_rel_tablespace(relationId);
/* Note there is no additional permission check in this path */
@@ -256,9 +259,9 @@ DefineIndex(RangeVar *heapRelation,
/*
* If ALTER TABLE, check that there isn't already a PRIMARY KEY.
- * In CREATE TABLE, we have faith that the parser rejected multiple
- * pkey clauses; and CREATE INDEX doesn't have a way to say
- * PRIMARY KEY, so it's no problem either.
+ * In CREATE TABLE, we have faith that the parser rejected
+ * multiple pkey clauses; and CREATE INDEX doesn't have a way to
+ * say PRIMARY KEY, so it's no problem either.
*/
if (is_alter_table &&
relationHasPrimaryKey(rel))
@@ -270,8 +273,8 @@ DefineIndex(RangeVar *heapRelation,
}
/*
- * Check that all of the attributes in a primary key are marked as not
- * null, otherwise attempt to ALTER TABLE .. SET NOT NULL
+ * Check that all of the attributes in a primary key are marked as
+ * not null, otherwise attempt to ALTER TABLE .. SET NOT NULL
*/
cmds = NIL;
foreach(keys, attributeList)
@@ -294,7 +297,7 @@ DefineIndex(RangeVar *heapRelation,
if (!((Form_pg_attribute) GETSTRUCT(atttuple))->attnotnull)
{
/* Add a subcommand to make this one NOT NULL */
- AlterTableCmd *cmd = makeNode(AlterTableCmd);
+ AlterTableCmd *cmd = makeNode(AlterTableCmd);
cmd->subtype = AT_SetNotNull;
cmd->name = key->name;
@@ -318,15 +321,15 @@ DefineIndex(RangeVar *heapRelation,
}
/*
- * XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade
- * to child tables? Currently, since the PRIMARY KEY
- * itself doesn't cascade, we don't cascade the
- * notnull constraint(s) either; but this is pretty debatable.
+ * XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade to child
+ * tables? Currently, since the PRIMARY KEY itself doesn't
+ * cascade, we don't cascade the notnull constraint(s) either; but
+ * this is pretty debatable.
*
- * XXX: possible future improvement: when being called from
- * ALTER TABLE, it would be more efficient to merge this with
- * the outer ALTER TABLE, so as to avoid two scans. But that
- * seems to complicate DefineIndex's API unduly.
+ * XXX: possible future improvement: when being called from ALTER
+ * TABLE, it would be more efficient to merge this with the outer
+ * ALTER TABLE, so as to avoid two scans. But that seems to
+ * complicate DefineIndex's API unduly.
*/
if (cmds)
AlterTableInternal(relationId, cmds, false);
@@ -352,15 +355,15 @@ DefineIndex(RangeVar *heapRelation,
heap_close(rel, NoLock);
/*
- * Report index creation if appropriate (delay this till after most
- * of the error checks)
+ * Report index creation if appropriate (delay this till after most of
+ * the error checks)
*/
if (isconstraint && !quiet)
ereport(NOTICE,
(errmsg("%s %s will create implicit index \"%s\" for table \"%s\"",
- is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /",
+ is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /",
primary ? "PRIMARY KEY" : "UNIQUE",
- indexRelationName, RelationGetRelationName(rel))));
+ indexRelationName, RelationGetRelationName(rel))));
index_create(relationId, indexRelationName,
indexInfo, accessMethodId, tablespaceId, classObjectId,
@@ -450,8 +453,8 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
if (isconstraint)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" named in key does not exist",
- attribute->name)));
+ errmsg("column \"%s\" named in key does not exist",
+ attribute->name)));
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
@@ -488,11 +491,11 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
if (contain_subplans(attribute->expr))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use subquery in index expression")));
+ errmsg("cannot use subquery in index expression")));
if (contain_agg_clause(attribute->expr))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate function in index expression")));
+ errmsg("cannot use aggregate function in index expression")));
/*
* An expression using mutable functions is probably wrong,
@@ -647,7 +650,7 @@ GetDefaultOpClass(Oid attrType, Oid accessMethodId)
* than one exact match, then someone put bogus entries in pg_opclass.
*
* The initial search is done by namespace.c so that we only consider
- * opclasses visible in the current namespace search path. (See also
+ * opclasses visible in the current namespace search path. (See also
* typcache.c, which applies the same logic, but over all opclasses.)
*/
for (opclass = OpclassGetCandidates(accessMethodId);
@@ -962,16 +965,16 @@ ReindexTable(RangeVar *relation, bool force /* currently unused */ )
* separate transaction, so we can release the lock on it right away.
*/
void
-ReindexDatabase(const char *dbname, bool force /* currently unused */,
+ReindexDatabase(const char *dbname, bool force /* currently unused */ ,
bool all)
{
- Relation relationRelation;
+ Relation relationRelation;
HeapScanDesc scan;
- HeapTuple tuple;
+ HeapTuple tuple;
MemoryContext private_context;
MemoryContext old;
- List *relids = NIL;
- ListCell *l;
+ List *relids = NIL;
+ ListCell *l;
AssertArg(dbname);
@@ -1006,7 +1009,7 @@ ReindexDatabase(const char *dbname, bool force /* currently unused */,
/*
* We always want to reindex pg_class first. This ensures that if
* there is any corruption in pg_class' indexes, they will be fixed
- * before we process any other tables. This is critical because
+ * before we process any other tables. This is critical because
* reindexing itself will try to update pg_class.
*/
old = MemoryContextSwitchTo(private_context);
@@ -1054,7 +1057,7 @@ ReindexDatabase(const char *dbname, bool force /* currently unused */,
CommitTransactionCommand();
foreach(l, relids)
{
- Oid relid = lfirst_oid(l);
+ Oid relid = lfirst_oid(l);
StartTransactionCommand();
SetQuerySnapshot(); /* might be needed for functions in
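
The ReindexDatabase() hunks above preserve the comment explaining why the relation list is built with pg_class first: reindexing itself updates pg_class, so any corruption in pg_class's own indexes must be repaired before other tables are touched, each in its own transaction. A standalone sketch of just that ordering rule (made-up table OIDs; 1259 is assumed here as pg_class's well-known OID):

#include <stdio.h>

typedef unsigned int Oid;

#define RELOID_PG_CLASS 1259    /* assumed catalog OID for pg_class */

int
main(void)
{
    Oid user_tables[] = {16384, 16390, 16402};
    Oid worklist[4];
    int n = 0;

    /* pg_class always goes first in the work list */
    worklist[n++] = RELOID_PG_CLASS;
    for (int i = 0; i < 3; i++)
        worklist[n++] = user_tables[i];

    /* in the real code each entry would get its own transaction */
    for (int i = 0; i < n; i++)
        printf("reindexing relation %u%s\n", worklist[i],
               i == 0 ? " (pg_class, done first)" : "");
    return 0;
}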
diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c
index 0b2bc391f1b..e0f58d9ab2b 100644
--- a/src/backend/commands/opclasscmds.c
+++ b/src/backend/commands/opclasscmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.27 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.28 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -300,8 +300,8 @@ DefineOpClass(CreateOpClassStmt *stmt)
errmsg("could not make operator class \"%s\" be default for type %s",
opcname,
TypeNameToString(stmt->datatype)),
- errdetail("Operator class \"%s\" already is the default.",
- NameStr(opclass->opcname))));
+ errdetail("Operator class \"%s\" already is the default.",
+ NameStr(opclass->opcname))));
}
systable_endscan(scan);
@@ -419,6 +419,7 @@ assignOperSubtype(Oid amoid, Oid typeoid, Oid operOid)
if (optup == NULL)
elog(ERROR, "cache lookup failed for operator %u", operOid);
opform = (Form_pg_operator) GETSTRUCT(optup);
+
/*
* btree operators must be binary ops returning boolean, and the
* left-side input type must match the operator class' input type.
@@ -434,10 +435,11 @@ assignOperSubtype(Oid amoid, Oid typeoid, Oid operOid)
if (opform->oprleft != typeoid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("btree operators must have index type as left input")));
+ errmsg("btree operators must have index type as left input")));
+
/*
- * The subtype is "default" (0) if oprright matches the operator class,
- * otherwise it is oprright.
+ * The subtype is "default" (0) if oprright matches the operator
+ * class, otherwise it is oprright.
*/
if (opform->oprright == typeoid)
subtype = InvalidOid;
@@ -471,6 +473,7 @@ assignProcSubtype(Oid amoid, Oid typeoid, Oid procOid)
if (proctup == NULL)
elog(ERROR, "cache lookup failed for function %u", procOid);
procform = (Form_pg_proc) GETSTRUCT(proctup);
+
/*
* btree support procs must be 2-arg procs returning int4, and the
* first input type must match the operator class' input type.
@@ -486,10 +489,11 @@ assignProcSubtype(Oid amoid, Oid typeoid, Oid procOid)
if (procform->proargtypes[0] != typeoid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("btree procedures must have index type as first input")));
+ errmsg("btree procedures must have index type as first input")));
+
/*
- * The subtype is "default" (0) if second input type matches the operator
- * class, otherwise it is the second input type.
+ * The subtype is "default" (0) if second input type matches the
+ * operator class, otherwise it is the second input type.
*/
if (procform->proargtypes[1] == typeoid)
subtype = InvalidOid;
@@ -518,13 +522,13 @@ addClassMember(List **list, OpClassMember *member, bool isProc)
if (isProc)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("procedure number %d appears more than once",
- member->number)));
+ errmsg("procedure number %d appears more than once",
+ member->number)));
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("operator number %d appears more than once",
- member->number)));
+ errmsg("operator number %d appears more than once",
+ member->number)));
}
}
*list = lappend(*list, member);
@@ -885,7 +889,7 @@ AlterOpClassOwner(List *name, const char *access_method, AclId newOwnerSysId)
char *opcname;
HeapTuple tup;
Relation rel;
- Form_pg_opclass opcForm;
+ Form_pg_opclass opcForm;
amOid = GetSysCacheOid(AMNAME,
CStringGetDatum(access_method),
@@ -937,7 +941,7 @@ AlterOpClassOwner(List *name, const char *access_method, AclId newOwnerSysId)
}
opcForm = (Form_pg_opclass) GETSTRUCT(tup);
- /*
+ /*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
@@ -949,7 +953,10 @@ AlterOpClassOwner(List *name, const char *access_method, AclId newOwnerSysId)
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to change owner")));
- /* Modify the owner --- okay to scribble on tup because it's a copy */
+ /*
+ * Modify the owner --- okay to scribble on tup because it's a
+ * copy
+ */
opcForm->opcowner = newOwnerSysId;
simple_heap_update(rel, &tup->t_self, tup);
diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c
index 0605b75b3ed..280404ceb7e 100644
--- a/src/backend/commands/operatorcmds.c
+++ b/src/backend/commands/operatorcmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.18 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.19 2004/08/29 05:06:41 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -275,7 +275,7 @@ AlterOperatorOwner(List *name, TypeName *typeName1, TypeName *typeName2,
Oid operOid;
HeapTuple tup;
Relation rel;
- Form_pg_operator oprForm;
+ Form_pg_operator oprForm;
rel = heap_openr(OperatorRelationName, RowExclusiveLock);
@@ -283,14 +283,14 @@ AlterOperatorOwner(List *name, TypeName *typeName1, TypeName *typeName2,
false);
tup = SearchSysCacheCopy(OPEROID,
- ObjectIdGetDatum(operOid),
- 0, 0, 0);
+ ObjectIdGetDatum(operOid),
+ 0, 0, 0);
if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "cache lookup failed for operator %u", operOid);
oprForm = (Form_pg_operator) GETSTRUCT(tup);
- /*
+ /*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
@@ -302,7 +302,10 @@ AlterOperatorOwner(List *name, TypeName *typeName1, TypeName *typeName2,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to change owner")));
- /* Modify the owner --- okay to scribble on tup because it's a copy */
+ /*
+ * Modify the owner --- okay to scribble on tup because it's a
+ * copy
+ */
oprForm->oprowner = newOwnerSysId;
simple_heap_update(rel, &tup->t_self, tup);
@@ -314,5 +317,3 @@ AlterOperatorOwner(List *name, TypeName *typeName1, TypeName *typeName2,
heap_freetuple(tup);
}
-
-
diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c
index a8356e5dcf3..08b14013547 100644
--- a/src/backend/commands/portalcmds.c
+++ b/src/backend/commands/portalcmds.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.32 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.33 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -106,10 +106,9 @@ PerformCursorOpen(DeclareCursorStmt *stmt, ParamListInfo params)
/*
* Also copy the outer portal's parameter list into the inner portal's
- * memory context. We want to pass down the parameter values in case
- * we had a command like
- * DECLARE c CURSOR FOR SELECT ... WHERE foo = $1
- * This will have been parsed using the outer parameter set and the
+ * memory context. We want to pass down the parameter values in case
+ * we had a command like DECLARE c CURSOR FOR SELECT ... WHERE foo =
+ * $1. This will have been parsed using the outer parameter set and the
* parameter value needs to be preserved for use when the cursor is
* executed.
*/
@@ -180,8 +179,8 @@ PerformPortalFetch(FetchStmt *stmt,
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_CURSOR),
- errmsg("cursor \"%s\" does not exist", stmt->portalname)));
- return; /* keep compiler happy */
+ errmsg("cursor \"%s\" does not exist", stmt->portalname)));
+ return; /* keep compiler happy */
}
/* Adjust dest if needed. MOVE wants destination None */
@@ -228,7 +227,7 @@ PerformPortalClose(const char *name)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_CURSOR),
errmsg("cursor \"%s\" does not exist", name)));
- return; /* keep compiler happy */
+ return; /* keep compiler happy */
}
/*
@@ -354,8 +353,9 @@ PersistHoldablePortal(Portal portal)
MemoryContextSwitchTo(PortalContext);
/*
- * Rewind the executor: we need to store the entire result set in the
- * tuplestore, so that subsequent backward FETCHs can be processed.
+ * Rewind the executor: we need to store the entire result set in
+ * the tuplestore, so that subsequent backward FETCHs can be
+ * processed.
*/
ExecutorRewind(queryDesc);
@@ -371,15 +371,15 @@ PersistHoldablePortal(Portal portal)
/*
* Now shut down the inner executor.
*/
- portal->queryDesc = NULL; /* prevent double shutdown */
+ portal->queryDesc = NULL; /* prevent double shutdown */
ExecutorEnd(queryDesc);
/*
* Reset the position in the result set: ideally, this could be
- * implemented by just skipping straight to the tuple # that we need
- * to be at, but the tuplestore API doesn't support that. So we start
- * at the beginning of the tuplestore and iterate through it until we
- * reach where we need to be. FIXME someday?
+ * implemented by just skipping straight to the tuple # that we
+ * need to be at, but the tuplestore API doesn't support that. So
+ * we start at the beginning of the tuplestore and iterate through
+ * it until we reach where we need to be. FIXME someday?
*/
MemoryContextSwitchTo(portal->holdContext);
@@ -389,8 +389,8 @@ PersistHoldablePortal(Portal portal)
if (portal->posOverflow) /* oops, cannot trust portalPos */
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("could not reposition held cursor")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not reposition held cursor")));
tuplestore_rescan(portal->holdStore);
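
The PersistHoldablePortal() hunks above re-wrap the comment about repositioning: the tuplestore API has no random-access seek, so the code rescans from the beginning and fetches rows one at a time, discarding them, until the cursor's previous position is reached. A toy illustration of that strategy, using an invented store type rather than the real tuplestore API:

#include <stdio.h>

struct toystore
{
    int nrows;      /* rows held in the store */
    int readpos;    /* next row to be fetched */
};

static void
toystore_rescan(struct toystore *ts)
{
    ts->readpos = 0;            /* start over at the first row */
}

static int
toystore_advance(struct toystore *ts)
{
    if (ts->readpos >= ts->nrows)
        return 0;               /* no more rows */
    ts->readpos++;              /* fetch and discard one row */
    return 1;
}

int
main(void)
{
    struct toystore store = {100, 57};  /* cursor had already read 57 rows */
    int target = 57;

    toystore_rescan(&store);
    while (store.readpos < target)
    {
        if (!toystore_advance(&store))
        {
            fprintf(stderr, "could not reposition held cursor\n");
            return 1;
        }
    }
    printf("repositioned to row %d of %d\n", store.readpos, store.nrows);
    return 0;
}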
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index 31de3e839f9..032fe4acbcd 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -10,7 +10,7 @@
* Copyright (c) 2002-2004, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.30 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.31 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -211,7 +211,8 @@ EvaluateParams(EState *estate, List *params, List *argtypes)
int nargs = list_length(argtypes);
ParamListInfo paramLI;
List *exprstates;
- ListCell *le, *la;
+ ListCell *le,
+ *la;
int i = 0;
/* Parser should have caught this error, but check for safety */
@@ -510,7 +511,7 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
}
/* Explain each query */
- forboth (q, query_list, p, plan_list)
+ forboth(q, query_list, p, plan_list)
{
Query *query = (Query *) lfirst(q);
Plan *plan = (Plan *) lfirst(p);
diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c
index 94de3f1235e..404436e8c0e 100644
--- a/src/backend/commands/proclang.c
+++ b/src/backend/commands/proclang.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.54 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.55 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -101,8 +101,8 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("function %s must return type \"language_handler\"",
- NameListToString(stmt->plhandler))));
+ errmsg("function %s must return type \"language_handler\"",
+ NameListToString(stmt->plhandler))));
}
/* validate the validator function */
@@ -126,12 +126,12 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
i = 0;
namestrcpy(&langname, languageName);
- values[i++] = NameGetDatum(&langname); /* lanname */
- values[i++] = BoolGetDatum(true); /* lanispl */
- values[i++] = BoolGetDatum(stmt->pltrusted); /* lanpltrusted */
- values[i++] = ObjectIdGetDatum(procOid); /* lanplcallfoid */
- values[i++] = ObjectIdGetDatum(valProcOid); /* lanvalidator */
- nulls[i] = 'n'; /* lanacl */
+ values[i++] = NameGetDatum(&langname); /* lanname */
+ values[i++] = BoolGetDatum(true); /* lanispl */
+ values[i++] = BoolGetDatum(stmt->pltrusted); /* lanpltrusted */
+ values[i++] = ObjectIdGetDatum(procOid); /* lanplcallfoid */
+ values[i++] = ObjectIdGetDatum(valProcOid); /* lanvalidator */
+ nulls[i] = 'n'; /* lanacl */
rel = heap_openr(LanguageRelationName, RowExclusiveLock);
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index df565d46e8e..8a3d02d100b 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.23 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.24 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -103,12 +103,12 @@ CreateSchemaCommand(CreateSchemaStmt *stmt)
errdetail("The prefix \"pg_\" is reserved for system schemas.")));
/*
- * Select default tablespace for schema. If not given, use zero
- * which implies the database's default tablespace.
+ * Select default tablespace for schema. If not given, use zero which
+ * implies the database's default tablespace.
*/
if (stmt->tablespacename)
{
- AclResult aclresult;
+ AclResult aclresult;
tablespaceId = get_tablespace_oid(stmt->tablespacename);
if (!OidIsValid(tablespaceId))
@@ -122,7 +122,9 @@ CreateSchemaCommand(CreateSchemaStmt *stmt)
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
stmt->tablespacename);
- } else {
+ }
+ else
+ {
tablespaceId = InvalidOid;
/* note there is no permission check in this path */
}
@@ -316,20 +318,20 @@ AlterSchemaOwner(const char *name, AclId newOwnerSysId)
{
HeapTuple tup;
Relation rel;
- Form_pg_namespace nspForm;
+ Form_pg_namespace nspForm;
rel = heap_openr(NamespaceRelationName, RowExclusiveLock);
tup = SearchSysCache(NAMESPACENAME,
- CStringGetDatum(name),
- 0, 0, 0);
+ CStringGetDatum(name),
+ 0, 0, 0);
if (!HeapTupleIsValid(tup))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_SCHEMA),
errmsg("schema \"%s\" does not exist", name)));
nspForm = (Form_pg_namespace) GETSTRUCT(tup);
- /*
+ /*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
@@ -338,7 +340,7 @@ AlterSchemaOwner(const char *name, AclId newOwnerSysId)
Datum repl_val[Natts_pg_namespace];
char repl_null[Natts_pg_namespace];
char repl_repl[Natts_pg_namespace];
- Acl *newAcl;
+ Acl *newAcl;
Datum aclDatum;
bool isNull;
HeapTuple newtuple;
@@ -377,7 +379,7 @@ AlterSchemaOwner(const char *name, AclId newOwnerSysId)
heap_freetuple(newtuple);
}
-
+
ReleaseSysCache(tup);
heap_close(rel, NoLock);
}
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index d9852ed9d9f..53ec53e39fd 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.115 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.116 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -326,7 +326,7 @@ AlterSequence(AlterSeqStmt *stmt)
memcpy(seq, &new, sizeof(FormData_pg_sequence));
/* Clear local cache so that we don't think we have cached numbers */
- elm->last = new.last_value; /* last returned number */
+ elm->last = new.last_value; /* last returned number */
elm->cached = new.last_value; /* last cached number (forget
* cached values) */
@@ -950,26 +950,22 @@ init_params(List *options, Form_pg_sequence new, bool isInit)
/* MAXVALUE (null arg means NO MAXVALUE) */
if (max_value != NULL && max_value->arg)
- {
new->max_value = defGetInt64(max_value);
- }
else if (isInit || max_value != NULL)
{
if (new->increment_by > 0)
new->max_value = SEQ_MAXVALUE; /* ascending seq */
else
- new->max_value = -1; /* descending seq */
+ new->max_value = -1; /* descending seq */
}
/* MINVALUE (null arg means NO MINVALUE) */
if (min_value != NULL && min_value->arg)
- {
new->min_value = defGetInt64(min_value);
- }
else if (isInit || min_value != NULL)
{
if (new->increment_by > 0)
- new->min_value = 1; /* ascending seq */
+ new->min_value = 1; /* ascending seq */
else
new->min_value = SEQ_MINVALUE; /* descending seq */
}
@@ -1073,7 +1069,7 @@ seq_redo(XLogRecPtr lsn, XLogRecord *record)
buffer = XLogReadBuffer(true, reln, 0);
if (!BufferIsValid(buffer))
elog(PANIC, "seq_redo: can't read block 0 of rel %u/%u/%u",
- xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode);
+ xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode);
page = (Page) BufferGetPage(buffer);
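
The init_params() hunk above exposes the defaulting rule for sequence bounds: when MINVALUE/MAXVALUE are omitted, an ascending sequence gets [1, SEQ_MAXVALUE] and a descending one gets [SEQ_MINVALUE, -1]. A standalone sketch of just that rule, with SEQ_MAXVALUE/SEQ_MINVALUE assumed to be the int64 extremes rather than taken from the backend headers:

#include <stdint.h>
#include <stdio.h>

#define SEQ_MAXVALUE INT64_MAX     /* assumed stand-in */
#define SEQ_MINVALUE INT64_MIN     /* assumed stand-in */

static void
default_bounds(int64_t increment_by, int64_t *min_value, int64_t *max_value)
{
    if (increment_by > 0)
    {
        *max_value = SEQ_MAXVALUE;  /* ascending seq */
        *min_value = 1;
    }
    else
    {
        *max_value = -1;            /* descending seq */
        *min_value = SEQ_MINVALUE;
    }
}

int
main(void)
{
    int64_t lo, hi;

    default_bounds(1, &lo, &hi);
    printf("ascending:  [%lld, %lld]\n", (long long) lo, (long long) hi);
    default_bounds(-1, &lo, &hi);
    printf("descending: [%lld, %lld]\n", (long long) lo, (long long) hi);
    return 0;
}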
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 73a51c2da9f..ab0d659dc5b 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.128 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.129 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -77,8 +77,8 @@ typedef struct OnCommitItem
* entries in the list until commit so that we can roll back if
* needed.
*/
- TransactionId creating_xid;
- TransactionId deleting_xid;
+ TransactionId creating_xid;
+ TransactionId deleting_xid;
} OnCommitItem;
static List *on_commits = NIL;
@@ -117,7 +117,7 @@ typedef struct AlteredTableInfo
char relkind; /* Its relkind */
TupleDesc oldDesc; /* Pre-modification tuple descriptor */
/* Information saved by Phase 1 for Phase 2: */
- List *subcmds[AT_NUM_PASSES]; /* Lists of AlterTableCmd */
+ List *subcmds[AT_NUM_PASSES]; /* Lists of AlterTableCmd */
/* Information saved by Phases 1/2 for Phase 3: */
List *constraints; /* List of NewConstraint */
List *newvals; /* List of NewColumnValue */
@@ -125,8 +125,8 @@ typedef struct AlteredTableInfo
/* Objects to rebuild after completing ALTER TYPE operations */
List *changedConstraintOids; /* OIDs of constraints to rebuild */
List *changedConstraintDefs; /* string definitions of same */
- List *changedIndexOids; /* OIDs of indexes to rebuild */
- List *changedIndexDefs; /* string definitions of same */
+ List *changedIndexOids; /* OIDs of indexes to rebuild */
+ List *changedIndexDefs; /* string definitions of same */
} AlteredTableInfo;
/* Struct describing one new constraint to check in Phase 3 scan */
@@ -171,12 +171,12 @@ static bool needs_toast_table(Relation rel);
static int transformColumnNameList(Oid relId, List *colList,
int16 *attnums, Oid *atttypids);
static int transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
- List **attnamelist,
- int16 *attnums, Oid *atttypids,
- Oid *opclasses);
+ List **attnamelist,
+ int16 *attnums, Oid *atttypids,
+ Oid *opclasses);
static Oid transformFkeyCheckAttrs(Relation pkrel,
- int numattrs, int16 *attnums,
- Oid *opclasses);
+ int numattrs, int16 *attnums,
+ Oid *opclasses);
static void validateForeignKeyConstraint(FkConstraint *fkconstraint,
Relation rel, Relation pkrel);
static void createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
@@ -184,7 +184,7 @@ static void createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
static char *fkMatchTypeToString(char match_type);
static void ATController(Relation rel, List *cmds, bool recurse);
static void ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
- bool recurse, bool recursing);
+ bool recurse, bool recursing);
static void ATRewriteCatalogs(List **wqueue);
static void ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd);
static void ATRewriteTables(List **wqueue);
@@ -192,55 +192,55 @@ static void ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap);
static AlteredTableInfo *ATGetQueueEntry(List **wqueue, Relation rel);
static void ATSimplePermissions(Relation rel, bool allowView);
static void ATSimpleRecursion(List **wqueue, Relation rel,
- AlterTableCmd *cmd, bool recurse);
+ AlterTableCmd *cmd, bool recurse);
static void ATOneLevelRecursion(List **wqueue, Relation rel,
- AlterTableCmd *cmd);
+ AlterTableCmd *cmd);
static void find_composite_type_dependencies(Oid typeOid,
- const char *origTblName);
+ const char *origTblName);
static void ATPrepAddColumn(List **wqueue, Relation rel, bool recurse,
- AlterTableCmd *cmd);
+ AlterTableCmd *cmd);
static void ATExecAddColumn(AlteredTableInfo *tab, Relation rel,
- ColumnDef *colDef);
+ ColumnDef *colDef);
static void add_column_datatype_dependency(Oid relid, int32 attnum, Oid typid);
static void add_column_support_dependency(Oid relid, int32 attnum,
- RangeVar *support);
+ RangeVar *support);
static void ATExecDropNotNull(Relation rel, const char *colName);
static void ATExecSetNotNull(AlteredTableInfo *tab, Relation rel,
- const char *colName);
+ const char *colName);
static void ATExecColumnDefault(Relation rel, const char *colName,
- Node *newDefault);
+ Node *newDefault);
static void ATPrepSetStatistics(Relation rel, const char *colName,
- Node *flagValue);
+ Node *flagValue);
static void ATExecSetStatistics(Relation rel, const char *colName,
- Node *newValue);
+ Node *newValue);
static void ATExecSetStorage(Relation rel, const char *colName,
- Node *newValue);
+ Node *newValue);
static void ATExecDropColumn(Relation rel, const char *colName,
- DropBehavior behavior,
- bool recurse, bool recursing);
+ DropBehavior behavior,
+ bool recurse, bool recursing);
static void ATExecAddIndex(AlteredTableInfo *tab, Relation rel,
- IndexStmt *stmt, bool is_rebuild);
+ IndexStmt *stmt, bool is_rebuild);
static void ATExecAddConstraint(AlteredTableInfo *tab, Relation rel,
- Node *newConstraint);
+ Node *newConstraint);
static void ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
- FkConstraint *fkconstraint);
+ FkConstraint *fkconstraint);
static void ATPrepDropConstraint(List **wqueue, Relation rel,
- bool recurse, AlterTableCmd *cmd);
+ bool recurse, AlterTableCmd *cmd);
static void ATExecDropConstraint(Relation rel, const char *constrName,
- DropBehavior behavior, bool quiet);
+ DropBehavior behavior, bool quiet);
static void ATPrepAlterColumnType(List **wqueue,
- AlteredTableInfo *tab, Relation rel,
- bool recurse, bool recursing,
- AlterTableCmd *cmd);
+ AlteredTableInfo *tab, Relation rel,
+ bool recurse, bool recursing,
+ AlterTableCmd *cmd);
static void ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
- const char *colName, TypeName *typename);
+ const char *colName, TypeName *typename);
static void ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab);
static void ATPostAlterTypeParse(char *cmd, List **wqueue);
static void ATExecChangeOwner(Oid relationOid, int32 newOwnerSysId);
static void ATExecClusterOn(Relation rel, const char *indexName);
static void ATExecDropCluster(Relation rel);
static void ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel,
- char *tablespacename);
+ char *tablespacename);
static void ATExecSetTableSpace(Oid tableOid, Oid newTableSpace);
static void copy_relation_data(Relation rel, SMgrRelation dst);
static int ri_trigger_type(Oid tgfoid);
@@ -289,7 +289,7 @@ DefineRelation(CreateStmt *stmt, char relkind)
if (stmt->oncommit != ONCOMMIT_NOOP && !stmt->relation->istemp)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("ON COMMIT can only be used on temporary tables")));
+ errmsg("ON COMMIT can only be used on temporary tables")));
/*
* Look up the namespace in which we are supposed to create the
@@ -310,12 +310,13 @@ DefineRelation(CreateStmt *stmt, char relkind)
}
/*
- * Select tablespace to use. If not specified, use containing schema's
- * default tablespace (which may in turn default to database's default).
+ * Select tablespace to use. If not specified, use containing
+ * schema's default tablespace (which may in turn default to
+ * database's default).
*/
if (stmt->tablespacename)
{
- AclResult aclresult;
+ AclResult aclresult;
tablespaceId = get_tablespace_oid(stmt->tablespacename);
if (!OidIsValid(tablespaceId))
@@ -329,7 +330,9 @@ DefineRelation(CreateStmt *stmt, char relkind)
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
stmt->tablespacename);
- } else {
+ }
+ else
+ {
tablespaceId = get_namespace_tablespace(namespaceId);
/* note no permission check on tablespace in this case */
}
@@ -340,7 +343,7 @@ DefineRelation(CreateStmt *stmt, char relkind)
*/
schema = MergeAttributes(schema, stmt->inhRelations,
stmt->relation->istemp,
- &inheritOids, &old_constraints, &parentOidCount);
+ &inheritOids, &old_constraints, &parentOidCount);
/*
* Create a relation descriptor from the relation schema and create
@@ -357,23 +360,25 @@ DefineRelation(CreateStmt *stmt, char relkind)
if (old_constraints != NIL)
{
ConstrCheck *check = (ConstrCheck *)
- palloc0(list_length(old_constraints) * sizeof(ConstrCheck));
+ palloc0(list_length(old_constraints) * sizeof(ConstrCheck));
int ncheck = 0;
foreach(listptr, old_constraints)
{
Constraint *cdef = (Constraint *) lfirst(listptr);
- bool dup = false;
+ bool dup = false;
if (cdef->contype != CONSTR_CHECK)
continue;
Assert(cdef->name != NULL);
Assert(cdef->raw_expr == NULL && cdef->cooked_expr != NULL);
+
/*
- * In multiple-inheritance situations, it's possible to inherit
- * the same grandparent constraint through multiple parents.
- * Hence, discard inherited constraints that match as to both
- * name and expression. Otherwise, gripe if the names conflict.
+ * In multiple-inheritance situations, it's possible to
+ * inherit the same grandparent constraint through multiple
+ * parents. Hence, discard inherited constraints that match as
+ * to both name and expression. Otherwise, gripe if the names
+ * conflict.
*/
for (i = 0; i < ncheck; i++)
{
@@ -546,8 +551,9 @@ TruncateRelation(const RangeVar *relation)
RelationGetRelationName(rel))));
/*
- * We can never allow truncation of shared or nailed-in-cache relations,
- * because we can't support changing their relfilenode values.
+ * We can never allow truncation of shared or nailed-in-cache
+ * relations, because we can't support changing their relfilenode
+ * values.
*/
if (rel->rd_rel->relisshared || rel->rd_isnailed)
ereport(ERROR,
@@ -562,7 +568,7 @@ TruncateRelation(const RangeVar *relation)
if (isOtherTempNamespace(RelationGetNamespace(rel)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot truncate temporary tables of other sessions")));
+ errmsg("cannot truncate temporary tables of other sessions")));
/*
* Don't allow truncate on tables which are referenced by foreign keys
@@ -571,7 +577,7 @@ TruncateRelation(const RangeVar *relation)
/*
* Okay, here we go: create a new empty storage file for the relation,
- * and assign it as the relfilenode value. The old storage file is
+ * and assign it as the relfilenode value. The old storage file is
* scheduled for deletion at commit.
*/
setNewRelfilenode(rel);
@@ -797,8 +803,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
def->typename->typmod != attribute->atttypmod)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("inherited column \"%s\" has a type conflict",
- attributeName),
+ errmsg("inherited column \"%s\" has a type conflict",
+ attributeName),
errdetail("%s versus %s",
TypeNameToString(def->typename),
format_type_be(attribute->atttypid))));
@@ -935,15 +941,15 @@ MergeAttributes(List *schema, List *supers, bool istemp,
* have the same type and typmod.
*/
ereport(NOTICE,
- (errmsg("merging column \"%s\" with inherited definition",
- attributeName)));
+ (errmsg("merging column \"%s\" with inherited definition",
+ attributeName)));
def = (ColumnDef *) list_nth(inhSchema, exist_attno - 1);
if (typenameTypeId(def->typename) != typenameTypeId(newdef->typename) ||
def->typename->typmod != newdef->typename->typmod)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("column \"%s\" has a type conflict",
- attributeName),
+ errmsg("column \"%s\" has a type conflict",
+ attributeName),
errdetail("%s versus %s",
TypeNameToString(def->typename),
TypeNameToString(newdef->typename))));
@@ -1061,12 +1067,12 @@ StoreCatalogInheritance(Oid relationId, List *supers)
/*
* Store INHERITS information in pg_inherits using direct ancestors
- * only. Also enter dependencies on the direct ancestors, and make sure
- * they are marked with relhassubclass = true.
+ * only. Also enter dependencies on the direct ancestors, and make
+ * sure they are marked with relhassubclass = true.
*
* (Once upon a time, both direct and indirect ancestors were found here
- * and then entered into pg_ipl. Since that catalog doesn't exist anymore,
- * there's no need to look for indirect ancestors.)
+ * and then entered into pg_ipl. Since that catalog doesn't exist
+ * anymore, there's no need to look for indirect ancestors.)
*/
relation = heap_openr(InheritsRelationName, RowExclusiveLock);
desc = RelationGetDescr(relation);
@@ -1081,7 +1087,7 @@ StoreCatalogInheritance(Oid relationId, List *supers)
parentobject;
datum[0] = ObjectIdGetDatum(relationId); /* inhrel */
- datum[1] = ObjectIdGetDatum(parentOid); /* inhparent */
+ datum[1] = ObjectIdGetDatum(parentOid); /* inhparent */
datum[2] = Int16GetDatum(seqNumber); /* inhseqno */
nullarr[0] = ' ';
@@ -1156,9 +1162,8 @@ setRelhassubclassInRelation(Oid relationId, bool relhassubclass)
/*
* Fetch a modifiable copy of the tuple, modify it, update pg_class.
*
- * If the tuple already has the right relhassubclass setting, we
- * don't need to update it, but we still need to issue an SI inval
- * message.
+ * If the tuple already has the right relhassubclass setting, we don't
+ * need to update it, but we still need to issue an SI inval message.
*/
relationRelation = heap_openr(RelationRelationName, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
@@ -1318,7 +1323,7 @@ renameatt(Oid myrelid,
0, 0))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" already exists",
+ errmsg("column \"%s\" of relation \"%s\" already exists",
newattname, RelationGetRelationName(targetrelation))));
namestrcpy(&(attform->attname), newattname);
@@ -1712,9 +1717,9 @@ update_ri_trigger_args(Oid relid,
* rebuild relcache entries. (Ideally this should happen
* automatically...)
*
- * We can skip this for triggers on relid itself, since that
- * relcache flush will happen anyway due to the table or column
- * rename. We just need to catch the far ends of RI relationships.
+ * We can skip this for triggers on relid itself, since that relcache
+ * flush will happen anyway due to the table or column rename. We
+ * just need to catch the far ends of RI relationships.
*/
pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple);
if (pg_trigger->tgrelid != relid)
@@ -1747,11 +1752,11 @@ update_ri_trigger_args(Oid relid,
* 3. Scan table(s) to check new constraints, and optionally recopy
* the data into new table(s).
* Phase 3 is not performed unless one or more of the subcommands requires
- * it. The intention of this design is to allow multiple independent
+ * it. The intention of this design is to allow multiple independent
* updates of the table schema to be performed with only one pass over the
* data.
*
- * ATPrepCmd performs phase 1. A "work queue" entry is created for
+ * ATPrepCmd performs phase 1. A "work queue" entry is created for
* each table to be affected (there may be multiple affected tables if the
* commands traverse a table inheritance hierarchy). Also we do preliminary
* validation of the subcommands, including parse transformation of those
@@ -1762,7 +1767,7 @@ update_ri_trigger_args(Oid relid,
* phases 2 and 3 do no explicit recursion, since phase 1 already did it).
* Certain subcommands need to be performed before others to avoid
* unnecessary conflicts; for example, DROP COLUMN should come before
- * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple
+ * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple
* lists, one for each logical "pass" of phase 2.
*
* ATRewriteTables performs phase 3 for those tables that need it.
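
The comment reflowed just above describes the three-phase ALTER TABLE design: phase 1 (ATPrepCmd) sorts each subcommand into a per-table work-queue pass, phase 2 (ATRewriteCatalogs) executes the passes in a fixed safe order (e.g. DROP COLUMN before ADD COLUMN), and phase 3 rewrites data only if some subcommand needs it. A minimal sketch of the pass-based dispatch, with invented pass names and subcommand strings rather than the backend's types:

#include <stdio.h>

enum pass { PASS_DROP, PASS_ALTER_TYPE, PASS_ADD_COL, PASS_MISC, NUM_PASSES };

struct subcmd
{
    const char *desc;
    enum pass   pass;
};

int
main(void)
{
    /* phase 1: commands arrive in user order, each tagged with a pass */
    struct subcmd queue[] = {
        {"ADD COLUMN c2 int", PASS_ADD_COL},
        {"DROP COLUMN c1",    PASS_DROP},
        {"OWNER TO joe",      PASS_MISC},
    };
    int nq = 3;

    /* phase 2: execute pass by pass, in the fixed safe order */
    for (int p = 0; p < NUM_PASSES; p++)
        for (int i = 0; i < nq; i++)
            if (queue[i].pass == (enum pass) p)
                printf("pass %d: %s\n", p, queue[i].desc);
    return 0;
}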
@@ -1843,8 +1848,8 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
cmd = copyObject(cmd);
/*
- * Do permissions checking, recursion to child tables if needed,
- * and any additional phase-1 processing needed.
+ * Do permissions checking, recursion to child tables if needed, and
+ * any additional phase-1 processing needed.
*/
switch (cmd->subtype)
{
@@ -1855,9 +1860,10 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
pass = AT_PASS_ADD_COL;
break;
case AT_ColumnDefault: /* ALTER COLUMN DEFAULT */
+
/*
- * We allow defaults on views so that INSERT into a view can have
- * default-ish behavior. This works because the rewriter
+ * We allow defaults on views so that INSERT into a view can
+ * have default-ish behavior. This works because the rewriter
* substitutes default values into INSERTs before it expands
* rules.
*/
@@ -1906,6 +1912,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
break;
case AT_AddConstraint: /* ADD CONSTRAINT */
ATSimplePermissions(rel, false);
+
/*
* Currently we recurse only for CHECK constraints, never for
* foreign-key constraints. UNIQUE/PKEY constraints won't be
@@ -1928,13 +1935,13 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
/* No command-specific prep needed */
pass = AT_PASS_DROP;
break;
- case AT_AlterColumnType: /* ALTER COLUMN TYPE */
+ case AT_AlterColumnType: /* ALTER COLUMN TYPE */
ATSimplePermissions(rel, false);
/* Performs own recursion */
ATPrepAlterColumnType(wqueue, tab, rel, recurse, recursing, cmd);
pass = AT_PASS_ALTER_TYPE;
break;
- case AT_ToastTable: /* CREATE TOAST TABLE */
+ case AT_ToastTable: /* CREATE TOAST TABLE */
ATSimplePermissions(rel, false);
/* This command never recurses */
/* No command-specific prep needed */
@@ -1945,14 +1952,14 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
/* No command-specific prep needed */
pass = AT_PASS_MISC;
break;
- case AT_ClusterOn: /* CLUSTER ON */
+ case AT_ClusterOn: /* CLUSTER ON */
case AT_DropCluster: /* SET WITHOUT CLUSTER */
ATSimplePermissions(rel, false);
/* These commands never recurse */
/* No command-specific prep needed */
pass = AT_PASS_MISC;
break;
- case AT_DropOids: /* SET WITHOUT OIDS */
+ case AT_DropOids: /* SET WITHOUT OIDS */
ATSimplePermissions(rel, false);
/* Performs own recursion */
if (rel->rd_rel->relhasoids)
@@ -1969,9 +1976,9 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
case AT_SetTableSpace: /* SET TABLESPACE */
/* This command never recurses */
ATPrepSetTableSpace(tab, rel, cmd->name);
- pass = AT_PASS_MISC; /* doesn't actually matter */
+ pass = AT_PASS_MISC; /* doesn't actually matter */
break;
- default: /* oops */
+ default: /* oops */
elog(ERROR, "unrecognized alter table type: %d",
(int) cmd->subtype);
pass = 0; /* keep compiler quiet */
@@ -1985,7 +1992,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
/*
* ATRewriteCatalogs
*
- * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are
+ * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are
* dispatched in a "safe" execution order (designed to avoid unnecessary
* conflicts).
*/
@@ -1997,10 +2004,10 @@ ATRewriteCatalogs(List **wqueue)
/*
* We process all the tables "in parallel", one pass at a time. This
- * is needed because we may have to propagate work from one table
- * to another (specifically, ALTER TYPE on a foreign key's PK has to
+ * is needed because we may have to propagate work from one table to
+ * another (specifically, ALTER TYPE on a foreign key's PK has to
* dispatch the re-adding of the foreign key constraint to the other
- * table). Work can only be propagated into later passes, however.
+ * table). Work can only be propagated into later passes, however.
*/
for (pass = 0; pass < AT_NUM_PASSES; pass++)
{
@@ -2015,18 +2022,19 @@ ATRewriteCatalogs(List **wqueue)
if (subcmds == NIL)
continue;
- /* Exclusive lock was obtained by phase 1, needn't get it again */
+ /*
+ * Exclusive lock was obtained by phase 1, needn't get it
+ * again
+ */
rel = relation_open(tab->relid, NoLock);
foreach(lcmd, subcmds)
- {
ATExecCmd(tab, rel, (AlterTableCmd *) lfirst(lcmd));
- }
/*
- * After the ALTER TYPE pass, do cleanup work (this is not done in
- * ATExecAlterColumnType since it should be done only once if
- * multiple columns of a table are altered).
+ * After the ALTER TYPE pass, do cleanup work (this is not
+ * done in ATExecAlterColumnType since it should be done only
+ * once if multiple columns of a table are altered).
*/
if (pass == AT_PASS_ALTER_TYPE)
ATPostAlterTypeCleanup(wqueue, tab);
@@ -2047,9 +2055,7 @@ ATRewriteCatalogs(List **wqueue)
(tab->subcmds[AT_PASS_ADD_COL] ||
tab->subcmds[AT_PASS_ALTER_TYPE] ||
tab->subcmds[AT_PASS_COL_ATTRS]))
- {
AlterTableCreateToastTable(tab->relid, true);
- }
}
}
@@ -2082,7 +2088,7 @@ ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd)
case AT_DropColumn: /* DROP COLUMN */
ATExecDropColumn(rel, cmd->name, cmd->behavior, false, false);
break;
- case AT_DropColumnRecurse: /* DROP COLUMN with recursion */
+ case AT_DropColumnRecurse: /* DROP COLUMN with recursion */
ATExecDropColumn(rel, cmd->name, cmd->behavior, true, false);
break;
case AT_AddIndex: /* ADD INDEX */
@@ -2100,7 +2106,7 @@ ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd)
case AT_DropConstraintQuietly: /* DROP CONSTRAINT for child */
ATExecDropConstraint(rel, cmd->name, cmd->behavior, true);
break;
- case AT_AlterColumnType: /* ALTER COLUMN TYPE */
+ case AT_AlterColumnType: /* ALTER COLUMN TYPE */
ATExecAlterColumnType(tab, rel, cmd->name, (TypeName *) cmd->def);
break;
case AT_ToastTable: /* CREATE TOAST TABLE */
@@ -2113,29 +2119,31 @@ ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd)
case AT_ClusterOn: /* CLUSTER ON */
ATExecClusterOn(rel, cmd->name);
break;
- case AT_DropCluster: /* SET WITHOUT CLUSTER */
+ case AT_DropCluster: /* SET WITHOUT CLUSTER */
ATExecDropCluster(rel);
break;
case AT_DropOids: /* SET WITHOUT OIDS */
+
/*
- * Nothing to do here; we'll have generated a DropColumn subcommand
- * to do the real work
+ * Nothing to do here; we'll have generated a DropColumn
+ * subcommand to do the real work
*/
break;
- case AT_SetTableSpace: /* SET TABLESPACE */
+ case AT_SetTableSpace: /* SET TABLESPACE */
+
/*
* Nothing to do here; Phase 3 does the work
*/
break;
- default: /* oops */
+ default: /* oops */
elog(ERROR, "unrecognized alter table type: %d",
(int) cmd->subtype);
break;
}
/*
- * Bump the command counter to ensure the next subcommand in the sequence
- * can see the changes so far
+ * Bump the command counter to ensure the next subcommand in the
+ * sequence can see the changes so far
*/
CommandCounterIncrement();
}
@@ -2164,14 +2172,14 @@ ATRewriteTables(List **wqueue)
char NewHeapName[NAMEDATALEN];
Oid NewTableSpace;
Relation OldHeap;
- ObjectAddress object;
+ ObjectAddress object;
OldHeap = heap_open(tab->relid, NoLock);
/*
* We can never allow rewriting of shared or nailed-in-cache
- * relations, because we can't support changing their relfilenode
- * values.
+ * relations, because we can't support changing their
+ * relfilenode values.
*/
if (OldHeap->rd_rel->relisshared || OldHeap->rd_isnailed)
ereport(ERROR,
@@ -2180,8 +2188,8 @@ ATRewriteTables(List **wqueue)
RelationGetRelationName(OldHeap))));
/*
- * Don't allow rewrite on temp tables of other backends ... their
- * local buffer manager is not going to cope.
+ * Don't allow rewrite on temp tables of other backends ...
+ * their local buffer manager is not going to cope.
*/
if (isOtherTempNamespace(RelationGetNamespace(OldHeap)))
ereport(ERROR,
@@ -2201,11 +2209,12 @@ ATRewriteTables(List **wqueue)
/*
* Create the new heap, using a temporary name in the same
- * namespace as the existing table. NOTE: there is some risk of
- * collision with user relnames. Working around this seems more
- * trouble than it's worth; in particular, we can't create the new
- * heap in a different namespace from the old, or we will have
- * problems with the TEMP status of temp tables.
+ * namespace as the existing table. NOTE: there is some risk
+ * of collision with user relnames. Working around this seems
+ * more trouble than it's worth; in particular, we can't
+ * create the new heap in a different namespace from the old,
+ * or we will have problems with the TEMP status of temp
+ * tables.
*/
snprintf(NewHeapName, sizeof(NewHeapName),
"pg_temp_%u", tab->relid);
@@ -2230,15 +2239,15 @@ ATRewriteTables(List **wqueue)
object.objectSubId = 0;
/*
- * The new relation is local to our transaction and we know nothing
- * depends on it, so DROP_RESTRICT should be OK.
+ * The new relation is local to our transaction and we know
+ * nothing depends on it, so DROP_RESTRICT should be OK.
*/
performDeletion(&object, DROP_RESTRICT);
/* performDeletion does CommandCounterIncrement at end */
/*
- * Rebuild each index on the relation (but not the toast table,
- * which is all-new anyway). We do not need
+ * Rebuild each index on the relation (but not the toast
+ * table, which is all-new anyway). We do not need
* CommandCounterIncrement() because reindex_relation does it.
*/
reindex_relation(tab->relid, false);
@@ -2246,14 +2255,16 @@ ATRewriteTables(List **wqueue)
else
{
/*
- * Test the current data within the table against new constraints
- * generated by ALTER TABLE commands, but don't rebuild data.
+ * Test the current data within the table against new
+ * constraints generated by ALTER TABLE commands, but don't
+ * rebuild data.
*/
if (tab->constraints != NIL)
ATRewriteTable(tab, InvalidOid);
+
/*
- * If we had SET TABLESPACE but no reason to reconstruct tuples,
- * just do a block-by-block copy.
+ * If we had SET TABLESPACE but no reason to reconstruct
+ * tuples, just do a block-by-block copy.
*/
if (tab->newTableSpace)
ATExecSetTableSpace(tab->relid, tab->newTableSpace);
@@ -2261,17 +2272,17 @@ ATRewriteTables(List **wqueue)
}
/*
- * Foreign key constraints are checked in a final pass, since
- * (a) it's generally best to examine each one separately, and
- * (b) it's at least theoretically possible that we have changed
- * both relations of the foreign key, and we'd better have finished
- * both rewrites before we try to read the tables.
+ * Foreign key constraints are checked in a final pass, since (a) it's
+ * generally best to examine each one separately, and (b) it's at
+ * least theoretically possible that we have changed both relations of
+ * the foreign key, and we'd better have finished both rewrites before
+ * we try to read the tables.
*/
foreach(ltab, *wqueue)
{
- AlteredTableInfo *tab = (AlteredTableInfo *) lfirst(ltab);
- Relation rel = NULL;
- ListCell *lcon;
+ AlteredTableInfo *tab = (AlteredTableInfo *) lfirst(ltab);
+ Relation rel = NULL;
+ ListCell *lcon;
foreach(lcon, tab->constraints)
{
@@ -2324,7 +2335,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
*/
oldrel = heap_open(tab->relid, NoLock);
oldTupDesc = tab->oldDesc;
- newTupDesc = RelationGetDescr(oldrel); /* includes all mods */
+ newTupDesc = RelationGetDescr(oldrel); /* includes all mods */
if (OidIsValid(OIDNewHeap))
newrel = heap_open(OIDNewHeap, AccessExclusiveLock);
@@ -2335,9 +2346,9 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
* If we need to rewrite the table, the operation has to be propagated
* to tables that use this table's rowtype as a column type.
*
- * (Eventually this will probably become true for scans as well, but
- * at the moment a composite type does not enforce any constraints,
- * so it's not necessary/appropriate to enforce them just during ALTER.)
+ * (Eventually this will probably become true for scans as well, but at
+ * the moment a composite type does not enforce any constraints, so
+ * it's not necessary/appropriate to enforce them just during ALTER.)
*/
if (newrel)
find_composite_type_dependencies(oldrel->rd_rel->reltype,
@@ -2375,7 +2386,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
foreach(l, tab->newvals)
{
- NewColumnValue *ex = lfirst(l);
+ NewColumnValue *ex = lfirst(l);
needscan = true;
@@ -2384,12 +2395,12 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
if (needscan)
{
- ExprContext *econtext;
+ ExprContext *econtext;
Datum *values;
char *nulls;
TupleTableSlot *oldslot;
TupleTableSlot *newslot;
- HeapScanDesc scan;
+ HeapScanDesc scan;
HeapTuple tuple;
econtext = GetPerTupleExprContext(estate);
@@ -2425,7 +2436,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
* Extract data from old tuple. We can force to null any
* columns that are deleted according to the new tuple.
*/
- int natts = newTupDesc->natts;
+ int natts = newTupDesc->natts;
heap_deformtuple(tuple, oldTupDesc, values, nulls);
@@ -2436,16 +2447,16 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
}
/*
- * Process supplied expressions to replace selected columns.
- * Expression inputs come from the old tuple.
+ * Process supplied expressions to replace selected
+ * columns. Expression inputs come from the old tuple.
*/
ExecStoreTuple(tuple, oldslot, InvalidBuffer, false);
econtext->ecxt_scantuple = oldslot;
foreach(l, tab->newvals)
{
- NewColumnValue *ex = lfirst(l);
- bool isNull;
+ NewColumnValue *ex = lfirst(l);
+ bool isNull;
values[ex->attnum - 1] = ExecEvalExpr(ex->exprstate,
econtext,
@@ -2478,20 +2489,20 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
con->name)));
break;
case CONSTR_NOTNULL:
- {
- Datum d;
- bool isnull;
+ {
+ Datum d;
+ bool isnull;
- d = heap_getattr(tuple, con->attnum, newTupDesc,
- &isnull);
- if (isnull)
- ereport(ERROR,
+ d = heap_getattr(tuple, con->attnum, newTupDesc,
+ &isnull);
+ if (isnull)
+ ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("column \"%s\" contains null values",
get_attname(tab->relid,
con->attnum))));
- }
- break;
+ }
+ break;
case CONSTR_FOREIGN:
/* Nothing to do here */
break;
@@ -2733,8 +2744,9 @@ find_composite_type_dependencies(Oid typeOid, const char *origTblName)
else if (OidIsValid(rel->rd_rel->reltype))
{
/*
- * A view or composite type itself isn't a problem, but we must
- * recursively check for indirect dependencies via its rowtype.
+ * A view or composite type itself isn't a problem, but we
+ * must recursively check for indirect dependencies via its
+ * rowtype.
*/
find_composite_type_dependencies(rel->rd_rel->reltype,
origTblName);
@@ -2790,7 +2802,7 @@ ATPrepAddColumn(List **wqueue, Relation rel, bool recurse,
if (find_inheritance_children(RelationGetRelid(rel)) != NIL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("column must be added to child tables too")));
+ errmsg("column must be added to child tables too")));
}
}
@@ -2815,8 +2827,8 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel,
attrdesc = heap_openr(AttributeRelationName, RowExclusiveLock);
/*
- * Are we adding the column to a recursion child? If so, check whether
- * to merge with an existing definition for the column.
+ * Are we adding the column to a recursion child? If so, check
+ * whether to merge with an existing definition for the column.
*/
if (colDef->inhcount > 0)
{
@@ -2834,7 +2846,7 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("child table \"%s\" has different type for column \"%s\"",
- RelationGetRelationName(rel), colDef->colname)));
+ RelationGetRelationName(rel), colDef->colname)));
/* Bump the existing child att's inhcount */
childatt->attinhcount++;
@@ -2846,7 +2858,7 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel,
/* Inform the user about the merge */
ereport(NOTICE,
(errmsg("merging definition of column \"%s\" for child \"%s\"",
- colDef->colname, RelationGetRelationName(rel))));
+ colDef->colname, RelationGetRelationName(rel))));
heap_close(attrdesc, RowExclusiveLock);
return;
@@ -2872,8 +2884,8 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel,
0, 0))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" already exists",
- colDef->colname, RelationGetRelationName(rel))));
+ errmsg("column \"%s\" of relation \"%s\" already exists",
+ colDef->colname, RelationGetRelationName(rel))));
minattnum = ((Form_pg_class) GETSTRUCT(reltup))->relnatts;
maxatts = minattnum + 1;
@@ -2965,21 +2977,20 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel,
/*
* Tell Phase 3 to fill in the default expression, if there is one.
*
- * If there is no default, Phase 3 doesn't have to do anything,
- * because that effectively means that the default is NULL. The
- * heap tuple access routines always check for attnum > # of attributes
- * in tuple, and return NULL if so, so without any modification of
- * the tuple data we will get the effect of NULL values in the new
- * column.
+ * If there is no default, Phase 3 doesn't have to do anything, because
+ * that effectively means that the default is NULL. The heap tuple
+ * access routines always check for attnum > # of attributes in tuple,
+ * and return NULL if so, so without any modification of the tuple
+ * data we will get the effect of NULL values in the new column.
*
* Note: we use build_column_default, and not just the cooked default
- * returned by AddRelationRawConstraints, so that the right thing happens
- * when a datatype's default applies.
+ * returned by AddRelationRawConstraints, so that the right thing
+ * happens when a datatype's default applies.
*/
defval = (Expr *) build_column_default(rel, attribute->attnum);
if (defval)
{
- NewColumnValue *newval;
+ NewColumnValue *newval;
newval = (NewColumnValue *) palloc0(sizeof(NewColumnValue));
newval->attnum = attribute->attnum;
@@ -3099,8 +3110,8 @@ ATExecDropNotNull(Relation rel, const char *colName)
if (indexStruct->indkey[i] == attnum)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("column \"%s\" is in a primary key",
- colName)));
+ errmsg("column \"%s\" is in a primary key",
+ colName)));
}
}
@@ -3162,7 +3173,7 @@ ATExecSetNotNull(AlteredTableInfo *tab, Relation rel,
/*
* Okay, actually perform the catalog change ... if needed
*/
- if (! ((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull)
+ if (!((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull)
{
((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull = TRUE;
@@ -3199,8 +3210,8 @@ ATExecColumnDefault(Relation rel, const char *colName,
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
/* Prevent them from altering a system attribute */
if (attnum <= 0)
@@ -3240,10 +3251,10 @@ static void
ATPrepSetStatistics(Relation rel, const char *colName, Node *flagValue)
{
/*
- * We do our own permission checking because (a) we want to allow
- * SET STATISTICS on indexes (for expressional index columns), and
- * (b) we want to allow SET STATISTICS on system catalogs without
- * requiring allowSystemTableMods to be turned on.
+ * We do our own permission checking because (a) we want to allow SET
+ * STATISTICS on indexes (for expressional index columns), and (b) we
+ * want to allow SET STATISTICS on system catalogs without requiring
+ * allowSystemTableMods to be turned on.
*/
if (rel->rd_rel->relkind != RELKIND_RELATION &&
rel->rd_rel->relkind != RELKIND_INDEX)
@@ -3295,8 +3306,8 @@ ATExecSetStatistics(Relation rel, const char *colName, Node *newValue)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
attrtuple = (Form_pg_attribute) GETSTRUCT(tuple);
if (attrtuple->attnum <= 0)
@@ -3356,8 +3367,8 @@ ATExecSetStorage(Relation rel, const char *colName, Node *newValue)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
attrtuple = (Form_pg_attribute) GETSTRUCT(tuple);
if (attrtuple->attnum <= 0)
@@ -3394,9 +3405,9 @@ ATExecSetStorage(Relation rel, const char *colName, Node *newValue)
*
* DROP COLUMN cannot use the normal ALTER TABLE recursion mechanism,
* because we have to decide at runtime whether to recurse or not depending
- * on whether attinhcount goes to zero or not. (We can't check this in a
+ * on whether attinhcount goes to zero or not. (We can't check this in a
* static pre-pass because it won't handle multiple inheritance situations
- * correctly.) Since DROP COLUMN doesn't need to create any work queue
+ * correctly.) Since DROP COLUMN doesn't need to create any work queue
* entries for Phase 3, it's okay to recurse internally in this routine
* without considering the work queue.
*/
@@ -3479,8 +3490,8 @@ ATExecDropColumn(Relation rel, const char *colName,
{
/*
* If the child column has other definition sources, just
- * decrement its inheritance count; if not, recurse to delete
- * it.
+ * decrement its inheritance count; if not, recurse to
+ * delete it.
*/
if (childatt->attinhcount == 1 && !childatt->attislocal)
{
@@ -3504,9 +3515,9 @@ ATExecDropColumn(Relation rel, const char *colName,
else
{
/*
- * If we were told to drop ONLY in this table (no recursion),
- * we need to mark the inheritors' attribute as locally
- * defined rather than inherited.
+ * If we were told to drop ONLY in this table (no
+ * recursion), we need to mark the inheritors' attribute
+ * as locally defined rather than inherited.
*/
childatt->attinhcount--;
childatt->attislocal = true;
@@ -3547,7 +3558,7 @@ ATExecDropColumn(Relation rel, const char *colName,
class_rel = heap_openr(RelationRelationName, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
- ObjectIdGetDatum(RelationGetRelid(rel)),
+ ObjectIdGetDatum(RelationGetRelid(rel)),
0, 0, 0);
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for relation %u",
@@ -3575,9 +3586,9 @@ static void
ATExecAddIndex(AlteredTableInfo *tab, Relation rel,
IndexStmt *stmt, bool is_rebuild)
{
- bool check_rights;
- bool skip_build;
- bool quiet;
+ bool check_rights;
+ bool skip_build;
+ bool quiet;
Assert(IsA(stmt, IndexStmt));
@@ -3588,17 +3599,17 @@ ATExecAddIndex(AlteredTableInfo *tab, Relation rel,
/* suppress notices when rebuilding existing index */
quiet = is_rebuild;
- DefineIndex(stmt->relation, /* relation */
- stmt->idxname, /* index name */
- stmt->accessMethod, /* am name */
+ DefineIndex(stmt->relation, /* relation */
+ stmt->idxname, /* index name */
+ stmt->accessMethod, /* am name */
stmt->tableSpace,
- stmt->indexParams, /* parameters */
+ stmt->indexParams, /* parameters */
(Expr *) stmt->whereClause,
stmt->rangetable,
stmt->unique,
stmt->primary,
stmt->isconstraint,
- true, /* is_alter_table */
+ true, /* is_alter_table */
check_rights,
skip_build,
quiet);
@@ -3613,84 +3624,85 @@ ATExecAddConstraint(AlteredTableInfo *tab, Relation rel, Node *newConstraint)
switch (nodeTag(newConstraint))
{
case T_Constraint:
- {
- Constraint *constr = (Constraint *) newConstraint;
-
- /*
- * Currently, we only expect to see CONSTR_CHECK nodes
- * arriving here (see the preprocessing done in
- * parser/analyze.c). Use a switch anyway to make it
- * easier to add more code later.
- */
- switch (constr->contype)
{
- case CONSTR_CHECK:
- {
- List *newcons;
- ListCell *lcon;
+ Constraint *constr = (Constraint *) newConstraint;
- /*
- * Call AddRelationRawConstraints to do the work.
- * It returns a list of cooked constraints.
- */
- newcons = AddRelationRawConstraints(rel, NIL,
- list_make1(constr));
- /* Add each constraint to Phase 3's queue */
- foreach(lcon, newcons)
- {
- CookedConstraint *ccon = (CookedConstraint *) lfirst(lcon);
- NewConstraint *newcon;
-
- newcon = (NewConstraint *) palloc0(sizeof(NewConstraint));
- newcon->name = ccon->name;
- newcon->contype = ccon->contype;
- newcon->attnum = ccon->attnum;
- /* ExecQual wants implicit-AND format */
- newcon->qual = (Node *)
- make_ands_implicit((Expr *) ccon->expr);
-
- tab->constraints = lappend(tab->constraints,
- newcon);
- }
- break;
+ /*
+ * Currently, we only expect to see CONSTR_CHECK nodes
+ * arriving here (see the preprocessing done in
+ * parser/analyze.c). Use a switch anyway to make it
+ * easier to add more code later.
+ */
+ switch (constr->contype)
+ {
+ case CONSTR_CHECK:
+ {
+ List *newcons;
+ ListCell *lcon;
+
+ /*
+ * Call AddRelationRawConstraints to do the
+ * work. It returns a list of cooked
+ * constraints.
+ */
+ newcons = AddRelationRawConstraints(rel, NIL,
+ list_make1(constr));
+ /* Add each constraint to Phase 3's queue */
+ foreach(lcon, newcons)
+ {
+ CookedConstraint *ccon = (CookedConstraint *) lfirst(lcon);
+ NewConstraint *newcon;
+
+ newcon = (NewConstraint *) palloc0(sizeof(NewConstraint));
+ newcon->name = ccon->name;
+ newcon->contype = ccon->contype;
+ newcon->attnum = ccon->attnum;
+ /* ExecQual wants implicit-AND format */
+ newcon->qual = (Node *)
+ make_ands_implicit((Expr *) ccon->expr);
+
+ tab->constraints = lappend(tab->constraints,
+ newcon);
+ }
+ break;
+ }
+ default:
+ elog(ERROR, "unrecognized constraint type: %d",
+ (int) constr->contype);
}
- default:
- elog(ERROR, "unrecognized constraint type: %d",
- (int) constr->contype);
+ break;
}
- break;
- }
case T_FkConstraint:
- {
- FkConstraint *fkconstraint = (FkConstraint *) newConstraint;
-
- /*
- * Assign or validate constraint name
- */
- if (fkconstraint->constr_name)
{
- if (ConstraintNameIsUsed(CONSTRAINT_RELATION,
- RelationGetRelid(rel),
- RelationGetNamespace(rel),
- fkconstraint->constr_name))
- ereport(ERROR,
- (errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("constraint \"%s\" for relation \"%s\" already exists",
- fkconstraint->constr_name,
- RelationGetRelationName(rel))));
- }
- else
- fkconstraint->constr_name =
- ChooseConstraintName(RelationGetRelationName(rel),
- strVal(linitial(fkconstraint->fk_attrs)),
- "fkey",
- RelationGetNamespace(rel),
- NIL);
+ FkConstraint *fkconstraint = (FkConstraint *) newConstraint;
- ATAddForeignKeyConstraint(tab, rel, fkconstraint);
+ /*
+ * Assign or validate constraint name
+ */
+ if (fkconstraint->constr_name)
+ {
+ if (ConstraintNameIsUsed(CONSTRAINT_RELATION,
+ RelationGetRelid(rel),
+ RelationGetNamespace(rel),
+ fkconstraint->constr_name))
+ ereport(ERROR,
+ (errcode(ERRCODE_DUPLICATE_OBJECT),
+ errmsg("constraint \"%s\" for relation \"%s\" already exists",
+ fkconstraint->constr_name,
+ RelationGetRelationName(rel))));
+ }
+ else
+ fkconstraint->constr_name =
+ ChooseConstraintName(RelationGetRelationName(rel),
+ strVal(linitial(fkconstraint->fk_attrs)),
+ "fkey",
+ RelationGetNamespace(rel),
+ NIL);
- break;
- }
+ ATAddForeignKeyConstraint(tab, rel, fkconstraint);
+
+ break;
+ }
default:
elog(ERROR, "unrecognized node type: %d",
(int) nodeTag(newConstraint));
@@ -3761,12 +3773,12 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
RelationGetRelationName(rel));
/*
- * Disallow reference from permanent table to temp table or vice versa.
- * (The ban on perm->temp is for fairly obvious reasons. The ban on
- * temp->perm is because other backends might need to run the RI triggers
- * on the perm table, but they can't reliably see tuples the owning
- * backend has created in the temp table, because non-shared buffers
- * are used for temp tables.)
+ * Disallow reference from permanent table to temp table or vice
+ * versa. (The ban on perm->temp is for fairly obvious reasons. The
+ * ban on temp->perm is because other backends might need to run the
+ * RI triggers on the perm table, but they can't reliably see tuples
+ * the owning backend has created in the temp table, because
+ * non-shared buffers are used for temp tables.)
*/
if (isTempNamespace(RelationGetNamespace(pkrel)))
{
@@ -3834,11 +3846,11 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
* fktypoid[i] is the foreign key table's i'th key's type
*
* Note that we look for an operator with the PK type on the left;
- * when the types are different this is critical because the PK index
- * will need operators with the indexkey on the left. (Ordinarily
- * both commutator operators will exist if either does, but we won't
- * get the right answer from the test below on opclass membership
- * unless we select the proper operator.)
+ * when the types are different this is critical because the PK
+ * index will need operators with the indexkey on the left.
+ * (Ordinarily both commutator operators will exist if either
+ * does, but we won't get the right answer from the test below on
+ * opclass membership unless we select the proper operator.)
*/
Operator o = oper(list_make1(makeString("=")),
pktypoid[i], fktypoid[i], true);
@@ -3851,8 +3863,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
fkconstraint->constr_name),
errdetail("Key columns \"%s\" and \"%s\" "
"are of incompatible types: %s and %s.",
- strVal(list_nth(fkconstraint->fk_attrs, i)),
- strVal(list_nth(fkconstraint->pk_attrs, i)),
+ strVal(list_nth(fkconstraint->fk_attrs, i)),
+ strVal(list_nth(fkconstraint->pk_attrs, i)),
format_type_be(fktypoid[i]),
format_type_be(pktypoid[i]))));
@@ -3868,8 +3880,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
fkconstraint->constr_name),
errdetail("Key columns \"%s\" and \"%s\" "
"are of different types: %s and %s.",
- strVal(list_nth(fkconstraint->fk_attrs, i)),
- strVal(list_nth(fkconstraint->pk_attrs, i)),
+ strVal(list_nth(fkconstraint->fk_attrs, i)),
+ strVal(list_nth(fkconstraint->pk_attrs, i)),
format_type_be(fktypoid[i]),
format_type_be(pktypoid[i]))));
@@ -3877,8 +3889,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
}
/*
- * Tell Phase 3 to check that the constraint is satisfied by existing rows
- * (we can skip this during table creation).
+ * Tell Phase 3 to check that the constraint is satisfied by existing
+ * rows (we can skip this during table creation).
*/
if (!fkconstraint->skip_validation)
{
@@ -3971,10 +3983,10 @@ transformColumnNameList(Oid relId, List *colList,
* transformFkeyGetPrimaryKey -
*
* Look up the names, attnums, and types of the primary key attributes
- * for the pkrel. Also return the index OID and index opclasses of the
+ * for the pkrel. Also return the index OID and index opclasses of the
* index supporting the primary key.
*
- * All parameters except pkrel are output parameters. Also, the function
+ * All parameters except pkrel are output parameters. Also, the function
* return value is the number of attributes in the primary key.
*
* Used when the column list in the REFERENCES specification is omitted.
@@ -4060,7 +4072,7 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
static Oid
transformFkeyCheckAttrs(Relation pkrel,
int numattrs, int16 *attnums,
- Oid *opclasses) /* output parameter */
+ Oid *opclasses) /* output parameter */
{
Oid indexoid = InvalidOid;
bool found = false;
@@ -4190,8 +4202,8 @@ validateForeignKeyConstraint(FkConstraint *fkconstraint,
trig.tginitdeferred = FALSE;
trig.tgargs = (char **) palloc(sizeof(char *) *
- (4 + list_length(fkconstraint->fk_attrs)
- + list_length(fkconstraint->pk_attrs)));
+ (4 + list_length(fkconstraint->fk_attrs)
+ + list_length(fkconstraint->pk_attrs)));
trig.tgargs[0] = trig.tgname;
trig.tgargs[1] = RelationGetRelationName(rel);
@@ -4518,8 +4530,8 @@ ATExecDropConstraint(Relation rel, const char *constrName,
/* Otherwise if more than one constraint deleted, notify */
else if (deleted > 1)
ereport(NOTICE,
- (errmsg("multiple constraints named \"%s\" were dropped",
- constrName)));
+ (errmsg("multiple constraints named \"%s\" were dropped",
+ constrName)));
}
}
@@ -4578,12 +4590,12 @@ ATPrepAlterColumnType(List **wqueue,
CheckAttributeType(colName, targettype);
/*
- * Set up an expression to transform the old data value to the new type.
- * If a USING option was given, transform and use that expression, else
- * just take the old value and try to coerce it. We do this first so
- * that type incompatibility can be detected before we waste effort,
- * and because we need the expression to be parsed against the original
- * table rowtype.
+ * Set up an expression to transform the old data value to the new
+ * type. If a USING option was given, transform and use that
+ * expression, else just take the old value and try to coerce it. We
+ * do this first so that type incompatibility can be detected before
+ * we waste effort, and because we need the expression to be parsed
+ * against the original table rowtype.
*/
if (cmd->transform)
{
@@ -4592,7 +4604,7 @@ ATPrepAlterColumnType(List **wqueue,
/* Expression must be able to access vars of old table */
rte = addRangeTableEntryForRelation(pstate,
RelationGetRelid(rel),
- makeAlias(RelationGetRelationName(rel), NIL),
+ makeAlias(RelationGetRelationName(rel), NIL),
false,
true);
addRTEtoQuery(pstate, rte, false, true);
@@ -4603,13 +4615,13 @@ ATPrepAlterColumnType(List **wqueue,
if (expression_returns_set(transform))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("transform expression must not return a set")));
+ errmsg("transform expression must not return a set")));
/* No subplans or aggregates, either... */
if (pstate->p_hasSubLinks)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use subquery in transform expression")));
+ errmsg("cannot use subquery in transform expression")));
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
@@ -4646,9 +4658,9 @@ ATPrepAlterColumnType(List **wqueue,
ReleaseSysCache(tuple);
/*
- * The recursion case is handled by ATSimpleRecursion. However,
- * if we are told not to recurse, there had better not be any
- * child tables; else the alter would put them out of step.
+ * The recursion case is handled by ATSimpleRecursion. However, if we
+ * are told not to recurse, there had better not be any child tables;
+ * else the alter would put them out of step.
*/
if (recurse)
ATSimpleRecursion(wqueue, rel, cmd, recurse);
@@ -4683,15 +4695,15 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
heapTup = SearchSysCacheCopyAttName(RelationGetRelid(rel), colName);
if (!HeapTupleIsValid(heapTup)) /* shouldn't happen */
ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ (errcode(ERRCODE_UNDEFINED_COLUMN),
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
attTup = (Form_pg_attribute) GETSTRUCT(heapTup);
attnum = attTup->attnum;
/* Check for multiple ALTER TYPE on same column --- can't cope */
- if (attTup->atttypid != tab->oldDesc->attrs[attnum-1]->atttypid ||
- attTup->atttypmod != tab->oldDesc->attrs[attnum-1]->atttypmod)
+ if (attTup->atttypid != tab->oldDesc->attrs[attnum - 1]->atttypid ||
+ attTup->atttypmod != tab->oldDesc->attrs[attnum - 1]->atttypmod)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot alter type of column \"%s\" twice",
@@ -4713,8 +4725,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
{
defaultexpr = build_column_default(rel, attnum);
Assert(defaultexpr);
- defaultexpr = coerce_to_target_type(NULL, /* no UNKNOWN params */
- defaultexpr, exprType(defaultexpr),
+ defaultexpr = coerce_to_target_type(NULL, /* no UNKNOWN params */
+ defaultexpr, exprType(defaultexpr),
targettype, typename->typmod,
COERCION_ASSIGNMENT,
COERCE_IMPLICIT_CAST);
@@ -4728,18 +4740,18 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
defaultexpr = NULL;
/*
- * Find everything that depends on the column (constraints, indexes, etc),
- * and record enough information to let us recreate the objects.
+ * Find everything that depends on the column (constraints, indexes,
+ * etc), and record enough information to let us recreate the objects.
*
* The actual recreation does not happen here, but only after we have
- * performed all the individual ALTER TYPE operations. We have to save
- * the info before executing ALTER TYPE, though, else the deparser will
- * get confused.
+ * performed all the individual ALTER TYPE operations. We have to
+ * save the info before executing ALTER TYPE, though, else the
+ * deparser will get confused.
*
* There could be multiple entries for the same object, so we must check
- * to ensure we process each one only once. Note: we assume that an index
- * that implements a constraint will not show a direct dependency on the
- * column.
+ * to ensure we process each one only once. Note: we assume that an
+ * index that implements a constraint will not show a direct
+ * dependency on the column.
*/
depRel = heap_openr(DependRelationName, RowExclusiveLock);
@@ -4761,8 +4773,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
while (HeapTupleIsValid(depTup = systable_getnext(scan)))
{
- Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(depTup);
- ObjectAddress foundObject;
+ Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(depTup);
+ ObjectAddress foundObject;
/* We don't expect any PIN dependencies on columns */
if (foundDep->deptype == DEPENDENCY_PIN)
@@ -4775,45 +4787,45 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
switch (getObjectClass(&foundObject))
{
case OCLASS_CLASS:
- {
- char relKind = get_rel_relkind(foundObject.objectId);
-
- if (relKind == RELKIND_INDEX)
{
- Assert(foundObject.objectSubId == 0);
- if (!list_member_oid(tab->changedIndexOids, foundObject.objectId))
+ char relKind = get_rel_relkind(foundObject.objectId);
+
+ if (relKind == RELKIND_INDEX)
{
- tab->changedIndexOids = lappend_oid(tab->changedIndexOids,
- foundObject.objectId);
- tab->changedIndexDefs = lappend(tab->changedIndexDefs,
- pg_get_indexdef_string(foundObject.objectId));
+ Assert(foundObject.objectSubId == 0);
+ if (!list_member_oid(tab->changedIndexOids, foundObject.objectId))
+ {
+ tab->changedIndexOids = lappend_oid(tab->changedIndexOids,
+ foundObject.objectId);
+ tab->changedIndexDefs = lappend(tab->changedIndexDefs,
+ pg_get_indexdef_string(foundObject.objectId));
+ }
}
+ else if (relKind == RELKIND_SEQUENCE)
+ {
+ /*
+ * This must be a SERIAL column's sequence. We
+ * need not do anything to it.
+ */
+ Assert(foundObject.objectSubId == 0);
+ }
+ else
+ {
+ /* Not expecting any other direct dependencies... */
+ elog(ERROR, "unexpected object depending on column: %s",
+ getObjectDescription(&foundObject));
+ }
+ break;
}
- else if (relKind == RELKIND_SEQUENCE)
- {
- /*
- * This must be a SERIAL column's sequence. We need not
- * do anything to it.
- */
- Assert(foundObject.objectSubId == 0);
- }
- else
- {
- /* Not expecting any other direct dependencies... */
- elog(ERROR, "unexpected object depending on column: %s",
- getObjectDescription(&foundObject));
- }
- break;
- }
case OCLASS_CONSTRAINT:
Assert(foundObject.objectSubId == 0);
if (!list_member_oid(tab->changedConstraintOids, foundObject.objectId))
{
tab->changedConstraintOids = lappend_oid(tab->changedConstraintOids,
- foundObject.objectId);
+ foundObject.objectId);
tab->changedConstraintDefs = lappend(tab->changedConstraintDefs,
- pg_get_constraintdef_string(foundObject.objectId));
+ pg_get_constraintdef_string(foundObject.objectId));
}
break;
@@ -4828,9 +4840,10 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
break;
case OCLASS_DEFAULT:
+
/*
- * Ignore the column's default expression, since we will fix
- * it below.
+ * Ignore the column's default expression, since we will
+ * fix it below.
*/
Assert(defaultexpr);
break;
@@ -4844,6 +4857,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
case OCLASS_OPCLASS:
case OCLASS_TRIGGER:
case OCLASS_SCHEMA:
+
/*
* We don't expect any of these sorts of objects to depend
* on a column.
@@ -4883,7 +4897,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
while (HeapTupleIsValid(depTup = systable_getnext(scan)))
{
- Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(depTup);
+ Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(depTup);
if (foundDep->deptype != DEPENDENCY_NORMAL)
elog(ERROR, "found unexpected dependency type '%c'",
@@ -4900,8 +4914,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
heap_close(depRel, RowExclusiveLock);
/*
- * Here we go --- change the recorded column type. (Note heapTup is
- * a copy of the syscache entry, so okay to scribble on.)
+ * Here we go --- change the recorded column type. (Note heapTup is a
+ * copy of the syscache entry, so okay to scribble on.)
*/
attTup->atttypid = targettype;
attTup->atttypmod = typename->typmod;
@@ -4923,15 +4937,18 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
/* Install dependency on new datatype */
add_column_datatype_dependency(RelationGetRelid(rel), attnum, targettype);
- /* Drop any pg_statistic entry for the column, since it's now wrong type */
+ /*
+ * Drop any pg_statistic entry for the column, since it's now wrong
+ * type
+ */
RemoveStatistics(RelationGetRelid(rel), attnum);
/*
- * Update the default, if present, by brute force --- remove and re-add
- * the default. Probably unsafe to take shortcuts, since the new version
- * may well have additional dependencies. (It's okay to do this now,
- * rather than after other ALTER TYPE commands, since the default won't
- * depend on other column types.)
+ * Update the default, if present, by brute force --- remove and
+ * re-add the default. Probably unsafe to take shortcuts, since the
+ * new version may well have additional dependencies. (It's okay to
+ * do this now, rather than after other ALTER TYPE commands, since the
+ * default won't depend on other column types.)
*/
if (defaultexpr)
{
@@ -4939,8 +4956,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
CommandCounterIncrement();
/*
- * We use RESTRICT here for safety, but at present we do not expect
- * anything to depend on the default.
+ * We use RESTRICT here for safety, but at present we do not
+ * expect anything to depend on the default.
*/
RemoveAttrDefault(RelationGetRelid(rel), attnum, DROP_RESTRICT, true);
@@ -4960,31 +4977,26 @@ static void
ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab)
{
ObjectAddress obj;
- ListCell *l;
+ ListCell *l;
/*
* Re-parse the index and constraint definitions, and attach them to
- * the appropriate work queue entries. We do this before dropping
+ * the appropriate work queue entries. We do this before dropping
* because in the case of a FOREIGN KEY constraint, we might not yet
- * have exclusive lock on the table the constraint is attached to,
- * and we need to get that before dropping. It's safe because the
- * parser won't actually look at the catalogs to detect the existing
- * entry.
+ * have exclusive lock on the table the constraint is attached to, and
+ * we need to get that before dropping. It's safe because the parser
+ * won't actually look at the catalogs to detect the existing entry.
*/
foreach(l, tab->changedIndexDefs)
- {
ATPostAlterTypeParse((char *) lfirst(l), wqueue);
- }
foreach(l, tab->changedConstraintDefs)
- {
ATPostAlterTypeParse((char *) lfirst(l), wqueue);
- }
/*
- * Now we can drop the existing constraints and indexes --- constraints
- * first, since some of them might depend on the indexes. It should be
- * okay to use DROP_RESTRICT here, since nothing else should be depending
- * on these objects.
+ * Now we can drop the existing constraints and indexes ---
+ * constraints first, since some of them might depend on the indexes.
+ * It should be okay to use DROP_RESTRICT here, since nothing else
+ * should be depending on these objects.
*/
if (tab->changedConstraintOids)
obj.classId = get_system_catalog_relid(ConstraintRelationName);
@@ -5017,8 +5029,8 @@ ATPostAlterTypeParse(char *cmd, List **wqueue)
ListCell *list_item;
/*
- * We expect that we only have to do raw parsing and parse analysis, not
- * any rule rewriting, since these will all be utility statements.
+ * We expect that we only have to do raw parsing and parse analysis,
+ * not any rule rewriting, since these will all be utility statements.
*/
raw_parsetree_list = raw_parser(cmd);
querytree_list = NIL;
@@ -5027,12 +5039,13 @@ ATPostAlterTypeParse(char *cmd, List **wqueue)
Node *parsetree = (Node *) lfirst(list_item);
querytree_list = list_concat(querytree_list,
- parse_analyze(parsetree, NULL, 0));
+ parse_analyze(parsetree, NULL, 0));
}
/*
- * Attach each generated command to the proper place in the work queue.
- * Note this could result in creation of entirely new work-queue entries.
+ * Attach each generated command to the proper place in the work
+ * queue. Note this could result in creation of entirely new
+ * work-queue entries.
*/
foreach(list_item, querytree_list)
{
@@ -5045,50 +5058,50 @@ ATPostAlterTypeParse(char *cmd, List **wqueue)
switch (nodeTag(query->utilityStmt))
{
case T_IndexStmt:
- {
- IndexStmt *stmt = (IndexStmt *) query->utilityStmt;
- AlterTableCmd *newcmd;
-
- rel = relation_openrv(stmt->relation, AccessExclusiveLock);
- tab = ATGetQueueEntry(wqueue, rel);
- newcmd = makeNode(AlterTableCmd);
- newcmd->subtype = AT_ReAddIndex;
- newcmd->def = (Node *) stmt;
- tab->subcmds[AT_PASS_OLD_INDEX] =
- lappend(tab->subcmds[AT_PASS_OLD_INDEX], newcmd);
- relation_close(rel, NoLock);
- break;
- }
+ {
+ IndexStmt *stmt = (IndexStmt *) query->utilityStmt;
+ AlterTableCmd *newcmd;
+
+ rel = relation_openrv(stmt->relation, AccessExclusiveLock);
+ tab = ATGetQueueEntry(wqueue, rel);
+ newcmd = makeNode(AlterTableCmd);
+ newcmd->subtype = AT_ReAddIndex;
+ newcmd->def = (Node *) stmt;
+ tab->subcmds[AT_PASS_OLD_INDEX] =
+ lappend(tab->subcmds[AT_PASS_OLD_INDEX], newcmd);
+ relation_close(rel, NoLock);
+ break;
+ }
case T_AlterTableStmt:
- {
- AlterTableStmt *stmt = (AlterTableStmt *) query->utilityStmt;
- ListCell *lcmd;
-
- rel = relation_openrv(stmt->relation, AccessExclusiveLock);
- tab = ATGetQueueEntry(wqueue, rel);
- foreach(lcmd, stmt->cmds)
{
- AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd);
+ AlterTableStmt *stmt = (AlterTableStmt *) query->utilityStmt;
+ ListCell *lcmd;
- switch (cmd->subtype)
+ rel = relation_openrv(stmt->relation, AccessExclusiveLock);
+ tab = ATGetQueueEntry(wqueue, rel);
+ foreach(lcmd, stmt->cmds)
{
- case AT_AddIndex:
- cmd->subtype = AT_ReAddIndex;
- tab->subcmds[AT_PASS_OLD_INDEX] =
- lappend(tab->subcmds[AT_PASS_OLD_INDEX], cmd);
- break;
- case AT_AddConstraint:
- tab->subcmds[AT_PASS_OLD_CONSTR] =
- lappend(tab->subcmds[AT_PASS_OLD_CONSTR], cmd);
- break;
- default:
- elog(ERROR, "unexpected statement type: %d",
- (int) cmd->subtype);
+ AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd);
+
+ switch (cmd->subtype)
+ {
+ case AT_AddIndex:
+ cmd->subtype = AT_ReAddIndex;
+ tab->subcmds[AT_PASS_OLD_INDEX] =
+ lappend(tab->subcmds[AT_PASS_OLD_INDEX], cmd);
+ break;
+ case AT_AddConstraint:
+ tab->subcmds[AT_PASS_OLD_CONSTR] =
+ lappend(tab->subcmds[AT_PASS_OLD_CONSTR], cmd);
+ break;
+ default:
+ elog(ERROR, "unexpected statement type: %d",
+ (int) cmd->subtype);
+ }
}
+ relation_close(rel, NoLock);
+ break;
}
- relation_close(rel, NoLock);
- break;
- }
default:
elog(ERROR, "unexpected statement type: %d",
(int) nodeTag(query->utilityStmt));
@@ -5116,8 +5129,8 @@ ATExecChangeOwner(Oid relationOid, int32 newOwnerSysId)
class_rel = heap_openr(RelationRelationName, RowExclusiveLock);
tuple = SearchSysCache(RELOID,
- ObjectIdGetDatum(relationOid),
- 0, 0, 0);
+ ObjectIdGetDatum(relationOid),
+ 0, 0, 0);
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for relation %u", relationOid);
tuple_class = (Form_pg_class) GETSTRUCT(tuple);
@@ -5139,7 +5152,7 @@ ATExecChangeOwner(Oid relationOid, int32 newOwnerSysId)
NameStr(tuple_class->relname))));
}
- /*
+ /*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
@@ -5148,7 +5161,7 @@ ATExecChangeOwner(Oid relationOid, int32 newOwnerSysId)
Datum repl_val[Natts_pg_class];
char repl_null[Natts_pg_class];
char repl_repl[Natts_pg_class];
- Acl *newAcl;
+ Acl *newAcl;
Datum aclDatum;
bool isNull;
HeapTuple newtuple;
@@ -5156,8 +5169,8 @@ ATExecChangeOwner(Oid relationOid, int32 newOwnerSysId)
/* Otherwise, check that we are the superuser */
if (!superuser())
ereport(ERROR,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to change owner")));
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("must be superuser to change owner")));
memset(repl_null, ' ', sizeof(repl_null));
memset(repl_repl, ' ', sizeof(repl_repl));
@@ -5188,9 +5201,9 @@ ATExecChangeOwner(Oid relationOid, int32 newOwnerSysId)
heap_freetuple(newtuple);
/*
- * If we are operating on a table, also change the ownership of any
- * indexes that belong to the table, as well as the table's toast
- * table (if it has one)
+ * If we are operating on a table, also change the ownership of
+ * any indexes that belong to the table, as well as the table's
+ * toast table (if it has one)
*/
if (tuple_class->relkind == RELKIND_RELATION ||
tuple_class->relkind == RELKIND_TOASTVALUE)
@@ -5265,7 +5278,7 @@ static void
ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, char *tablespacename)
{
Oid tablespaceId;
- AclResult aclresult;
+ AclResult aclresult;
/*
* We do our own permission checking because we want to allow this on
@@ -5294,7 +5307,7 @@ ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, char *tablespacename)
if (!OidIsValid(tablespaceId))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("tablespace \"%s\" does not exist", tablespacename)));
+ errmsg("tablespace \"%s\" does not exist", tablespacename)));
/* Check its permissions */
aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(), ACL_CREATE);
@@ -5305,7 +5318,7 @@ ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, char *tablespacename)
if (OidIsValid(tab->newTableSpace))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("multiple SET TABLESPACE subcommands are not valid")));
+ errmsg("multiple SET TABLESPACE subcommands are not valid")));
tab->newTableSpace = tablespaceId;
}
@@ -5339,13 +5352,13 @@ ATExecSetTableSpace(Oid tableOid, Oid newTableSpace)
RelationGetRelationName(rel))));
/*
- * Don't allow moving temp tables of other backends ... their
- * local buffer manager is not going to cope.
+ * Don't allow moving temp tables of other backends ... their local
+ * buffer manager is not going to cope.
*/
if (isOtherTempNamespace(RelationGetNamespace(rel)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot move temporary tables of other sessions")));
+ errmsg("cannot move temporary tables of other sessions")));
/*
* No work if no change in tablespace.
@@ -5425,14 +5438,15 @@ copy_relation_data(Relation rel, SMgrRelation dst)
bool use_wal;
BlockNumber nblocks;
BlockNumber blkno;
- char buf[BLCKSZ];
+ char buf[BLCKSZ];
Page page = (Page) buf;
/*
- * Since we copy the data directly without looking at the shared buffers,
- * we'd better first flush out any pages of the source relation that are
- * in shared buffers. We assume no new pages will get loaded into
- * buffers while we are holding exclusive lock on the rel.
+ * Since we copy the data directly without looking at the shared
+ * buffers, we'd better first flush out any pages of the source
+ * relation that are in shared buffers. We assume no new pages will
+ * get loaded into buffers while we are holding exclusive lock on the
+ * rel.
*/
FlushRelationBuffers(rel, 0);
@@ -5479,7 +5493,7 @@ copy_relation_data(Relation rel, SMgrRelation dst)
}
/*
- * Now write the page. We say isTemp = true even if it's not a
+ * Now write the page. We say isTemp = true even if it's not a
* temp rel, because there's no need for smgr to schedule an fsync
* for this write; we'll do it ourselves below.
*/
@@ -5488,18 +5502,18 @@ copy_relation_data(Relation rel, SMgrRelation dst)
/*
* If the rel isn't temp, we must fsync it down to disk before it's
- * safe to commit the transaction. (For a temp rel we don't care
+ * safe to commit the transaction. (For a temp rel we don't care
* since the rel will be uninteresting after a crash anyway.)
*
- * It's obvious that we must do this when not WAL-logging the copy.
- * It's less obvious that we have to do it even if we did WAL-log the
+ * It's obvious that we must do this when not WAL-logging the copy. It's
+ * less obvious that we have to do it even if we did WAL-log the
* copied pages. The reason is that since we're copying outside
* shared buffers, a CHECKPOINT occurring during the copy has no way
* to flush the previously written data to disk (indeed it won't know
- * the new rel even exists). A crash later on would replay WAL from the
- * checkpoint, therefore it wouldn't replay our earlier WAL entries.
- * If we do not fsync those pages here, they might still not be on disk
- * when the crash occurs.
+ * the new rel even exists). A crash later on would replay WAL from
+ * the checkpoint, therefore it wouldn't replay our earlier WAL
+ * entries. If we do not fsync those pages here, they might still not
+ * be on disk when the crash occurs.
*/
if (!rel->rd_istemp)
smgrimmedsync(dst);
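
The rewrapped comments above carry the key reasoning in copy_relation_data: the copy bypasses shared buffers, so a checkpoint taken during the copy cannot flush the new blocks, and crash recovery starting from that checkpoint would never replay them; the destination therefore has to be fsync'd before the transaction commits. A minimal standalone sketch of the same copy-then-fsync pattern, using plain POSIX I/O and an assumed 8192-byte block size in place of the backend's BLCKSZ, smgr, and WAL machinery:

#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

#define BLOCK_SIZE 8192			/* stand-in for BLCKSZ */

/*
 * Copy src to dst one block at a time, then fsync dst so the data is
 * known to be on disk before the caller "commits".  Illustrative only:
 * copy_relation_data reads through the smgr layer, optionally WAL-logs
 * each page, and syncs with smgrimmedsync().
 */
static int
copy_file_blocks(const char *src, const char *dst)
{
	char		buf[BLOCK_SIZE];
	ssize_t		n;
	int			in = open(src, O_RDONLY);
	int			out = open(dst, O_WRONLY | O_CREAT | O_TRUNC, 0600);

	if (in < 0 || out < 0)
		return -1;

	while ((n = read(in, buf, sizeof(buf))) > 0)
	{
		if (write(out, buf, (size_t) n) != n)
			return -1;
	}
	if (n < 0)
		return -1;

	/* Without this, a crash shortly after commit could lose the copy. */
	if (fsync(out) != 0)
		return -1;

	close(in);
	close(out);
	return 0;
}

int
main(int argc, char **argv)
{
	if (argc != 3)
	{
		fprintf(stderr, "usage: copyblocks <src> <dst>\n");
		return 1;
	}
	if (copy_file_blocks(argv[1], argv[2]) != 0)
	{
		perror("copy_file_blocks");
		return 1;
	}
	return 0;
}

The real routine additionally WAL-logs each page when use_wal is set and skips the fsync for temp relations, as the surrounding comments note.
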
@@ -5510,7 +5524,7 @@ copy_relation_data(Relation rel, SMgrRelation dst)
*
* Note: this is also invoked from outside this module; in such cases we
* expect the caller to have verified that the relation is a table and we
- * have all the right permissions. Callers expect this function
+ * have all the right permissions. Callers expect this function
* to end with CommandCounterIncrement if it makes any changes.
*/
void
@@ -5532,8 +5546,8 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
/*
* Grab an exclusive lock on the target table, which we will NOT
- * release until end of transaction. (This is probably redundant
- * in all present uses...)
+ * release until end of transaction. (This is probably redundant in
+ * all present uses...)
*/
rel = heap_open(relOid, AccessExclusiveLock);
@@ -5543,15 +5557,15 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
* We cannot allow toasting a shared relation after initdb (because
* there's no way to mark it toasted in other databases' pg_class).
* Unfortunately we can't distinguish initdb from a manually started
- * standalone backend (toasting happens after the bootstrap phase,
- * so checking IsBootstrapProcessingMode() won't work). However, we can
+ * standalone backend (toasting happens after the bootstrap phase, so
+ * checking IsBootstrapProcessingMode() won't work). However, we can
* at least prevent this mistake under normal multi-user operation.
*/
shared_relation = rel->rd_rel->relisshared;
if (shared_relation && IsUnderPostmaster)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("shared tables cannot be toasted after initdb")));
+ errmsg("shared tables cannot be toasted after initdb")));
/*
* Is it already toasted?
@@ -5894,8 +5908,8 @@ PreCommit_on_commit_actions(void)
void
AtEOXact_on_commit_actions(bool isCommit, TransactionId xid)
{
- ListCell *cur_item;
- ListCell *prev_item;
+ ListCell *cur_item;
+ ListCell *prev_item;
prev_item = NULL;
cur_item = list_head(on_commits);
@@ -5930,15 +5944,15 @@ AtEOXact_on_commit_actions(bool isCommit, TransactionId xid)
* Post-subcommit or post-subabort cleanup for ON COMMIT management.
*
* During subabort, we can immediately remove entries created during this
- * subtransaction. During subcommit, just relabel entries marked during
+ * subtransaction. During subcommit, just relabel entries marked during
* this subtransaction as being the parent's responsibility.
*/
void
AtEOSubXact_on_commit_actions(bool isCommit, TransactionId childXid,
TransactionId parentXid)
{
- ListCell *cur_item;
- ListCell *prev_item;
+ ListCell *cur_item;
+ ListCell *prev_item;
prev_item = NULL;
cur_item = list_head(on_commits);
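
The hunks above only touch declarations, but the comment they sit under describes the whole algorithm of AtEOSubXact_on_commit_actions: on subabort, entries created by the subtransaction are removed from on_commits immediately; on subcommit, they are relabeled as the parent transaction's responsibility. A generic sketch of that prev/cur deletion-while-walking pattern, with a hypothetical OnCommitItem struct and plain pointers standing in for the backend's List, ListCell, and TransactionId types:

#include <stdbool.h>
#include <stdlib.h>

typedef unsigned int Xid;		/* stand-in for TransactionId */

typedef struct OnCommitItem
{
	Xid			creating_xid;	/* (sub)transaction that created the entry */
	struct OnCommitItem *next;
} OnCommitItem;

/*
 * At subtransaction end, walk the list with a trailing pointer so
 * entries can be unlinked in place: on subcommit, relabel the entry as
 * the parent's; on subabort, delete it.  The backend does the same
 * thing over the on_commits List using ListCell *cur_item / *prev_item.
 */
static void
at_eosubxact_on_commit_actions(OnCommitItem **head, bool isCommit,
							   Xid childXid, Xid parentXid)
{
	OnCommitItem *prev = NULL;
	OnCommitItem *cur = *head;

	while (cur != NULL)
	{
		if (cur->creating_xid != childXid)
		{
			prev = cur;
			cur = cur->next;
		}
		else if (isCommit)
		{
			cur->creating_xid = parentXid;	/* now the parent's responsibility */
			prev = cur;
			cur = cur->next;
		}
		else
		{
			OnCommitItem *dead = cur;

			cur = cur->next;
			if (prev)
				prev->next = cur;
			else
				*head = cur;
			free(dead);
		}
	}
}

int
main(void)
{
	OnCommitItem *head = malloc(sizeof(OnCommitItem));

	head->creating_xid = 2;
	head->next = NULL;

	at_eosubxact_on_commit_actions(&head, false, 2, 1); /* subabort of xid 2 */
	return (head == NULL) ? 0 : 1;
}
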
diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c
index 05a13315a1f..15fe8392882 100644
--- a/src/backend/commands/tablespace.c
+++ b/src/backend/commands/tablespace.c
@@ -35,7 +35,7 @@
* To allow CREATE DATABASE to give a new database a default tablespace
* that's different from the template database's default, we make the
* provision that a zero in pg_class.reltablespace means the database's
- * default tablespace. Without this, CREATE DATABASE would have to go in
+ * default tablespace. Without this, CREATE DATABASE would have to go in
* and munge the system catalogs of the new database. This special meaning
* of zero also applies in pg_namespace.nsptablespace.
*
@@ -45,7 +45,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.8 2004/08/08 01:31:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.9 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -95,11 +95,11 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo)
{
#ifdef HAVE_SYMLINK
struct stat st;
- char *dir;
+ char *dir;
/*
- * The global tablespace doesn't have per-database subdirectories,
- * so nothing to do for it.
+ * The global tablespace doesn't have per-database subdirectories, so
+ * nothing to do for it.
*/
if (spcNode == GLOBALTABLESPACE_OID)
return;
@@ -118,7 +118,7 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo)
* DROP TABLESPACE or TablespaceCreateDbspace is running
* concurrently. Simple reads from pg_tablespace are OK.
*/
- Relation rel;
+ Relation rel;
if (!isRedo)
rel = heap_openr(TableSpaceRelationName, ExclusiveLock);
@@ -126,8 +126,8 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo)
rel = NULL;
/*
- * Recheck to see if someone created the directory while
- * we were waiting for lock.
+ * Recheck to see if someone created the directory while we
+ * were waiting for lock.
*/
if (stat(dir, &st) == 0 && S_ISDIR(st.st_mode))
{
@@ -139,8 +139,8 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo)
if (mkdir(dir, S_IRWXU) < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not create directory \"%s\": %m",
- dir)));
+ errmsg("could not create directory \"%s\": %m",
+ dir)));
}
/* OK to drop the exclusive lock */
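
These TablespaceCreateDbspace hunks rewrap the comment describing its slow path: a cheap unlocked stat() first, then an exclusive lock on pg_tablespace, a recheck in case another backend created the directory while we waited, and only then mkdir(). A condensed sketch of that check / lock / recheck / create sequence, with a process-local pthread mutex standing in for the cross-backend ExclusiveLock (a simplification, since the real lock coordinates separate backend processes):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>

static pthread_mutex_t tablespace_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Make sure the per-database directory exists.  The unlocked stat() is
 * the common fast path; only if it fails do we take the lock, recheck
 * (someone may have created the directory while we waited), and mkdir.
 */
static int
ensure_dbspace(const char *dir)
{
	struct stat st;

	if (stat(dir, &st) == 0 && S_ISDIR(st.st_mode))
		return 0;				/* already there, nothing to do */

	pthread_mutex_lock(&tablespace_lock);

	if (!(stat(dir, &st) == 0 && S_ISDIR(st.st_mode)))
	{
		if (mkdir(dir, S_IRWXU) < 0)
		{
			pthread_mutex_unlock(&tablespace_lock);
			fprintf(stderr, "could not create directory \"%s\": %s\n",
					dir, strerror(errno));
			return -1;
		}
	}

	pthread_mutex_unlock(&tablespace_lock);
	return 0;
}

int
main(void)
{
	return ensure_dbspace("/tmp/pg_dbspace_demo") == 0 ? 0 : 1;
}
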
@@ -165,7 +165,7 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo)
}
pfree(dir);
-#endif /* HAVE_SYMLINK */
+#endif /* HAVE_SYMLINK */
}
/*
@@ -179,13 +179,13 @@ void
CreateTableSpace(CreateTableSpaceStmt *stmt)
{
#ifdef HAVE_SYMLINK
- Relation rel;
- Datum values[Natts_pg_tablespace];
+ Relation rel;
+ Datum values[Natts_pg_tablespace];
char nulls[Natts_pg_tablespace];
HeapTuple tuple;
Oid tablespaceoid;
- char *location;
- char *linkloc;
+ char *location;
+ char *linkloc;
AclId ownerid;
/* validate */
@@ -196,10 +196,10 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
/* Must be super user */
if (!superuser())
ereport(ERROR,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied to create tablespace \"%s\"",
- stmt->tablespacename),
- errhint("Must be superuser to create a tablespace.")));
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("permission denied to create tablespace \"%s\"",
+ stmt->tablespacename),
+ errhint("Must be superuser to create a tablespace.")));
/* However, the eventual owner of the tablespace need not be */
if (stmt->owner)
@@ -218,7 +218,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
if (strchr(location, '\''))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("tablespace location may not contain single quotes")));
+ errmsg("tablespace location may not contain single quotes")));
/*
* Allowing relative paths seems risky
@@ -231,9 +231,9 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
errmsg("tablespace location must be an absolute path")));
/*
- * Check that location isn't too long. Remember that we're going to append
- * '/<dboid>/<relid>.<nnn>' (XXX but do we ever form the whole path
- * explicitly? This may be overly conservative.)
+ * Check that location isn't too long. Remember that we're going to
+ * append '/<dboid>/<relid>.<nnn>' (XXX but do we ever form the whole
+ * path explicitly? This may be overly conservative.)
*/
if (strlen(location) >= (MAXPGPATH - 1 - 10 - 1 - 10 - 1 - 10))
ereport(ERROR,
@@ -250,12 +250,12 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable tablespace name \"%s\"",
stmt->tablespacename),
- errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));
+ errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));
/*
- * Check that there is no other tablespace by this name. (The
- * unique index would catch this anyway, but might as well give
- * a friendlier message.)
+ * Check that there is no other tablespace by this name. (The unique
+ * index would catch this anyway, but might as well give a friendlier
+ * message.)
*/
if (OidIsValid(get_tablespace_oid(stmt->tablespacename)))
ereport(ERROR,
@@ -293,14 +293,14 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
heap_freetuple(tuple);
/*
- * Attempt to coerce target directory to safe permissions. If this
+ * Attempt to coerce target directory to safe permissions. If this
* fails, it doesn't exist or has the wrong owner.
*/
if (chmod(location, 0700) != 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not set permissions on directory \"%s\": %m",
- location)));
+ errmsg("could not set permissions on directory \"%s\": %m",
+ location)));
/*
* Check the target directory is empty.
@@ -312,10 +312,10 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
location)));
/*
- * Create the PG_VERSION file in the target directory. This has several
- * purposes: to make sure we can write in the directory, to prevent
- * someone from creating another tablespace pointing at the same
- * directory (the emptiness check above will fail), and to label
+ * Create the PG_VERSION file in the target directory. This has
+ * several purposes: to make sure we can write in the directory, to
+ * prevent someone from creating another tablespace pointing at the
+ * same directory (the emptiness check above will fail), and to label
* tablespace directories by PG version.
*/
set_short_version(location);
@@ -337,11 +337,11 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
heap_close(rel, RowExclusiveLock);
-#else /* !HAVE_SYMLINK */
+#else /* !HAVE_SYMLINK */
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("tablespaces are not supported on this platform")));
-#endif /* HAVE_SYMLINK */
+#endif /* HAVE_SYMLINK */
}
/*
@@ -353,23 +353,24 @@ void
DropTableSpace(DropTableSpaceStmt *stmt)
{
#ifdef HAVE_SYMLINK
- char *tablespacename = stmt->tablespacename;
- HeapScanDesc scandesc;
- Relation rel;
- HeapTuple tuple;
- ScanKeyData entry[1];
- char *location;
- Oid tablespaceoid;
- DIR *dirdesc;
+ char *tablespacename = stmt->tablespacename;
+ HeapScanDesc scandesc;
+ Relation rel;
+ HeapTuple tuple;
+ ScanKeyData entry[1];
+ char *location;
+ Oid tablespaceoid;
+ DIR *dirdesc;
struct dirent *de;
- char *subfile;
+ char *subfile;
/* don't call this in a transaction block */
PreventTransactionChain((void *) stmt, "DROP TABLESPACE");
/*
* Acquire ExclusiveLock on pg_tablespace to ensure that no one else
- * is trying to do DROP TABLESPACE or TablespaceCreateDbspace concurrently.
+ * is trying to do DROP TABLESPACE or TablespaceCreateDbspace
+ * concurrently.
*/
rel = heap_openr(TableSpaceRelationName, ExclusiveLock);
@@ -409,15 +410,15 @@ DropTableSpace(DropTableSpaceStmt *stmt)
/*
* Check if the tablespace still contains any files. We try to rmdir
* each per-database directory we find in it. rmdir failure implies
- * there are still files in that subdirectory, so give up. (We do not
- * have to worry about undoing any already completed rmdirs, since
- * the next attempt to use the tablespace from that database will simply
+ * there are still files in that subdirectory, so give up. (We do not
+ * have to worry about undoing any already completed rmdirs, since the
+ * next attempt to use the tablespace from that database will simply
* recreate the subdirectory via TablespaceCreateDbspace.)
*
- * Since we hold exclusive lock, no one else should be creating any
- * fresh subdirectories in parallel. It is possible that new files
- * are being created within subdirectories, though, so the rmdir
- * call could fail. Worst consequence is a less friendly error message.
+ * Since we hold exclusive lock, no one else should be creating any fresh
+ * subdirectories in parallel. It is possible that new files are
+ * being created within subdirectories, though, so the rmdir call
+ * could fail. Worst consequence is a less friendly error message.
*/
dirdesc = AllocateDir(location);
if (dirdesc == NULL)
@@ -458,8 +459,11 @@ DropTableSpace(DropTableSpaceStmt *stmt)
pfree(subfile);
}
#ifdef WIN32
- /* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
- not in released version */
+
+ /*
+ * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
+ * not in released version
+ */
if (GetLastError() == ERROR_NO_MORE_FILES)
errno = 0;
#endif
@@ -494,15 +498,15 @@ DropTableSpace(DropTableSpaceStmt *stmt)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not remove junction dir \"%s\": %m",
- location)));
+ location)));
#endif
pfree(subfile);
pfree(location);
/*
- * We have successfully destroyed the infrastructure ... there is
- * now no way to roll back the DROP ... so proceed to remove the
+ * We have successfully destroyed the infrastructure ... there is now
+ * no way to roll back the DROP ... so proceed to remove the
* pg_tablespace tuple.
*/
simple_heap_delete(rel, &tuple->t_self);
@@ -511,11 +515,11 @@ DropTableSpace(DropTableSpaceStmt *stmt)
heap_close(rel, ExclusiveLock);
-#else /* !HAVE_SYMLINK */
+#else /* !HAVE_SYMLINK */
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("tablespaces are not supported on this platform")));
-#endif /* HAVE_SYMLINK */
+#endif /* HAVE_SYMLINK */
}
@@ -579,7 +583,7 @@ set_short_version(const char *path)
static bool
directory_is_empty(const char *path)
{
- DIR *dirdesc;
+ DIR *dirdesc;
struct dirent *de;
dirdesc = AllocateDir(path);
@@ -602,8 +606,11 @@ directory_is_empty(const char *path)
return false;
}
#ifdef WIN32
- /* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
- not in released version */
+
+ /*
+ * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
+ * not in released version
+ */
if (GetLastError() == ERROR_NO_MORE_FILES)
errno = 0;
#endif
@@ -624,11 +631,11 @@ directory_is_empty(const char *path)
Oid
get_tablespace_oid(const char *tablespacename)
{
- Oid result;
- Relation rel;
+ Oid result;
+ Relation rel;
HeapScanDesc scandesc;
HeapTuple tuple;
- ScanKeyData entry[1];
+ ScanKeyData entry[1];
/* Search pg_tablespace */
rel = heap_openr(TableSpaceRelationName, AccessShareLock);
@@ -645,8 +652,8 @@ get_tablespace_oid(const char *tablespacename)
else
result = InvalidOid;
- heap_endscan(scandesc);
- heap_close(rel, AccessShareLock);
+ heap_endscan(scandesc);
+ heap_close(rel, AccessShareLock);
return result;
}
@@ -659,11 +666,11 @@ get_tablespace_oid(const char *tablespacename)
char *
get_tablespace_name(Oid spc_oid)
{
- char *result;
- Relation rel;
+ char *result;
+ Relation rel;
HeapScanDesc scandesc;
HeapTuple tuple;
- ScanKeyData entry[1];
+ ScanKeyData entry[1];
/* Search pg_tablespace */
rel = heap_openr(TableSpaceRelationName, AccessShareLock);
@@ -681,8 +688,8 @@ get_tablespace_name(Oid spc_oid)
else
result = NULL;
- heap_endscan(scandesc);
- heap_close(rel, AccessShareLock);
+ heap_endscan(scandesc);
+ heap_close(rel, AccessShareLock);
return result;
}
@@ -693,8 +700,8 @@ get_tablespace_name(Oid spc_oid)
void
RenameTableSpace(const char *oldname, const char *newname)
{
- Relation rel;
- ScanKeyData entry[1];
+ Relation rel;
+ ScanKeyData entry[1];
HeapScanDesc scan;
HeapTuple tup;
HeapTuple newtuple;
@@ -729,7 +736,7 @@ RenameTableSpace(const char *oldname, const char *newname)
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable tablespace name \"%s\"", newname),
- errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));
+ errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));
/* Make sure the new name doesn't exist */
ScanKeyInit(&entry[0],
@@ -743,7 +750,7 @@ RenameTableSpace(const char *oldname, const char *newname)
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("tablespace \"%s\" already exists",
newname)));
-
+
heap_endscan(scan);
/* OK, update the entry */
@@ -761,8 +768,8 @@ RenameTableSpace(const char *oldname, const char *newname)
void
AlterTableSpaceOwner(const char *name, AclId newOwnerSysId)
{
- Relation rel;
- ScanKeyData entry[1];
+ Relation rel;
+ ScanKeyData entry[1];
HeapScanDesc scandesc;
Form_pg_tablespace spcForm;
HeapTuple tup;
@@ -783,7 +790,7 @@ AlterTableSpaceOwner(const char *name, AclId newOwnerSysId)
spcForm = (Form_pg_tablespace) GETSTRUCT(tup);
- /*
+ /*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
@@ -792,7 +799,7 @@ AlterTableSpaceOwner(const char *name, AclId newOwnerSysId)
Datum repl_val[Natts_pg_tablespace];
char repl_null[Natts_pg_tablespace];
char repl_repl[Natts_pg_tablespace];
- Acl *newAcl;
+ Acl *newAcl;
Datum aclDatum;
bool isNull;
HeapTuple newtuple;
@@ -814,9 +821,9 @@ AlterTableSpaceOwner(const char *name, AclId newOwnerSysId)
* necessary when the ACL is non-null.
*/
aclDatum = heap_getattr(tup,
- Anum_pg_tablespace_spcacl,
- RelationGetDescr(rel),
- &isNull);
+ Anum_pg_tablespace_spcacl,
+ RelationGetDescr(rel),
+ &isNull);
if (!isNull)
{
newAcl = aclnewowner(DatumGetAclP(aclDatum),
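For readers following the tablespace hunks above, directory_is_empty() boils down to the usual readdir loop. A minimal stand-alone analogue — plain POSIX opendir/readdir rather than the backend's AllocateDir, and without the mingw errno workaround shown in the patch — might look like this (dir_is_empty is a hypothetical name, not the backend function):

#include <dirent.h>
#include <stdbool.h>
#include <string.h>

/*
 * Rough stand-in for directory_is_empty(): true if "path" holds nothing
 * besides "." and "..".  Uses plain POSIX calls instead of AllocateDir.
 */
static bool
dir_is_empty(const char *path)
{
	DIR		   *dirdesc = opendir(path);
	struct dirent *de;
	bool		empty = true;

	if (dirdesc == NULL)
		return false;			/* unreadable: treat as non-empty */

	while ((de = readdir(dirdesc)) != NULL)
	{
		if (strcmp(de->d_name, ".") != 0 &&
			strcmp(de->d_name, "..") != 0)
		{
			empty = false;
			break;
		}
	}
	closedir(dirdesc);
	return empty;
}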
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index dfc8098782d..7e73f6b000f 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.167 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.168 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -480,8 +480,8 @@ DropTrigger(Oid relid, const char *trigname, DropBehavior behavior)
if (!HeapTupleIsValid(tup))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("trigger \"%s\" for table \"%s\" does not exist",
- trigname, get_rel_name(relid))));
+ errmsg("trigger \"%s\" for table \"%s\" does not exist",
+ trigname, get_rel_name(relid))));
if (!pg_class_ownercheck(relid, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
@@ -694,8 +694,8 @@ renametrig(Oid relid,
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("trigger \"%s\" for table \"%s\" does not exist",
- oldname, RelationGetRelationName(targetrel))));
+ errmsg("trigger \"%s\" for table \"%s\" does not exist",
+ oldname, RelationGetRelationName(targetrel))));
}
systable_endscan(tgscan);
@@ -1638,7 +1638,7 @@ ltrmark:;
* Deferred trigger stuff
*
* The DeferredTriggersData struct holds data about pending deferred
- * trigger events during the current transaction tree. The struct and
+ * trigger events during the current transaction tree. The struct and
* most of its subsidiary data are kept in TopTransactionContext; however
* the individual event records are kept in CurTransactionContext, so that
* they will easily go away during subtransaction abort.
@@ -1670,7 +1670,7 @@ ltrmark:;
* saves a copy, which we use to restore the state if we abort.
*
* numpushed and numalloc keep control of allocation and storage in the above
- * stacks. numpushed is essentially the current subtransaction nesting depth.
+ * stacks. numpushed is essentially the current subtransaction nesting depth.
*
* XXX We need to be able to save the per-event data in a file if it grows too
* large.
@@ -1723,11 +1723,11 @@ typedef struct DeferredTriggerStatusData *DeferredTriggerStatus;
*/
typedef struct DeferredTriggerStateData
{
- bool all_isset;
- bool all_isdeferred;
- int numstates; /* number of trigstates[] entries in use */
- int numalloc; /* allocated size of trigstates[] */
- DeferredTriggerStatusData trigstates[1]; /* VARIABLE LENGTH ARRAY */
+ bool all_isset;
+ bool all_isdeferred;
+ int numstates; /* number of trigstates[] entries in use */
+ int numalloc; /* allocated size of trigstates[] */
+ DeferredTriggerStatusData trigstates[1]; /* VARIABLE LENGTH ARRAY */
} DeferredTriggerStateData;
typedef DeferredTriggerStateData *DeferredTriggerState;
@@ -1735,15 +1735,15 @@ typedef DeferredTriggerStateData *DeferredTriggerState;
/* Per-transaction data */
typedef struct DeferredTriggersData
{
- DeferredTriggerState state;
- DeferredTriggerEvent events;
- DeferredTriggerEvent tail_thisxact;
- DeferredTriggerEvent events_imm;
- DeferredTriggerEvent *tail_stack;
- DeferredTriggerEvent *imm_stack;
- DeferredTriggerState *state_stack;
- int numpushed;
- int numalloc;
+ DeferredTriggerState state;
+ DeferredTriggerEvent events;
+ DeferredTriggerEvent tail_thisxact;
+ DeferredTriggerEvent events_imm;
+ DeferredTriggerEvent *tail_stack;
+ DeferredTriggerEvent *imm_stack;
+ DeferredTriggerState *state_stack;
+ int numpushed;
+ int numalloc;
} DeferredTriggersData;
typedef DeferredTriggersData *DeferredTriggers;
@@ -1757,7 +1757,7 @@ static void DeferredTriggerExecute(DeferredTriggerEvent event, int itemno,
static DeferredTriggerState DeferredTriggerStateCreate(int numalloc);
static DeferredTriggerState DeferredTriggerStateCopy(DeferredTriggerState state);
static DeferredTriggerState DeferredTriggerStateAddItem(DeferredTriggerState state,
- Oid tgoid, bool tgisdeferred);
+ Oid tgoid, bool tgisdeferred);
/* ----------
@@ -1770,8 +1770,8 @@ static DeferredTriggerState DeferredTriggerStateAddItem(DeferredTriggerState sta
static bool
deferredTriggerCheckState(Oid tgoid, int32 itemstate)
{
- bool tgisdeferred;
- int i;
+ bool tgisdeferred;
+ int i;
/*
* For not-deferrable triggers (i.e. normal AFTER ROW triggers and
@@ -1798,7 +1798,8 @@ deferredTriggerCheckState(Oid tgoid, int32 itemstate)
/*
* No ALL state known either, remember the default state as the
- * current and return that. (XXX why do we bother making a state entry?)
+ * current and return that. (XXX why do we bother making a state
+ * entry?)
*/
tgisdeferred = ((itemstate & TRIGGER_DEFERRED_INITDEFERRED) != 0);
deferredTriggers->state =
@@ -1982,8 +1983,8 @@ deferredTriggerInvokeEvents(bool immediate_only)
/*
* If immediate_only is true, then the only events that could need
- * firing are those since events_imm. (But if
- * events_imm is NULL, we must scan the entire list.)
+ * firing are those since events_imm. (But if events_imm is NULL, we
+ * must scan the entire list.)
*/
if (immediate_only && deferredTriggers->events_imm != NULL)
{
@@ -2003,13 +2004,13 @@ deferredTriggerInvokeEvents(bool immediate_only)
int i;
/*
- * Skip executing cancelled events, and events done by transactions
- * that are not aborted.
+ * Skip executing cancelled events, and events done by
+ * transactions that are not aborted.
*/
if (!(event->dte_event & TRIGGER_DEFERRED_CANCELED) ||
- (event->dte_event & TRIGGER_DEFERRED_DONE &&
- TransactionIdIsValid(event->dte_done_xid) &&
- !TransactionIdDidAbort(event->dte_done_xid)))
+ (event->dte_event & TRIGGER_DEFERRED_DONE &&
+ TransactionIdIsValid(event->dte_done_xid) &&
+ !TransactionIdDidAbort(event->dte_done_xid)))
{
MemoryContextReset(per_tuple_context);
@@ -2019,8 +2020,8 @@ deferredTriggerInvokeEvents(bool immediate_only)
for (i = 0; i < event->dte_n_items; i++)
{
if (event->dte_item[i].dti_state & TRIGGER_DEFERRED_DONE &&
- TransactionIdIsValid(event->dte_item[i].dti_done_xid) &&
- !(TransactionIdDidAbort(event->dte_item[i].dti_done_xid)))
+ TransactionIdIsValid(event->dte_item[i].dti_done_xid) &&
+ !(TransactionIdDidAbort(event->dte_item[i].dti_done_xid)))
continue;
/*
@@ -2097,8 +2098,8 @@ deferredTriggerInvokeEvents(bool immediate_only)
{
/*
* We can drop an item if it's done, but only if we're not
- * inside a subtransaction because it could abort later on.
- * We will want to check the item again if it does.
+ * inside a subtransaction because it could abort later on. We
+ * will want to check the item again if it does.
*/
if (immediate_only && !IsSubTransaction())
{
@@ -2209,8 +2210,8 @@ DeferredTriggerEndXact(void)
/*
* Forget everything we know about deferred triggers.
*
- * Since all the info is in TopTransactionContext or children thereof,
- * we need do nothing special to reclaim memory.
+ * Since all the info is in TopTransactionContext or children thereof, we
+ * need do nothing special to reclaim memory.
*/
deferredTriggers = NULL;
}
@@ -2236,8 +2237,8 @@ DeferredTriggerAbortXact(void)
/*
* Forget everything we know about deferred triggers.
*
- * Since all the info is in TopTransactionContext or children thereof,
- * we need do nothing special to reclaim memory.
+ * Since all the info is in TopTransactionContext or children thereof, we
+ * need do nothing special to reclaim memory.
*/
deferredTriggers = NULL;
}
@@ -2285,13 +2286,13 @@ DeferredTriggerBeginSubXact(void)
deferredTriggers->tail_stack = (DeferredTriggerEvent *)
repalloc(deferredTriggers->tail_stack,
- deferredTriggers->numalloc * sizeof(DeferredTriggerEvent));
+ deferredTriggers->numalloc * sizeof(DeferredTriggerEvent));
deferredTriggers->imm_stack = (DeferredTriggerEvent *)
repalloc(deferredTriggers->imm_stack,
- deferredTriggers->numalloc * sizeof(DeferredTriggerEvent));
+ deferredTriggers->numalloc * sizeof(DeferredTriggerEvent));
deferredTriggers->state_stack = (DeferredTriggerState *)
repalloc(deferredTriggers->state_stack,
- deferredTriggers->numalloc * sizeof(DeferredTriggerState));
+ deferredTriggers->numalloc * sizeof(DeferredTriggerState));
}
}
@@ -2358,8 +2359,8 @@ DeferredTriggerEndSubXact(bool isCommit)
deferredTriggers->tail_thisxact->dte_next = NULL;
/*
- * We don't need to free the items, since the CurTransactionContext
- * will be reset shortly.
+ * We don't need to free the items, since the
+ * CurTransactionContext will be reset shortly.
*/
/*
@@ -2393,7 +2394,7 @@ DeferredTriggerStateCreate(int numalloc)
state = (DeferredTriggerState)
MemoryContextAllocZero(TopTransactionContext,
sizeof(DeferredTriggerStateData) +
- (numalloc - 1) * sizeof(DeferredTriggerStatusData));
+ (numalloc - 1) *sizeof(DeferredTriggerStatusData));
state->numalloc = numalloc;
@@ -2429,13 +2430,13 @@ DeferredTriggerStateAddItem(DeferredTriggerState state,
{
if (state->numstates >= state->numalloc)
{
- int newalloc = state->numalloc * 2;
+ int newalloc = state->numalloc * 2;
- newalloc = Max(newalloc, 8); /* in case original has size 0 */
+ newalloc = Max(newalloc, 8); /* in case original has size 0 */
state = (DeferredTriggerState)
repalloc(state,
sizeof(DeferredTriggerStateData) +
- (newalloc - 1) * sizeof(DeferredTriggerStatusData));
+ (newalloc - 1) *sizeof(DeferredTriggerStatusData));
state->numalloc = newalloc;
Assert(state->numstates < state->numalloc);
}
@@ -2463,8 +2464,9 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
return;
/*
- * If in a subtransaction, and we didn't save the current state already,
- * save it so it can be restored if the subtransaction aborts.
+ * If in a subtransaction, and we didn't save the current state
+ * already, save it so it can be restored if the subtransaction
+ * aborts.
*/
if (deferredTriggers->numpushed > 0 &&
deferredTriggers->state_stack[deferredTriggers->numpushed - 1] == NULL)
@@ -2686,7 +2688,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger,
return;
/*
- * Create a new event. We use the CurTransactionContext so the event
+ * Create a new event. We use the CurTransactionContext so the event
* will automatically go away if the subtransaction aborts.
*/
oldcxt = MemoryContextSwitchTo(CurTransactionContext);
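The DeferredTriggerStateAddItem() hunk above shows the backend's grow-by-doubling idiom for the variable-length trigstates[] array. A self-contained sketch of the same idiom, using malloc-family calls instead of repalloc and hypothetical type names:

#include <stdlib.h>
#include <stdbool.h>

#define Max(a, b)	((a) > (b) ? (a) : (b))

typedef struct
{
	unsigned int tgoid;
	bool		tgisdeferred;
} TrigStatus;

typedef struct
{
	int			numstates;		/* entries in use */
	int			numalloc;		/* allocated size of trigstates[] */
	TrigStatus	trigstates[1];	/* VARIABLE LENGTH ARRAY */
} TrigState;

static TrigState *
trig_state_add_item(TrigState *state, unsigned int tgoid, bool tgisdeferred)
{
	if (state->numstates >= state->numalloc)
	{
		int			newalloc = Max(state->numalloc * 2, 8);

		state = realloc(state, sizeof(TrigState) +
						(newalloc - 1) * sizeof(TrigStatus));
		if (state == NULL)
			abort();			/* repalloc would ereport instead */
		state->numalloc = newalloc;
	}
	state->trigstates[state->numstates].tgoid = tgoid;
	state->trigstates[state->numstates].tgisdeferred = tgisdeferred;
	state->numstates++;
	return state;
}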
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index 8fd16fdb58d..6a438093298 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.62 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.63 2004/08/29 05:06:41 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -302,8 +302,8 @@ DefineType(List *names, List *parameters)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type output function %s must return type \"cstring\"",
- NameListToString(outputName))));
+ errmsg("type output function %s must return type \"cstring\"",
+ NameListToString(outputName))));
}
if (receiveOid)
{
@@ -311,8 +311,8 @@ DefineType(List *names, List *parameters)
if (resulttype != typoid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type receive function %s must return type %s",
- NameListToString(receiveName), typeName)));
+ errmsg("type receive function %s must return type %s",
+ NameListToString(receiveName), typeName)));
}
if (sendOid)
{
@@ -320,13 +320,14 @@ DefineType(List *names, List *parameters)
if (resulttype != BYTEAOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type send function %s must return type \"bytea\"",
- NameListToString(sendName))));
+ errmsg("type send function %s must return type \"bytea\"",
+ NameListToString(sendName))));
}
/*
- * Convert analysis function proc name to an OID. If no analysis function
- * is specified, we'll use zero to select the built-in default algorithm.
+ * Convert analysis function proc name to an OID. If no analysis
+ * function is specified, we'll use zero to select the built-in
+ * default algorithm.
*/
if (analyzeName)
analyzeOid = findTypeAnalyzeFunction(analyzeName, typoid);
@@ -691,7 +692,7 @@ DefineDomain(CreateDomainStmt *stmt)
case CONSTR_UNIQUE:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("unique constraints not possible for domains")));
+ errmsg("unique constraints not possible for domains")));
break;
case CONSTR_PRIMARY:
@@ -932,8 +933,8 @@ findTypeOutputFunction(List *procname, Oid typeOid)
* arguments (data value, element OID).
*
* For backwards compatibility we allow OPAQUE in place of the actual
- * type name; if we see this, we issue a warning and fix up the pg_proc
- * entry.
+ * type name; if we see this, we issue a warning and fix up the
+ * pg_proc entry.
*/
MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid));
@@ -967,8 +968,8 @@ findTypeOutputFunction(List *procname, Oid typeOid)
{
/* Found, but must complain and fix the pg_proc entry */
ereport(WARNING,
- (errmsg("changing argument type of function %s from \"opaque\" to %s",
- NameListToString(procname), format_type_be(typeOid))));
+ (errmsg("changing argument type of function %s from \"opaque\" to %s",
+ NameListToString(procname), format_type_be(typeOid))));
SetFunctionArgType(procOid, 0, typeOid);
/*
@@ -1062,7 +1063,8 @@ findTypeAnalyzeFunction(List *procname, Oid typeOid)
Oid procOid;
/*
- * Analyze functions always take one INTERNAL argument and return bool.
+ * Analyze functions always take one INTERNAL argument and return
+ * bool.
*/
MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid));
@@ -1078,8 +1080,8 @@ findTypeAnalyzeFunction(List *procname, Oid typeOid)
if (get_func_rettype(procOid) != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type analyze function %s must return type \"boolean\"",
- NameListToString(procname))));
+ errmsg("type analyze function %s must return type \"boolean\"",
+ NameListToString(procname))));
return procOid;
}
@@ -1110,8 +1112,8 @@ DefineCompositeType(const RangeVar *typevar, List *coldeflist)
errmsg("composite type must have at least one attribute")));
/*
- * now set the parameters for keys/inheritance etc. All of these
- * are uninteresting for composite types...
+ * now set the parameters for keys/inheritance etc. All of these are
+ * uninteresting for composite types...
*/
createStmt->relation = (RangeVar *) typevar;
createStmt->tableElts = coldeflist;
@@ -1337,8 +1339,8 @@ AlterDomainNotNull(List *names, bool notNull)
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("column \"%s\" of table \"%s\" contains null values",
- NameStr(tupdesc->attrs[attnum - 1]->attname),
- RelationGetRelationName(testrel))));
+ NameStr(tupdesc->attrs[attnum - 1]->attname),
+ RelationGetRelationName(testrel))));
}
}
heap_endscan(scan);
@@ -1499,7 +1501,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
if (IsA(newConstraint, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("foreign key constraints not possible for domains")));
+ errmsg("foreign key constraints not possible for domains")));
/* otherwise it should be a plain Constraint */
if (!IsA(newConstraint, Constraint))
@@ -1517,13 +1519,13 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
case CONSTR_UNIQUE:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("unique constraints not possible for domains")));
+ errmsg("unique constraints not possible for domains")));
break;
case CONSTR_PRIMARY:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("primary key constraints not possible for domains")));
+ errmsg("primary key constraints not possible for domains")));
break;
case CONSTR_ATTR_DEFERRABLE:
@@ -1604,7 +1606,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
ereport(ERROR,
(errcode(ERRCODE_CHECK_VIOLATION),
errmsg("column \"%s\" of table \"%s\" contains values that violate the new constraint",
- NameStr(tupdesc->attrs[attnum - 1]->attname),
+ NameStr(tupdesc->attrs[attnum - 1]->attname),
RelationGetRelationName(testrel))));
}
@@ -2078,9 +2080,9 @@ AlterTypeOwner(List *names, AclId newOwnerSysId)
typTup = (Form_pg_type) GETSTRUCT(tup);
/*
- * If it's a composite type, we need to check that it really is a
- * free-standing composite type, and not a table's underlying type.
- * We want people to use ALTER TABLE not ALTER TYPE for that case.
+ * If it's a composite type, we need to check that it really is a
+ * free-standing composite type, and not a table's underlying type. We
+ * want people to use ALTER TABLE not ALTER TYPE for that case.
*/
if (typTup->typtype == 'c' && get_rel_relkind(typTup->typrelid) != 'c')
ereport(ERROR,
@@ -2088,7 +2090,7 @@ AlterTypeOwner(List *names, AclId newOwnerSysId)
errmsg("\"%s\" is a table's row type",
TypeNameToString(typename))));
- /*
+ /*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
@@ -2100,7 +2102,10 @@ AlterTypeOwner(List *names, AclId newOwnerSysId)
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to change owner")));
- /* Modify the owner --- okay to scribble on typTup because it's a copy */
+ /*
+ * Modify the owner --- okay to scribble on typTup because it's a
+ * copy
+ */
typTup->typowner = newOwnerSysId;
simple_heap_update(rel, &tup->t_self, tup);
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 8e637367b37..e365f946b17 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.143 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.144 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -46,10 +46,10 @@ extern bool Password_encryption;
/*
* The need-to-update-files flags are a pair of TransactionIds that show what
- * level of the transaction tree requested the update. To register an update,
+ * level of the transaction tree requested the update. To register an update,
* the transaction saves its own TransactionId in the flag, unless the value
* was already set to a valid TransactionId. If it aborts and the value is its
- * TransactionId, it resets the value to InvalidTransactionId. If it commits,
+ * TransactionId, it resets the value to InvalidTransactionId. If it commits,
* it changes the value to its parent's TransactionId. This way the value is
* propagated up to the topmost transaction, which will update the files if a
* valid TransactionId is detected.
@@ -169,7 +169,7 @@ write_group_file(Relation grel)
if (fp == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write to temporary file \"%s\": %m", tempname)));
+ errmsg("could not write to temporary file \"%s\": %m", tempname)));
/*
* Read pg_group and write the file. Note we use SnapshotSelf to
@@ -316,7 +316,7 @@ write_user_file(Relation urel)
if (fp == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write to temporary file \"%s\": %m", tempname)));
+ errmsg("could not write to temporary file \"%s\": %m", tempname)));
/*
* Read pg_shadow and write the file. Note we use SnapshotSelf to
@@ -1009,7 +1009,7 @@ AlterUserSet(AlterUserSetStmt *stmt)
errmsg("user \"%s\" does not exist", stmt->user)));
if (!(superuser() ||
- ((Form_pg_shadow) GETSTRUCT(oldtuple))->usesysid == GetUserId()))
+ ((Form_pg_shadow) GETSTRUCT(oldtuple))->usesysid == GetUserId()))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied")));
@@ -1216,14 +1216,14 @@ RenameUser(const char *oldname, const char *newname)
char repl_null[Natts_pg_shadow];
char repl_repl[Natts_pg_shadow];
int i;
-
+
/* ExclusiveLock because we need to update the password file */
rel = heap_openr(ShadowRelationName, ExclusiveLock);
dsc = RelationGetDescr(rel);
oldtuple = SearchSysCache(SHADOWNAME,
- CStringGetDatum(oldname),
- 0, 0, 0);
+ CStringGetDatum(oldname),
+ 0, 0, 0);
if (!HeapTupleIsValid(oldtuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
@@ -1259,7 +1259,7 @@ RenameUser(const char *oldname, const char *newname)
repl_repl[Anum_pg_shadow_usename - 1] = 'r';
repl_val[Anum_pg_shadow_usename - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(newname));
+ CStringGetDatum(newname));
repl_null[Anum_pg_shadow_usename - 1] = ' ';
datum = heap_getattr(oldtuple, Anum_pg_shadow_passwd, dsc, &isnull);
@@ -1269,14 +1269,14 @@ RenameUser(const char *oldname, const char *newname)
/* MD5 uses the username as salt, so just clear it on a rename */
repl_repl[Anum_pg_shadow_passwd - 1] = 'r';
repl_null[Anum_pg_shadow_passwd - 1] = 'n';
-
+
ereport(NOTICE,
- (errmsg("MD5 password cleared because of user rename")));
+ (errmsg("MD5 password cleared because of user rename")));
}
-
+
newtuple = heap_modifytuple(oldtuple, rel, repl_val, repl_null, repl_repl);
simple_heap_update(rel, &oldtuple->t_self, newtuple);
-
+
CatalogUpdateIndexes(rel, newtuple);
ReleaseSysCache(oldtuple);
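The reflowed comment near the top of this file describes how the need-to-update-files flag is handed up the transaction tree. A toy model of that hand-off, with invented names and a plain unsigned int standing in for TransactionId:

typedef unsigned int Xid;
#define InvalidXid	0

static Xid	update_flag = InvalidXid;	/* who asked for a file rewrite */

/* A transaction that touched pg_shadow/pg_group registers itself once. */
static void
register_update(Xid my_xid)
{
	if (update_flag == InvalidXid)
		update_flag = my_xid;
}

/* On abort, only the registering (sub)transaction clears the flag. */
static void
at_abort(Xid my_xid)
{
	if (update_flag == my_xid)
		update_flag = InvalidXid;
}

/*
 * On subcommit, ownership moves to the parent; when the topmost transaction
 * commits and still sees a valid xid, it rewrites the flat files.
 */
static void
at_subcommit(Xid my_xid, Xid parent_xid)
{
	if (update_flag == my_xid)
		update_flag = parent_xid;
}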
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 1a1cb2393f6..67c1c02b6d2 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.288 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.289 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -106,7 +106,7 @@ typedef struct VRelStats
* As these variables always appear together, we put them into one struct
* and pull initialization and cleanup into separate routines.
* ExecContext is used by repair_frag() and move_xxx_tuple(). More
- * accurately: It is *used* only in move_xxx_tuple(), but because this
+ * accurately: It is *used* only in move_xxx_tuple(), but because this
* routine is called many times, we initialize the struct just once in
* repair_frag() and pass it on to move_xxx_tuple().
*/
@@ -131,9 +131,9 @@ ExecContext_Init(ExecContext ec, Relation rel)
ec->estate = CreateExecutorState();
ec->resultRelInfo = makeNode(ResultRelInfo);
- ec->resultRelInfo->ri_RangeTableIndex = 1; /* dummy */
+ ec->resultRelInfo->ri_RangeTableIndex = 1; /* dummy */
ec->resultRelInfo->ri_RelationDesc = rel;
- ec->resultRelInfo->ri_TrigDesc = NULL; /* we don't fire triggers */
+ ec->resultRelInfo->ri_TrigDesc = NULL; /* we don't fire triggers */
ExecOpenIndices(ec->resultRelInfo);
@@ -154,6 +154,7 @@ ExecContext_Finish(ExecContext ec)
ExecCloseIndices(ec->resultRelInfo);
FreeExecutorState(ec->estate);
}
+
/*
* End of ExecContext Implementation
*----------------------------------------------------------------------
@@ -182,16 +183,16 @@ static void repair_frag(VRelStats *vacrelstats, Relation onerel,
VacPageList vacuum_pages, VacPageList fraged_pages,
int nindexes, Relation *Irel);
static void move_chain_tuple(Relation rel,
- Buffer old_buf, Page old_page, HeapTuple old_tup,
- Buffer dst_buf, Page dst_page, VacPage dst_vacpage,
- ExecContext ec, ItemPointer ctid, bool cleanVpd);
+ Buffer old_buf, Page old_page, HeapTuple old_tup,
+ Buffer dst_buf, Page dst_page, VacPage dst_vacpage,
+ ExecContext ec, ItemPointer ctid, bool cleanVpd);
static void move_plain_tuple(Relation rel,
- Buffer old_buf, Page old_page, HeapTuple old_tup,
- Buffer dst_buf, Page dst_page, VacPage dst_vacpage,
- ExecContext ec);
+ Buffer old_buf, Page old_page, HeapTuple old_tup,
+ Buffer dst_buf, Page dst_page, VacPage dst_vacpage,
+ ExecContext ec);
static void update_hint_bits(Relation rel, VacPageList fraged_pages,
- int num_fraged_pages, BlockNumber last_move_dest_block,
- int num_moved);
+ int num_fraged_pages, BlockNumber last_move_dest_block,
+ int num_moved);
static void vacuum_heap(VRelStats *vacrelstats, Relation onerel,
VacPageList vacpagelist);
static void vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage);
@@ -248,11 +249,11 @@ vacuum(VacuumStmt *vacstmt)
* Furthermore, the forced commit that occurs before truncating the
* relation's file would have the effect of committing the rest of the
* user's transaction too, which would certainly not be the desired
- * behavior. (This only applies to VACUUM FULL, though. We could
- * in theory run lazy VACUUM inside a transaction block, but we choose
- * to disallow that case because we'd rather commit as soon as possible
- * after finishing the vacuum. This is mainly so that we can let go the
- * AccessExclusiveLock that we may be holding.)
+ * behavior. (This only applies to VACUUM FULL, though. We could in
+ * theory run lazy VACUUM inside a transaction block, but we choose to
+ * disallow that case because we'd rather commit as soon as possible
+ * after finishing the vacuum. This is mainly so that we can let go
+ * the AccessExclusiveLock that we may be holding.)
*
* ANALYZE (without VACUUM) can run either way.
*/
@@ -262,9 +263,7 @@ vacuum(VacuumStmt *vacstmt)
in_outer_xact = false;
}
else
- {
in_outer_xact = IsInTransactionChain((void *) vacstmt);
- }
/*
* Send info about dead objects to the statistics collector
@@ -296,22 +295,21 @@ vacuum(VacuumStmt *vacstmt)
/*
* It's a database-wide VACUUM.
*
- * Compute the initially applicable OldestXmin and FreezeLimit
- * XIDs, so that we can record these values at the end of the
- * VACUUM. Note that individual tables may well be processed
- * with newer values, but we can guarantee that no
- * (non-shared) relations are processed with older ones.
+ * Compute the initially applicable OldestXmin and FreezeLimit XIDs,
+ * so that we can record these values at the end of the VACUUM.
+ * Note that individual tables may well be processed with newer
+ * values, but we can guarantee that no (non-shared) relations are
+ * processed with older ones.
*
- * It is okay to record non-shared values in pg_database, even
- * though we may vacuum shared relations with older cutoffs,
- * because only the minimum of the values present in
- * pg_database matters. We can be sure that shared relations
- * have at some time been vacuumed with cutoffs no worse than
- * the global minimum; for, if there is a backend in some
- * other DB with xmin = OLDXMIN that's determining the cutoff
- * with which we vacuum shared relations, it is not possible
- * for that database to have a cutoff newer than OLDXMIN
- * recorded in pg_database.
+ * It is okay to record non-shared values in pg_database, even though
+ * we may vacuum shared relations with older cutoffs, because only
+ * the minimum of the values present in pg_database matters. We
+ * can be sure that shared relations have at some time been
+ * vacuumed with cutoffs no worse than the global minimum; for, if
+ * there is a backend in some other DB with xmin = OLDXMIN that's
+ * determining the cutoff with which we vacuum shared relations,
+ * it is not possible for that database to have a cutoff newer
+ * than OLDXMIN recorded in pg_database.
*/
vacuum_set_xid_limits(vacstmt, false,
&initialOldestXmin,
@@ -321,8 +319,8 @@ vacuum(VacuumStmt *vacstmt)
/*
* Decide whether we need to start/commit our own transactions.
*
- * For VACUUM (with or without ANALYZE): always do so, so that we
- * can release locks as soon as possible. (We could possibly use the
+ * For VACUUM (with or without ANALYZE): always do so, so that we can
+ * release locks as soon as possible. (We could possibly use the
* outer transaction for a one-table VACUUM, but handling TOAST tables
* would be problematic.)
*
@@ -333,9 +331,7 @@ vacuum(VacuumStmt *vacstmt)
* locks sooner.
*/
if (vacstmt->vacuum)
- {
use_own_xacts = true;
- }
else
{
Assert(vacstmt->analyze);
@@ -359,10 +355,10 @@ vacuum(VacuumStmt *vacstmt)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * vacuum_rel expects to be entered with no transaction active; it will
- * start and commit its own transaction. But we are called by an SQL
- * command, and so we are executing inside a transaction already. We
- * commit the transaction started in PostgresMain() here, and start
+ * vacuum_rel expects to be entered with no transaction active; it
+ * will start and commit its own transaction. But we are called by an
+ * SQL command, and so we are executing inside a transaction already.
+ * We commit the transaction started in PostgresMain() here, and start
* another one before exiting to match the commit waiting for us back
* in PostgresMain().
*/
@@ -390,24 +386,24 @@ vacuum(VacuumStmt *vacstmt)
if (vacstmt->vacuum)
{
if (!vacuum_rel(relid, vacstmt, RELKIND_RELATION))
- all_rels = false; /* forget about updating dbstats */
+ all_rels = false; /* forget about updating dbstats */
}
if (vacstmt->analyze)
{
MemoryContext old_context = NULL;
/*
- * If using separate xacts, start one for analyze. Otherwise,
- * we can use the outer transaction, but we still need to call
- * analyze_rel in a memory context that will be cleaned up on
- * return (else we leak memory while processing multiple
- * tables).
+ * If using separate xacts, start one for analyze.
+ * Otherwise, we can use the outer transaction, but we
+ * still need to call analyze_rel in a memory context that
+ * will be cleaned up on return (else we leak memory while
+ * processing multiple tables).
*/
if (use_own_xacts)
{
StartTransactionCommand();
- SetQuerySnapshot(); /* might be needed for functions
- * in indexes */
+ SetQuerySnapshot(); /* might be needed for functions
+ * in indexes */
}
else
old_context = MemoryContextSwitchTo(anl_context);
@@ -873,8 +869,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
* indexes */
/*
- * Tell the cache replacement strategy that vacuum is causing
- * all following IO
+ * Tell the cache replacement strategy that vacuum is causing all
+ * following IO
*/
StrategyHintVacuum(true);
@@ -932,9 +928,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
}
/*
- * Check that it's a plain table; we used to do this in
- * get_rel_oids() but seems safer to check after we've locked the
- * relation.
+ * Check that it's a plain table; we used to do this in get_rel_oids()
+ * but seems safer to check after we've locked the relation.
*/
if (onerel->rd_rel->relkind != expected_relkind)
{
@@ -1201,7 +1196,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
if (PageIsNew(page))
{
- VacPage vacpagecopy;
+ VacPage vacpagecopy;
ereport(WARNING,
(errmsg("relation \"%s\" page %u is uninitialized --- fixing",
@@ -1220,7 +1215,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
if (PageIsEmpty(page))
{
- VacPage vacpagecopy;
+ VacPage vacpagecopy;
vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
free_space += vacpage->free;
@@ -1424,7 +1419,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
if (do_reap || do_frag)
{
- VacPage vacpagecopy = copy_vac_page(vacpage);
+ VacPage vacpagecopy = copy_vac_page(vacpage);
+
if (do_reap)
vpage_insert(vacuum_pages, vacpagecopy);
if (do_frag)
@@ -1504,9 +1500,9 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
RelationGetRelationName(onerel),
tups_vacuumed, num_tuples, nblocks),
errdetail("%.0f dead row versions cannot be removed yet.\n"
- "Nonremovable row versions range from %lu to %lu bytes long.\n"
+ "Nonremovable row versions range from %lu to %lu bytes long.\n"
"There were %.0f unused item pointers.\n"
- "Total free space (including removable row versions) is %.0f bytes.\n"
+ "Total free space (including removable row versions) is %.0f bytes.\n"
"%u pages are or will become empty, including %u at the end of the table.\n"
"%u pages containing %.0f free bytes are potential move destinations.\n"
"%s",
@@ -1544,7 +1540,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
BlockNumber last_move_dest_block = 0,
last_vacuum_block;
Page dst_page = NULL;
- ExecContextData ec;
+ ExecContextData ec;
VacPageListData Nvacpagelist;
VacPage dst_vacpage = NULL,
last_vacuum_page,
@@ -1595,13 +1591,13 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
blkno > last_move_dest_block;
blkno--)
{
- Buffer buf;
- Page page;
- OffsetNumber offnum,
- maxoff;
- bool isempty,
- dowrite,
- chain_tuple_moved;
+ Buffer buf;
+ Page page;
+ OffsetNumber offnum,
+ maxoff;
+ bool isempty,
+ dowrite,
+ chain_tuple_moved;
vacuum_delay_point();
@@ -1678,9 +1674,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
offnum <= maxoff;
offnum = OffsetNumberNext(offnum))
{
- Size tuple_len;
- HeapTupleData tuple;
- ItemId itemid = PageGetItemId(page, offnum);
+ Size tuple_len;
+ HeapTupleData tuple;
+ ItemId itemid = PageGetItemId(page, offnum);
if (!ItemIdIsUsed(itemid))
continue;
@@ -1693,29 +1689,29 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* VACUUM FULL has an exclusive lock on the relation. So
* normally no other transaction can have pending INSERTs or
- * DELETEs in this relation. A tuple is either
- * (a) a tuple in a system catalog, inserted or deleted by
- * a not yet committed transaction or
- * (b) dead (XMIN_INVALID or XMAX_COMMITTED) or
- * (c) inserted by a committed xact (XMIN_COMMITTED) or
- * (d) moved by the currently running VACUUM.
- * In case (a) we wouldn't be in repair_frag() at all.
+ * DELETEs in this relation. A tuple is either (a) a tuple in
+ * a system catalog, inserted or deleted by a not yet
+ * committed transaction or (b) dead (XMIN_INVALID or
+ * XMAX_COMMITTED) or (c) inserted by a committed xact
+ * (XMIN_COMMITTED) or (d) moved by the currently running
+ * VACUUM. In case (a) we wouldn't be in repair_frag() at all.
* In case (b) we cannot be here, because scan_heap() has
- * already marked the item as unused, see continue above.
- * Case (c) is what normally is to be expected.
- * Case (d) is only possible, if a whole tuple chain has been
- * moved while processing this or a higher numbered block.
+ * already marked the item as unused, see continue above. Case
+ * (c) is what normally is to be expected. Case (d) is only
+ * possible, if a whole tuple chain has been moved while
+ * processing this or a higher numbered block.
*/
if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
{
/*
- * There cannot be another concurrently running VACUUM. If
- * the tuple had been moved in by a previous VACUUM, the
- * visibility check would have set XMIN_COMMITTED. If the
- * tuple had been moved in by the currently running VACUUM,
- * the loop would have been terminated. We had
+ * There cannot be another concurrently running VACUUM.
+ * If the tuple had been moved in by a previous VACUUM,
+ * the visibility check would have set XMIN_COMMITTED. If
+ * the tuple had been moved in by the currently running
+ * VACUUM, the loop would have been terminated. We had
* elog(ERROR, ...) here, but as we are testing for a
- * can't-happen condition, Assert() seems more appropriate.
+ * can't-happen condition, Assert() seems more
+ * appropriate.
*/
Assert(!(tuple.t_data->t_infomask & HEAP_MOVED_IN));
@@ -1725,6 +1721,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* moved while cleaning this page or some previous one.
*/
Assert(tuple.t_data->t_infomask & HEAP_MOVED_OFF);
+
/*
* MOVED_OFF by another VACUUM would have caused the
* visibility check to set XMIN_COMMITTED or XMIN_INVALID.
@@ -1734,16 +1731,15 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/* Can't we Assert(keep_tuples > 0) here? */
if (keep_tuples == 0)
continue;
- if (chain_tuple_moved) /* some chains was moved
- * while */
- { /* cleaning this page */
+ if (chain_tuple_moved) /* some chains was moved while */
+ { /* cleaning this page */
Assert(vacpage->offsets_free > 0);
for (i = 0; i < vacpage->offsets_free; i++)
{
if (vacpage->offsets[i] == offnum)
break;
}
- if (i >= vacpage->offsets_free) /* not found */
+ if (i >= vacpage->offsets_free) /* not found */
{
vacpage->offsets[vacpage->offsets_free++] = offnum;
keep_tuples--;
@@ -2128,18 +2124,19 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
off <= maxoff;
off = OffsetNumberNext(off))
{
- ItemId itemid = PageGetItemId(page, off);
- HeapTupleHeader htup;
+ ItemId itemid = PageGetItemId(page, off);
+ HeapTupleHeader htup;
if (!ItemIdIsUsed(itemid))
continue;
htup = (HeapTupleHeader) PageGetItem(page, itemid);
if (htup->t_infomask & HEAP_XMIN_COMMITTED)
continue;
+
/*
- ** See comments in the walk-along-page loop above, why we
- ** have Asserts here instead of if (...) elog(ERROR).
- */
+ * * See comments in the walk-along-page loop above, why
+ * we * have Asserts here instead of if (...) elog(ERROR).
+ */
Assert(!(htup->t_infomask & HEAP_MOVED_IN));
Assert(htup->t_infomask & HEAP_MOVED_OFF);
Assert(HeapTupleHeaderGetXvac(htup) == myXID);
@@ -2152,7 +2149,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (vacpage->offsets[i] == off)
break;
}
- if (i >= vacpage->offsets_free) /* not found */
+ if (i >= vacpage->offsets_free) /* not found */
{
vacpage->offsets[vacpage->offsets_free++] = off;
Assert(keep_tuples > 0);
@@ -2247,7 +2244,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
*/
update_hint_bits(onerel, fraged_pages, num_fraged_pages,
last_move_dest_block, num_moved);
-
+
/*
* It'd be cleaner to make this report at the bottom of this routine,
* but then the rusage would double-count the second pass of index
@@ -2255,11 +2252,11 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* processing that occurs below.
*/
ereport(elevel,
- (errmsg("\"%s\": moved %u row versions, truncated %u to %u pages",
- RelationGetRelationName(onerel),
- num_moved, nblocks, blkno),
- errdetail("%s",
- vac_show_rusage(&ru0))));
+ (errmsg("\"%s\": moved %u row versions, truncated %u to %u pages",
+ RelationGetRelationName(onerel),
+ num_moved, nblocks, blkno),
+ errdetail("%s",
+ vac_show_rusage(&ru0))));
/*
* Reflect the motion of system tuples to catalog cache here.
@@ -2284,6 +2281,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
*vpleft = *vpright;
*vpright = vpsave;
}
+
/*
* keep_tuples is the number of tuples that have been moved
* off a page during chain moves but not been scanned over
@@ -2301,13 +2299,13 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (vacpage->blkno == (blkno - 1) &&
vacpage->offsets_free > 0)
{
- Buffer buf;
- Page page;
- OffsetNumber unused[BLCKSZ / sizeof(OffsetNumber)];
- OffsetNumber offnum,
- maxoff;
- int uncnt;
- int num_tuples = 0;
+ Buffer buf;
+ Page page;
+ OffsetNumber unused[BLCKSZ / sizeof(OffsetNumber)];
+ OffsetNumber offnum,
+ maxoff;
+ int uncnt;
+ int num_tuples = 0;
buf = ReadBuffer(onerel, vacpage->blkno);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
@@ -2317,7 +2315,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
offnum <= maxoff;
offnum = OffsetNumberNext(offnum))
{
- ItemId itemid = PageGetItemId(page, offnum);
+ ItemId itemid = PageGetItemId(page, offnum);
HeapTupleHeader htup;
if (!ItemIdIsUsed(itemid))
@@ -2327,9 +2325,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
continue;
/*
- ** See comments in the walk-along-page loop above, why we
- ** have Asserts here instead of if (...) elog(ERROR).
- */
+ * * See comments in the walk-along-page loop above, why
+ * we * have Asserts here instead of if (...) elog(ERROR).
+ */
Assert(!(htup->t_infomask & HEAP_MOVED_IN));
Assert(htup->t_infomask & HEAP_MOVED_OFF);
Assert(HeapTupleHeaderGetXvac(htup) == myXID);
@@ -2418,10 +2416,10 @@ move_chain_tuple(Relation rel,
ExecContext ec, ItemPointer ctid, bool cleanVpd)
{
TransactionId myXID = GetCurrentTransactionId();
- HeapTupleData newtup;
- OffsetNumber newoff;
- ItemId newitemid;
- Size tuple_len = old_tup->t_len;
+ HeapTupleData newtup;
+ OffsetNumber newoff;
+ ItemId newitemid;
+ Size tuple_len = old_tup->t_len;
heap_copytuple_with_tuple(old_tup, &newtup);
@@ -2434,36 +2432,32 @@ move_chain_tuple(Relation rel,
START_CRIT_SECTION();
old_tup->t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
- HEAP_XMIN_INVALID |
- HEAP_MOVED_IN);
+ HEAP_XMIN_INVALID |
+ HEAP_MOVED_IN);
old_tup->t_data->t_infomask |= HEAP_MOVED_OFF;
HeapTupleHeaderSetXvac(old_tup->t_data, myXID);
/*
* If this page was not used before - clean it.
*
- * NOTE: a nasty bug used to lurk here. It is possible
- * for the source and destination pages to be the same
- * (since this tuple-chain member can be on a page
- * lower than the one we're currently processing in
- * the outer loop). If that's true, then after
- * vacuum_page() the source tuple will have been
- * moved, and tuple.t_data will be pointing at
- * garbage. Therefore we must do everything that uses
+ * NOTE: a nasty bug used to lurk here. It is possible for the source
+ * and destination pages to be the same (since this tuple-chain member
+ * can be on a page lower than the one we're currently processing in
+ * the outer loop). If that's true, then after vacuum_page() the
+ * source tuple will have been moved, and tuple.t_data will be
+ * pointing at garbage. Therefore we must do everything that uses
* old_tup->t_data BEFORE this step!!
*
- * This path is different from the other callers of
- * vacuum_page, because we have already incremented
- * the vacpage's offsets_used field to account for the
- * tuple(s) we expect to move onto the page. Therefore
- * vacuum_page's check for offsets_used == 0 is wrong.
- * But since that's a good debugging check for all
- * other callers, we work around it here rather than
- * remove it.
+ * This path is different from the other callers of vacuum_page, because
+ * we have already incremented the vacpage's offsets_used field to
+ * account for the tuple(s) we expect to move onto the page. Therefore
+ * vacuum_page's check for offsets_used == 0 is wrong. But since
+ * that's a good debugging check for all other callers, we work around
+ * it here rather than remove it.
*/
if (!PageIsEmpty(dst_page) && cleanVpd)
{
- int sv_offsets_used = dst_vacpage->offsets_used;
+ int sv_offsets_used = dst_vacpage->offsets_used;
dst_vacpage->offsets_used = 0;
vacuum_page(rel, dst_buf, dst_vacpage);
@@ -2471,8 +2465,8 @@ move_chain_tuple(Relation rel,
}
/*
- * Update the state of the copied tuple, and store it
- * on the destination page.
+ * Update the state of the copied tuple, and store it on the
+ * destination page.
*/
newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
HEAP_XMIN_INVALID |
@@ -2484,7 +2478,7 @@ move_chain_tuple(Relation rel,
if (newoff == InvalidOffsetNumber)
{
elog(PANIC, "failed to add item with len = %lu to page %u while moving tuple chain",
- (unsigned long) tuple_len, dst_vacpage->blkno);
+ (unsigned long) tuple_len, dst_vacpage->blkno);
}
newitemid = PageGetItemId(dst_page, newoff);
pfree(newtup.t_data);
@@ -2509,8 +2503,7 @@ move_chain_tuple(Relation rel,
else
{
/*
- * No XLOG record, but still need to flag that XID
- * exists on disk
+ * No XLOG record, but still need to flag that XID exists on disk
*/
MyXactMadeTempRelUpdate = true;
}
@@ -2518,9 +2511,8 @@ move_chain_tuple(Relation rel,
END_CRIT_SECTION();
/*
- * Set new tuple's t_ctid pointing to itself for last
- * tuple in chain, and to next tuple in chain
- * otherwise.
+ * Set new tuple's t_ctid pointing to itself for last tuple in chain,
+ * and to next tuple in chain otherwise.
*/
/* Is this ok after log_heap_move() and END_CRIT_SECTION()? */
if (!ItemPointerIsValid(ctid))
@@ -2559,10 +2551,10 @@ move_plain_tuple(Relation rel,
ExecContext ec)
{
TransactionId myXID = GetCurrentTransactionId();
- HeapTupleData newtup;
- OffsetNumber newoff;
- ItemId newitemid;
- Size tuple_len = old_tup->t_len;
+ HeapTupleData newtup;
+ OffsetNumber newoff;
+ ItemId newitemid;
+ Size tuple_len = old_tup->t_len;
/* copy tuple */
heap_copytuple_with_tuple(old_tup, &newtup);
@@ -2570,9 +2562,9 @@ move_plain_tuple(Relation rel,
/*
* register invalidation of source tuple in catcaches.
*
- * (Note: we do not need to register the copied tuple, because we
- * are not changing the tuple contents and so there cannot be
- * any need to flush negative catcache entries.)
+ * (Note: we do not need to register the copied tuple, because we are not
+ * changing the tuple contents and so there cannot be any need to
+ * flush negative catcache entries.)
*/
CacheInvalidateHeapTuple(rel, old_tup);
@@ -2609,8 +2601,8 @@ move_plain_tuple(Relation rel,
* Mark old tuple as MOVED_OFF by me.
*/
old_tup->t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
- HEAP_XMIN_INVALID |
- HEAP_MOVED_IN);
+ HEAP_XMIN_INVALID |
+ HEAP_MOVED_IN);
old_tup->t_data->t_infomask |= HEAP_MOVED_OFF;
HeapTupleHeaderSetXvac(old_tup->t_data, myXID);
@@ -2628,8 +2620,7 @@ move_plain_tuple(Relation rel,
else
{
/*
- * No XLOG record, but still need to flag that XID exists
- * on disk
+ * No XLOG record, but still need to flag that XID exists on disk
*/
MyXactMadeTempRelUpdate = true;
}
@@ -2637,7 +2628,7 @@ move_plain_tuple(Relation rel,
END_CRIT_SECTION();
dst_vacpage->free = ((PageHeader) dst_page)->pd_upper -
- ((PageHeader) dst_page)->pd_lower;
+ ((PageHeader) dst_page)->pd_lower;
LockBuffer(dst_buf, BUFFER_LOCK_UNLOCK);
LockBuffer(old_buf, BUFFER_LOCK_UNLOCK);
@@ -2670,17 +2661,17 @@ update_hint_bits(Relation rel, VacPageList fraged_pages, int num_fraged_pages,
{
int checked_moved = 0;
int i;
- VacPage *curpage;
+ VacPage *curpage;
for (i = 0, curpage = fraged_pages->pagedesc;
i < num_fraged_pages;
i++, curpage++)
{
- Buffer buf;
- Page page;
- OffsetNumber max_offset;
- OffsetNumber off;
- int num_tuples = 0;
+ Buffer buf;
+ Page page;
+ OffsetNumber max_offset;
+ OffsetNumber off;
+ int num_tuples = 0;
vacuum_delay_point();
@@ -2696,17 +2687,18 @@ update_hint_bits(Relation rel, VacPageList fraged_pages, int num_fraged_pages,
off <= max_offset;
off = OffsetNumberNext(off))
{
- ItemId itemid = PageGetItemId(page, off);
- HeapTupleHeader htup;
+ ItemId itemid = PageGetItemId(page, off);
+ HeapTupleHeader htup;
if (!ItemIdIsUsed(itemid))
continue;
htup = (HeapTupleHeader) PageGetItem(page, itemid);
if (htup->t_infomask & HEAP_XMIN_COMMITTED)
continue;
+
/*
- * See comments in the walk-along-page loop above, why we
- * have Asserts here instead of if (...) elog(ERROR). The
+ * See comments in the walk-along-page loop above, why we have
+ * Asserts here instead of if (...) elog(ERROR). The
* difference here is that we may see MOVED_IN.
*/
Assert(htup->t_infomask & HEAP_MOVED);
@@ -2865,14 +2857,14 @@ scan_index(Relation indrel, double num_tuples)
false);
ereport(elevel,
- (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
- RelationGetRelationName(indrel),
- stats->num_index_tuples,
- stats->num_pages),
- errdetail("%u index pages have been deleted, %u are currently reusable.\n"
- "%s",
- stats->pages_deleted, stats->pages_free,
- vac_show_rusage(&ru0))));
+ (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples,
+ stats->num_pages),
+ errdetail("%u index pages have been deleted, %u are currently reusable.\n"
+ "%s",
+ stats->pages_deleted, stats->pages_free,
+ vac_show_rusage(&ru0))));
/*
* Check for tuple count mismatch. If the index is partial, then it's
@@ -2932,16 +2924,16 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
false);
ereport(elevel,
- (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
- RelationGetRelationName(indrel),
- stats->num_index_tuples,
- stats->num_pages),
- errdetail("%.0f index row versions were removed.\n"
+ (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples,
+ stats->num_pages),
+ errdetail("%.0f index row versions were removed.\n"
"%u index pages have been deleted, %u are currently reusable.\n"
- "%s",
- stats->tuples_removed,
- stats->pages_deleted, stats->pages_free,
- vac_show_rusage(&ru0))));
+ "%s",
+ stats->tuples_removed,
+ stats->pages_deleted, stats->pages_free,
+ vac_show_rusage(&ru0))));
/*
* Check for tuple count mismatch. If the index is partial, then it's
@@ -3370,7 +3362,7 @@ vacuum_delay_point(void)
if (VacuumCostActive && !InterruptPending &&
VacuumCostBalance >= VacuumCostLimit)
{
- int msec;
+ int msec;
msec = VacuumCostDelay * VacuumCostBalance / VacuumCostLimit;
if (msec > VacuumCostDelay * 4)
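The vacuum_delay_point() hunk above shows the cost-based delay formula: the sleep length is proportional to the accumulated cost balance, clamped at four times the configured delay. A free-standing sketch of that throttle (the variable names are stand-ins for the real GUCs, not the backend globals):

#include <unistd.h>

static int	cost_delay_ms = 10;		/* ~ vacuum_cost_delay */
static int	cost_limit = 200;		/* ~ vacuum_cost_limit */
static int	cost_balance = 0;		/* accumulated page-access cost */

/* Called between pages: sleep once enough cost has been accumulated. */
static void
delay_point(void)
{
	if (cost_balance >= cost_limit)
	{
		int			msec = cost_delay_ms * cost_balance / cost_limit;

		if (msec > cost_delay_ms * 4)
			msec = cost_delay_ms * 4;	/* clamp, as in the hunk above */
		usleep(msec * 1000);
		cost_balance = 0;
	}
}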
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index bfd41beec55..f19001d6796 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -31,7 +31,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.44 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.45 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -594,14 +594,14 @@ lazy_scan_index(Relation indrel, LVRelStats *vacrelstats)
false);
ereport(elevel,
- (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
- RelationGetRelationName(indrel),
- stats->num_index_tuples,
- stats->num_pages),
- errdetail("%u index pages have been deleted, %u are currently reusable.\n"
- "%s",
- stats->pages_deleted, stats->pages_free,
- vac_show_rusage(&ru0))));
+ (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples,
+ stats->num_pages),
+ errdetail("%u index pages have been deleted, %u are currently reusable.\n"
+ "%s",
+ stats->pages_deleted, stats->pages_free,
+ vac_show_rusage(&ru0))));
pfree(stats);
}
@@ -654,16 +654,16 @@ lazy_vacuum_index(Relation indrel, LVRelStats *vacrelstats)
false);
ereport(elevel,
- (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
- RelationGetRelationName(indrel),
- stats->num_index_tuples,
- stats->num_pages),
- errdetail("%.0f index row versions were removed.\n"
+ (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples,
+ stats->num_pages),
+ errdetail("%.0f index row versions were removed.\n"
"%u index pages have been deleted, %u are currently reusable.\n"
- "%s",
- stats->tuples_removed,
- stats->pages_deleted, stats->pages_free,
- vac_show_rusage(&ru0))));
+ "%s",
+ stats->tuples_removed,
+ stats->pages_deleted, stats->pages_free,
+ vac_show_rusage(&ru0))));
pfree(stats);
}
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index 8c962c5206c..cb4a3cde717 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.100 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.101 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,7 +62,7 @@ assign_datestyle(const char *value, bool doit, GucSource source)
if (source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid list syntax for parameter \"datestyle\"")));
+ errmsg("invalid list syntax for parameter \"datestyle\"")));
return NULL;
}
@@ -148,8 +148,8 @@ assign_datestyle(const char *value, bool doit, GucSource source)
if (source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("unrecognized \"datestyle\" key word: \"%s\"",
- tok)));
+ errmsg("unrecognized \"datestyle\" key word: \"%s\"",
+ tok)));
ok = false;
break;
}
@@ -314,9 +314,10 @@ assign_timezone(const char *value, bool doit, GucSource source)
*
* During GUC initialization, since the timezone library isn't
* set up yet, pg_get_current_timezone will return NULL and we
- * will leave the setting as UNKNOWN. If this isn't overridden
- * from the config file then pg_timezone_initialize() will
- * eventually select a default value from the environment.
+ * will leave the setting as UNKNOWN. If this isn't
+ * overridden from the config file then
+ * pg_timezone_initialize() will eventually select a default
+ * value from the environment.
*/
const char *curzone = pg_get_current_timezone();
@@ -329,13 +330,14 @@ assign_timezone(const char *value, bool doit, GucSource source)
* Otherwise assume it is a timezone name.
*
* We have to actually apply the change before we can have any
- * hope of checking it. So, save the old value in case we have
- * to back out. We have to copy since pg_get_current_timezone
- * returns a pointer to its static state.
+ * hope of checking it. So, save the old value in case we
+ * have to back out. We have to copy since
+ * pg_get_current_timezone returns a pointer to its static
+ * state.
*
- * This would all get a lot simpler if the TZ library had a better
- * API that would let us look up and test a timezone name without
- * making it the default.
+ * This would all get a lot simpler if the TZ library had a
+ * better API that would let us look up and test a timezone
+ * name without making it the default.
*/
const char *cur_tz;
char *save_tz;
@@ -368,22 +370,23 @@ assign_timezone(const char *value, bool doit, GucSource source)
else
{
/*
- * TZ library wasn't initialized yet. Annoyingly, we will
- * come here during startup because guc-file.l checks
- * the value with doit = false before actually applying.
- * The best approach seems to be as follows:
+ * TZ library wasn't initialized yet. Annoyingly, we
+ * will come here during startup because guc-file.l
+ * checks the value with doit = false before actually
+ * applying. The best approach seems to be as follows:
*
* 1. known && acceptable: leave the setting in place,
* since we'll apply it soon anyway. This is mainly
- * so that any log messages printed during this interval
- * are timestamped with the user's requested timezone.
+ * so that any log messages printed during this
+ * interval are timestamped with the user's requested
+ * timezone.
*
- * 2. known && !acceptable: revert to GMT for lack of
- * any better idea. (select_default_timezone() may get
+ * 2. known && !acceptable: revert to GMT for lack of any
+ * better idea. (select_default_timezone() may get
* called later to undo this.)
*
- * 3. !known: no need to do anything since TZ library
- * did not change its state.
+ * 3. !known: no need to do anything since TZ library did
+ * not change its state.
*
* Again, this should all go away sometime soon.
*/
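
The comments re-flowed in the two hunks above describe the awkward part of timezone assignment: the setting has to be applied before it can be tested, so the old value must be saved (copied, since the getter returns static state) and restored on failure. A stand-alone sketch of that "apply, test, maybe back out" dance, written against POSIX setenv()/tzset() rather than the backend's pg_tzset()/pg_get_current_timezone() (so the API names differ, but the shape is the same), with the acceptance test left to the caller:

#include <stdlib.h>
#include <string.h>
#include <time.h>

/*
 * Sketch (POSIX, not backend code): install a candidate zone, let the
 * caller judge the result, and restore the saved setting if rejected.
 */
static int
apply_timezone(const char *candidate, int (*acceptable) (void))
{
    const char *cur_tz = getenv("TZ");
    char       *save_tz = cur_tz ? strdup(cur_tz) : NULL;  /* must copy */
    int         ok;

    setenv("TZ", candidate, 1);     /* apply before we can test anything */
    tzset();

    ok = acceptable();
    if (!ok)
    {
        /* back out to the saved setting */
        if (save_tz)
            setenv("TZ", save_tz, 1);
        else
            unsetenv("TZ");
        tzset();
    }
    free(save_tz);
    return ok;
}

/* trivial stand-in for a real acceptance check */
static int
zone_looks_sane(void)
{
    return tzname[0] != NULL && tzname[0][0] != '\0';
}

int
main(void)
{
    return apply_timezone("Europe/Vienna", zone_looks_sane) ? 0 : 1;
}

The three startup cases enumerated in the comment (known && acceptable, known && !acceptable, !known) sit on top of this same mechanism and are not reproduced in the sketch.
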
@@ -441,7 +444,7 @@ assign_timezone(const char *value, bool doit, GucSource source)
const char *
show_timezone(void)
{
- const char *tzn;
+ const char *tzn;
if (HasCTZSet)
{
@@ -472,14 +475,14 @@ assign_XactIsoLevel(const char *value, bool doit, GucSource source)
{
if (doit && source >= PGC_S_INTERACTIVE)
{
- if (SerializableSnapshot != NULL)
- ereport(ERROR,
- (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
- errmsg("SET TRANSACTION ISOLATION LEVEL must be called before any query")));
- if (IsSubTransaction())
- ereport(ERROR,
- (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
- errmsg("SET TRANSACTION ISOLATION LEVEL must not be called in a subtransaction")));
+ if (SerializableSnapshot != NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
+ errmsg("SET TRANSACTION ISOLATION LEVEL must be called before any query")));
+ if (IsSubTransaction())
+ ereport(ERROR,
+ (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
+ errmsg("SET TRANSACTION ISOLATION LEVEL must not be called in a subtransaction")));
}
if (strcmp(value, "serializable") == 0)
@@ -596,7 +599,7 @@ assign_client_encoding(const char *value, bool doit, GucSource source)
* limit on names, so we can tell whether we're being passed an initial
* username or a saved/restored value.
*/
-extern char *session_authorization_string; /* in guc.c */
+extern char *session_authorization_string; /* in guc.c */
const char *
assign_session_authorization(const char *value, bool doit, GucSource source)
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index aafc42b1d40..abc37fcc8f0 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.84 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.85 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -191,8 +191,8 @@ checkViewTupleDesc(TupleDesc newdesc, TupleDesc olddesc)
newattr->atttypmod != oldattr->atttypmod)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("cannot change data type of view column \"%s\"",
- NameStr(oldattr->attname))));
+ errmsg("cannot change data type of view column \"%s\"",
+ NameStr(oldattr->attname))));
/* We can ignore the remaining attributes of an attribute... */
}