Diffstat (limited to 'src/backend/access')
-rw-r--r--  src/backend/access/gin/ginbtree.c          |   1
-rw-r--r--  src/backend/access/gist/gistbuild.c        |   8
-rw-r--r--  src/backend/access/gist/gistget.c          |   5
-rw-r--r--  src/backend/access/gist/gistutil.c         |   6
-rw-r--r--  src/backend/access/gist/gistxlog.c         |   4
-rw-r--r--  src/backend/access/hash/hashinsert.c       |   2
-rw-r--r--  src/backend/access/hash/hashsearch.c       |   2
-rw-r--r--  src/backend/access/heap/heapam.c           | 461
-rw-r--r--  src/backend/access/heap/rewriteheap.c      |   2
-rw-r--r--  src/backend/access/heap/visibilitymap.c    |   2
-rw-r--r--  src/backend/access/nbtree/nbtpage.c        |   8
-rw-r--r--  src/backend/access/nbtree/nbtxlog.c        |  22
-rw-r--r--  src/backend/access/rmgrdesc/clogdesc.c     |   4
-rw-r--r--  src/backend/access/rmgrdesc/dbasedesc.c    |   4
-rw-r--r--  src/backend/access/rmgrdesc/gindesc.c      |   4
-rw-r--r--  src/backend/access/rmgrdesc/gistdesc.c     |   4
-rw-r--r--  src/backend/access/rmgrdesc/hashdesc.c     |   4
-rw-r--r--  src/backend/access/rmgrdesc/heapdesc.c     |   4
-rw-r--r--  src/backend/access/rmgrdesc/mxactdesc.c    |   4
-rw-r--r--  src/backend/access/rmgrdesc/nbtdesc.c      |   4
-rw-r--r--  src/backend/access/rmgrdesc/relmapdesc.c   |   4
-rw-r--r--  src/backend/access/rmgrdesc/seqdesc.c      |   4
-rw-r--r--  src/backend/access/rmgrdesc/smgrdesc.c     |   4
-rw-r--r--  src/backend/access/rmgrdesc/spgdesc.c      |   4
-rw-r--r--  src/backend/access/rmgrdesc/standbydesc.c  |   4
-rw-r--r--  src/backend/access/rmgrdesc/tblspcdesc.c   |   4
-rw-r--r--  src/backend/access/rmgrdesc/xactdesc.c     |   4
-rw-r--r--  src/backend/access/rmgrdesc/xlogdesc.c     |   9
-rw-r--r--  src/backend/access/spgist/spgtextproc.c    |   2
-rw-r--r--  src/backend/access/transam/multixact.c     | 184
-rw-r--r--  src/backend/access/transam/timeline.c      |  21
-rw-r--r--  src/backend/access/transam/xact.c          |  15
-rw-r--r--  src/backend/access/transam/xlog.c          | 641
-rw-r--r--  src/backend/access/transam/xlogarchive.c   |  41
-rw-r--r--  src/backend/access/transam/xlogfuncs.c     |  10
-rw-r--r--  src/backend/access/transam/xlogreader.c    |   6
36 files changed, 768 insertions(+), 744 deletions(-)
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 7acc8f646ec..2a6be4b1a99 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -173,7 +173,6 @@ void
ginFindParents(GinBtree btree, GinBtreeStack *stack,
BlockNumber rootBlkno)
{
-
Page page;
Buffer buffer;
BlockNumber blkno,
diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c
index 46f7ce65635..2f2edb83626 100644
--- a/src/backend/access/gist/gistbuild.c
+++ b/src/backend/access/gist/gistbuild.c
@@ -610,9 +610,9 @@ gistProcessItup(GISTBuildState *buildstate, IndexTuple itup,
newtup = gistgetadjusted(indexrel, idxtuple, itup, giststate);
if (newtup)
{
- blkno = gistbufferinginserttuples(buildstate, buffer, level,
- &newtup, 1, childoffnum,
- InvalidBlockNumber, InvalidOffsetNumber);
+ blkno = gistbufferinginserttuples(buildstate, buffer, level,
+ &newtup, 1, childoffnum,
+ InvalidBlockNumber, InvalidOffsetNumber);
/* gistbufferinginserttuples() released the buffer */
}
else
@@ -680,7 +680,7 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level,
GISTBuildBuffers *gfbb = buildstate->gfbb;
List *splitinfo;
bool is_split;
- BlockNumber placed_to_blk = InvalidBlockNumber;
+ BlockNumber placed_to_blk = InvalidBlockNumber;
is_split = gistplacetopage(buildstate->indexrel,
buildstate->freespace,
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index cef31ce66e9..e97ab8f3fd5 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -364,8 +364,9 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances,
item->blkno = ItemPointerGetBlockNumber(&it->t_tid);
/*
- * LSN of current page is lsn of parent page for child. We only
- * have a shared lock, so we need to get the LSN atomically.
+ * LSN of current page is lsn of parent page for child. We
+ * only have a shared lock, so we need to get the LSN
+ * atomically.
*/
item->data.parentlsn = BufferGetLSNAtomic(buffer);
}
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index f7d50ddb712..b9c1967ebc0 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -414,7 +414,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
* some inserts to go to other equally-good subtrees.
*
* keep_current_best is -1 if we haven't yet had to make a random choice
- * whether to keep the current best tuple. If we have done so, and
+ * whether to keep the current best tuple. If we have done so, and
* decided to keep it, keep_current_best is 1; if we've decided to
* replace, keep_current_best is 0. (This state will be reset to -1 as
* soon as we've made the replacement, but sometimes we make the choice in
@@ -810,8 +810,8 @@ gistGetFakeLSN(Relation rel)
if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP)
{
/*
- * Temporary relations are only accessible in our session, so a
- * simple backend-local counter will do.
+ * Temporary relations are only accessible in our session, so a simple
+ * backend-local counter will do.
*/
return counter++;
}
diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c
index 3daeea396be..17946bfec3f 100644
--- a/src/backend/access/gist/gistxlog.c
+++ b/src/backend/access/gist/gistxlog.c
@@ -38,7 +38,7 @@ static MemoryContext opCtx; /* working memory for operations */
* follow-right flag, because that change is not included in the full-page
* image. To be sure that the intermediate state with the wrong flag value is
* not visible to concurrent Hot Standby queries, this function handles
- * restoring the full-page image as well as updating the flag. (Note that
+ * restoring the full-page image as well as updating the flag. (Note that
* we never need to do anything else to the child page in the current WAL
* action.)
*/
@@ -89,7 +89,7 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record)
/*
* We need to acquire and hold lock on target page while updating the left
- * child page. If we have a full-page image of target page, getting the
+ * child page. If we have a full-page image of target page, getting the
* lock is a side-effect of restoring that image. Note that even if the
* target page no longer exists, we'll still attempt to replay the change
* on the child page.
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index 63be2f37872..4508a36bd05 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -90,7 +90,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
/*
* If the previous iteration of this loop locked what is still the
- * correct target bucket, we are done. Otherwise, drop any old lock
+ * correct target bucket, we are done. Otherwise, drop any old lock
* and lock what now appears to be the correct bucket.
*/
if (retry)
diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c
index ceb9ef72baa..91661ba0e03 100644
--- a/src/backend/access/hash/hashsearch.c
+++ b/src/backend/access/hash/hashsearch.c
@@ -210,7 +210,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
/*
* If the previous iteration of this loop locked what is still the
- * correct target bucket, we are done. Otherwise, drop any old lock
+ * correct target bucket, we are done. Otherwise, drop any old lock
* and lock what now appears to be the correct bucket.
*/
if (retry)
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 9498cbb8a51..834a566f7e0 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -120,32 +120,34 @@ static bool ConditionalMultiXactIdWait(MultiXactId multi,
static const struct
{
LOCKMODE hwlock;
- MultiXactStatus lockstatus;
- MultiXactStatus updstatus;
+ MultiXactStatus lockstatus;
+ MultiXactStatus updstatus;
}
-tupleLockExtraInfo[MaxLockTupleMode + 1] =
+
+ tupleLockExtraInfo[MaxLockTupleMode + 1] =
{
- { /* LockTupleKeyShare */
+ { /* LockTupleKeyShare */
AccessShareLock,
MultiXactStatusForKeyShare,
- -1 /* KeyShare does not allow updating tuples */
+ -1 /* KeyShare does not allow updating tuples */
},
- { /* LockTupleShare */
+ { /* LockTupleShare */
RowShareLock,
MultiXactStatusForShare,
- -1 /* Share does not allow updating tuples */
+ -1 /* Share does not allow updating tuples */
},
- { /* LockTupleNoKeyExclusive */
+ { /* LockTupleNoKeyExclusive */
ExclusiveLock,
MultiXactStatusForNoKeyUpdate,
MultiXactStatusNoKeyUpdate
},
- { /* LockTupleExclusive */
+ { /* LockTupleExclusive */
AccessExclusiveLock,
MultiXactStatusForUpdate,
MultiXactStatusUpdate
}
};
+
/* Get the LOCKMODE for a given MultiXactStatus */
#define LOCKMODE_from_mxstatus(status) \
(tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
@@ -168,12 +170,12 @@ tupleLockExtraInfo[MaxLockTupleMode + 1] =
*/
static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
{
- LockTupleKeyShare, /* ForKeyShare */
- LockTupleShare, /* ForShare */
- LockTupleNoKeyExclusive, /* ForNoKeyUpdate */
- LockTupleExclusive, /* ForUpdate */
- LockTupleNoKeyExclusive, /* NoKeyUpdate */
- LockTupleExclusive /* Update */
+ LockTupleKeyShare, /* ForKeyShare */
+ LockTupleShare, /* ForShare */
+ LockTupleNoKeyExclusive, /* ForNoKeyUpdate */
+ LockTupleExclusive, /* ForUpdate */
+ LockTupleNoKeyExclusive, /* NoKeyUpdate */
+ LockTupleExclusive /* Update */
};
/* Get the LockTupleMode for a given MultiXactStatus */
@@ -365,10 +367,10 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
* page. That's how index-only scans work fine in hot standby. A crucial
* difference between index-only scans and heap scans is that the
* index-only scan completely relies on the visibility map where as heap
- * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if the
- * page-level flag can be trusted in the same way, because it might get
- * propagated somehow without being explicitly WAL-logged, e.g. via a full
- * page write. Until we can prove that beyond doubt, let's check each
+ * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
+ * the page-level flag can be trusted in the same way, because it might
+ * get propagated somehow without being explicitly WAL-logged, e.g. via a
+ * full page write. Until we can prove that beyond doubt, let's check each
* tuple for visibility the hard way.
*/
all_visible = PageIsAllVisible(dp) && !snapshot->takenDuringRecovery;
@@ -1880,7 +1882,7 @@ heap_get_latest_tid(Relation relation,
* tuple. Check for XMIN match.
*/
if (TransactionIdIsValid(priorXmax) &&
- !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
+ !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
{
UnlockReleaseBuffer(buffer);
break;
@@ -2488,7 +2490,7 @@ compute_infobits(uint16 infomask, uint16 infomask2)
((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
- /* note we ignore HEAP_XMAX_SHR_LOCK here */
+ /* note we ignore HEAP_XMAX_SHR_LOCK here */
((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
XLHL_KEYS_UPDATED : 0);
@@ -2730,13 +2732,12 @@ l1:
}
/*
- * If this is the first possibly-multixact-able operation in the
- * current transaction, set my per-backend OldestMemberMXactId setting.
- * We can be certain that the transaction will never become a member of
- * any older MultiXactIds than that. (We have to do this even if we
- * end up just using our own TransactionId below, since some other
- * backend could incorporate our XID into a MultiXact immediately
- * afterwards.)
+ * If this is the first possibly-multixact-able operation in the current
+ * transaction, set my per-backend OldestMemberMXactId setting. We can be
+ * certain that the transaction will never become a member of any older
+ * MultiXactIds than that. (We have to do this even if we end up just
+ * using our own TransactionId below, since some other backend could
+ * incorporate our XID into a MultiXact immediately afterwards.)
*/
MultiXactIdSetOldestMember();
@@ -2846,7 +2847,7 @@ simple_heap_delete(Relation relation, ItemPointer tid)
result = heap_delete(relation, tid,
GetCurrentCommandId(true), InvalidSnapshot,
- true /* wait for commit */,
+ true /* wait for commit */ ,
&hufd);
switch (result)
{
@@ -2936,7 +2937,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
bool checked_lockers;
bool locker_remains;
TransactionId xmax_new_tuple,
- xmax_old_tuple;
+ xmax_old_tuple;
uint16 infomask_old_tuple,
infomask2_old_tuple,
infomask_new_tuple,
@@ -3006,13 +3007,13 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
/*
* If we're not updating any "key" column, we can grab a weaker lock type.
- * This allows for more concurrency when we are running simultaneously with
- * foreign key checks.
+ * This allows for more concurrency when we are running simultaneously
+ * with foreign key checks.
*
- * Note that if a column gets detoasted while executing the update, but the
- * value ends up being the same, this test will fail and we will use the
- * stronger lock. This is acceptable; the important case to optimize is
- * updates that don't manipulate key columns, not those that
+ * Note that if a column gets detoasted while executing the update, but
+ * the value ends up being the same, this test will fail and we will use
+ * the stronger lock. This is acceptable; the important case to optimize
+ * is updates that don't manipulate key columns, not those that
* serendipitiously arrive at the same key values.
*/
HeapSatisfiesHOTandKeyUpdate(relation, hot_attrs, key_attrs,
@@ -3026,12 +3027,12 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
/*
* If this is the first possibly-multixact-able operation in the
- * current transaction, set my per-backend OldestMemberMXactId setting.
- * We can be certain that the transaction will never become a member of
- * any older MultiXactIds than that. (We have to do this even if we
- * end up just using our own TransactionId below, since some other
- * backend could incorporate our XID into a MultiXact immediately
- * afterwards.)
+ * current transaction, set my per-backend OldestMemberMXactId
+ * setting. We can be certain that the transaction will never become a
+ * member of any older MultiXactIds than that. (We have to do this
+ * even if we end up just using our own TransactionId below, since
+ * some other backend could incorporate our XID into a MultiXact
+ * immediately afterwards.)
*/
MultiXactIdSetOldestMember();
}
@@ -3064,7 +3065,7 @@ l2:
}
else if (result == HeapTupleBeingUpdated && wait)
{
- TransactionId xwait;
+ TransactionId xwait;
uint16 infomask;
bool can_continue = false;
@@ -3073,13 +3074,14 @@ l2:
/*
* XXX note that we don't consider the "no wait" case here. This
* isn't a problem currently because no caller uses that case, but it
- * should be fixed if such a caller is introduced. It wasn't a problem
- * previously because this code would always wait, but now that some
- * tuple locks do not conflict with one of the lock modes we use, it is
- * possible that this case is interesting to handle specially.
+ * should be fixed if such a caller is introduced. It wasn't a
+ * problem previously because this code would always wait, but now
+ * that some tuple locks do not conflict with one of the lock modes we
+ * use, it is possible that this case is interesting to handle
+ * specially.
*
- * This may cause failures with third-party code that calls heap_update
- * directly.
+ * This may cause failures with third-party code that calls
+ * heap_update directly.
*/
/* must copy state data before unlocking buffer */
@@ -3109,15 +3111,15 @@ l2:
* gone (or even not sleep at all in some cases); we need to preserve
* it as locker, unless it is gone completely.
*
- * If it's not a multi, we need to check for sleeping conditions before
- * actually going to sleep. If the update doesn't conflict with the
- * locks, we just continue without sleeping (but making sure it is
- * preserved).
+ * If it's not a multi, we need to check for sleeping conditions
+ * before actually going to sleep. If the update doesn't conflict
+ * with the locks, we just continue without sleeping (but making sure
+ * it is preserved).
*/
if (infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId update_xact;
- int remain;
+ TransactionId update_xact;
+ int remain;
/* wait for multixact */
MultiXactIdWait((MultiXactId) xwait, mxact_status, &remain,
@@ -3135,18 +3137,18 @@ l2:
goto l2;
/*
- * Note that the multixact may not be done by now. It could have
+ * Note that the multixact may not be done by now. It could have
* surviving members; our own xact or other subxacts of this
* backend, and also any other concurrent transaction that locked
- * the tuple with KeyShare if we only got TupleLockUpdate. If this
- * is the case, we have to be careful to mark the updated tuple
- * with the surviving members in Xmax.
+ * the tuple with KeyShare if we only got TupleLockUpdate. If
+ * this is the case, we have to be careful to mark the updated
+ * tuple with the surviving members in Xmax.
*
- * Note that there could have been another update in the MultiXact.
- * In that case, we need to check whether it committed or aborted.
- * If it aborted we are safe to update it again; otherwise there is
- * an update conflict, and we have to return HeapTupleUpdated
- * below.
+ * Note that there could have been another update in the
+ * MultiXact. In that case, we need to check whether it committed
+ * or aborted. If it aborted we are safe to update it again;
+ * otherwise there is an update conflict, and we have to return
+ * HeapTupleUpdated below.
*
* In the LockTupleExclusive case, we still need to preserve the
* surviving members: those would include the tuple locks we had
@@ -3167,21 +3169,21 @@ l2:
else
{
/*
- * If it's just a key-share locker, and we're not changing the
- * key columns, we don't need to wait for it to end; but we
- * need to preserve it as locker.
+ * If it's just a key-share locker, and we're not changing the key
+ * columns, we don't need to wait for it to end; but we need to
+ * preserve it as locker.
*/
if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
{
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/*
- * recheck the locker; if someone else changed the tuple while we
- * weren't looking, start over.
+ * recheck the locker; if someone else changed the tuple while
+ * we weren't looking, start over.
*/
if ((oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
+ xwait))
goto l2;
can_continue = true;
@@ -3194,13 +3196,13 @@ l2:
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/*
- * xwait is done, but if xwait had just locked the tuple then some
- * other xact could update this tuple before we get to this point.
- * Check for xmax change, and start over if so.
+ * xwait is done, but if xwait had just locked the tuple then
+ * some other xact could update this tuple before we get to
+ * this point. Check for xmax change, and start over if so.
*/
if ((oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
+ xwait))
goto l2;
/* Otherwise check if it committed or aborted */
@@ -3247,8 +3249,8 @@ l2:
* visible while we were busy locking the buffer, or during some
* subsequent window during which we had it unlocked, we'll have to unlock
* and re-lock, to avoid holding the buffer lock across an I/O. That's a
- * bit unfortunate, especially since we'll now have to recheck whether
- * the tuple has been locked or updated under us, but hopefully it won't
+ * bit unfortunate, especially since we'll now have to recheck whether the
+ * tuple has been locked or updated under us, but hopefully it won't
* happen very often.
*/
if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
@@ -3656,9 +3658,9 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
/*
* Extract the corresponding values. XXX this is pretty inefficient if
- * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do a
- * single heap_deform_tuple call on each tuple, instead? But that doesn't
- * work for system columns ...
+ * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do
+ * a single heap_deform_tuple call on each tuple, instead? But that
+ * doesn't work for system columns ...
*/
value1 = heap_getattr(tup1, attrnum, tupdesc, &isnull1);
value2 = heap_getattr(tup2, attrnum, tupdesc, &isnull2);
@@ -3720,12 +3722,12 @@ HeapSatisfiesHOTandKeyUpdate(Relation relation,
bool *satisfies_hot, bool *satisfies_key,
HeapTuple oldtup, HeapTuple newtup)
{
- int next_hot_attnum;
- int next_key_attnum;
- bool hot_result = true;
- bool key_result = true;
- bool key_done = false;
- bool hot_done = false;
+ int next_hot_attnum;
+ int next_key_attnum;
+ bool hot_result = true;
+ bool key_result = true;
+ bool key_done = false;
+ bool hot_done = false;
next_hot_attnum = bms_first_member(hot_attrs);
if (next_hot_attnum == -1)
@@ -3743,8 +3745,8 @@ HeapSatisfiesHOTandKeyUpdate(Relation relation,
for (;;)
{
- int check_now;
- bool changed;
+ int check_now;
+ bool changed;
/* both bitmapsets are now empty */
if (key_done && hot_done)
@@ -3813,7 +3815,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
result = heap_update(relation, otid, tup,
GetCurrentCommandId(true), InvalidSnapshot,
- true /* wait for commit */,
+ true /* wait for commit */ ,
&hufd, &lockmode);
switch (result)
{
@@ -3843,7 +3845,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
static MultiXactStatus
get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
{
- MultiXactStatus retval;
+ MultiXactStatus retval;
if (is_update)
retval = tupleLockExtraInfo[mode].updstatus;
@@ -3933,7 +3935,7 @@ l3:
uint16 infomask;
uint16 infomask2;
bool require_sleep;
- ItemPointerData t_ctid;
+ ItemPointerData t_ctid;
/* must copy state data before unlocking buffer */
xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
@@ -3944,22 +3946,22 @@ l3:
LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
/*
- * If any subtransaction of the current top transaction already holds a
- * lock as strong or stronger than what we're requesting, we
+ * If any subtransaction of the current top transaction already holds
+ * a lock as strong or stronger than what we're requesting, we
* effectively hold the desired lock already. We *must* succeed
- * without trying to take the tuple lock, else we will deadlock against
- * anyone wanting to acquire a stronger lock.
+ * without trying to take the tuple lock, else we will deadlock
+ * against anyone wanting to acquire a stronger lock.
*/
if (infomask & HEAP_XMAX_IS_MULTI)
{
- int i;
- int nmembers;
+ int i;
+ int nmembers;
MultiXactMember *members;
/*
- * We don't need to allow old multixacts here; if that had been the
- * case, HeapTupleSatisfiesUpdate would have returned MayBeUpdated
- * and we wouldn't be here.
+ * We don't need to allow old multixacts here; if that had been
+ * the case, HeapTupleSatisfiesUpdate would have returned
+ * MayBeUpdated and we wouldn't be here.
*/
nmembers = GetMultiXactIdMembers(xwait, &members, false);
@@ -3967,7 +3969,7 @@ l3:
{
if (TransactionIdIsCurrentTransactionId(members[i].xid))
{
- LockTupleMode membermode;
+ LockTupleMode membermode;
membermode = TUPLOCK_from_mxstatus(members[i].status);
@@ -4001,8 +4003,8 @@ l3:
if (!ConditionalLockTupleTuplock(relation, tid, mode))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
- errmsg("could not obtain lock on row in relation \"%s\"",
- RelationGetRelationName(relation))));
+ errmsg("could not obtain lock on row in relation \"%s\"",
+ RelationGetRelationName(relation))));
}
else
LockTupleTuplock(relation, tid, mode);
@@ -4023,34 +4025,34 @@ l3:
* continue if the key hasn't been modified.
*
* However, if there are updates, we need to walk the update chain
- * to mark future versions of the row as locked, too. That way, if
- * somebody deletes that future version, we're protected against
- * the key going away. This locking of future versions could block
- * momentarily, if a concurrent transaction is deleting a key; or
- * it could return a value to the effect that the transaction
- * deleting the key has already committed. So we do this before
- * re-locking the buffer; otherwise this would be prone to
- * deadlocks.
+ * to mark future versions of the row as locked, too. That way,
+ * if somebody deletes that future version, we're protected
+ * against the key going away. This locking of future versions
+ * could block momentarily, if a concurrent transaction is
+ * deleting a key; or it could return a value to the effect that
+ * the transaction deleting the key has already committed. So we
+ * do this before re-locking the buffer; otherwise this would be
+ * prone to deadlocks.
*
* Note that the TID we're locking was grabbed before we unlocked
- * the buffer. For it to change while we're not looking, the other
- * properties we're testing for below after re-locking the buffer
- * would also change, in which case we would restart this loop
- * above.
+ * the buffer. For it to change while we're not looking, the
+ * other properties we're testing for below after re-locking the
+ * buffer would also change, in which case we would restart this
+ * loop above.
*/
if (!(infomask2 & HEAP_KEYS_UPDATED))
{
- bool updated;
+ bool updated;
updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
/*
- * If there are updates, follow the update chain; bail out
- * if that cannot be done.
+ * If there are updates, follow the update chain; bail out if
+ * that cannot be done.
*/
if (follow_updates && updated)
{
- HTSU_Result res;
+ HTSU_Result res;
res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
GetCurrentTransactionId(),
@@ -4069,8 +4071,9 @@ l3:
/*
* Make sure it's still an appropriate lock, else start over.
* Also, if it wasn't updated before we released the lock, but
- * is updated now, we start over too; the reason is that we now
- * need to follow the update chain to lock the new versions.
+ * is updated now, we start over too; the reason is that we
+ * now need to follow the update chain to lock the new
+ * versions.
*/
if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
@@ -4114,20 +4117,20 @@ l3:
{
/*
* If we're requesting NoKeyExclusive, we might also be able to
- * avoid sleeping; just ensure that there's no other lock type than
- * KeyShare. Note that this is a bit more involved than just
+ * avoid sleeping; just ensure that there's no other lock type
+ * than KeyShare. Note that this is a bit more involved than just
* checking hint bits -- we need to expand the multixact to figure
* out lock modes for each one (unless there was only one such
* locker).
*/
if (infomask & HEAP_XMAX_IS_MULTI)
{
- int nmembers;
+ int nmembers;
MultiXactMember *members;
/*
- * We don't need to allow old multixacts here; if that had been
- * the case, HeapTupleSatisfiesUpdate would have returned
+ * We don't need to allow old multixacts here; if that had
+ * been the case, HeapTupleSatisfiesUpdate would have returned
* MayBeUpdated and we wouldn't be here.
*/
nmembers = GetMultiXactIdMembers(xwait, &members, false);
@@ -4135,15 +4138,15 @@ l3:
if (nmembers <= 0)
{
/*
- * No need to keep the previous xmax here. This is unlikely
- * to happen.
+ * No need to keep the previous xmax here. This is
+ * unlikely to happen.
*/
require_sleep = false;
}
else
{
- int i;
- bool allowed = true;
+ int i;
+ bool allowed = true;
for (i = 0; i < nmembers; i++)
{
@@ -4180,8 +4183,8 @@ l3:
/* if the xmax changed in the meantime, start over */
if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+ xwait))
goto l3;
/* otherwise, we're good */
require_sleep = false;
@@ -4221,7 +4224,7 @@ l3:
if (follow_updates &&
!HEAP_XMAX_IS_LOCKED_ONLY(infomask))
{
- HTSU_Result res;
+ HTSU_Result res;
res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
GetCurrentTransactionId(),
@@ -4243,15 +4246,15 @@ l3:
* for xmax change, and start over if so.
*/
if (!(tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+ xwait))
goto l3;
/*
* Of course, the multixact might not be done here: if we're
* requesting a light lock mode, other transactions with light
* locks could still be alive, as well as locks owned by our
- * own xact or other subxacts of this backend. We need to
+ * own xact or other subxacts of this backend. We need to
* preserve the surviving MultiXact members. Note that it
* isn't absolutely necessary in the latter case, but doing so
* is simpler.
@@ -4275,7 +4278,7 @@ l3:
if (follow_updates &&
!HEAP_XMAX_IS_LOCKED_ONLY(infomask))
{
- HTSU_Result res;
+ HTSU_Result res;
res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
GetCurrentTransactionId(),
@@ -4294,15 +4297,15 @@ l3:
/*
* xwait is done, but if xwait had just locked the tuple then
* some other xact could update this tuple before we get to
- * this point. Check for xmax change, and start over if so.
+ * this point. Check for xmax change, and start over if so.
*/
if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+ xwait))
goto l3;
/*
- * Otherwise check if it committed or aborted. Note we cannot
+ * Otherwise check if it committed or aborted. Note we cannot
* be here if the tuple was only locked by somebody who didn't
* conflict with us; that should have been handled above. So
* that transaction must necessarily be gone by now.
@@ -4355,8 +4358,8 @@ failed:
* for cases where it is a plain TransactionId.
*
* Note in particular that this covers the case where we already hold
- * exclusive lock on the tuple and the caller only wants key share or share
- * lock. It would certainly not do to give up the exclusive lock.
+ * exclusive lock on the tuple and the caller only wants key share or
+ * share lock. It would certainly not do to give up the exclusive lock.
*/
if (!(old_infomask & (HEAP_XMAX_INVALID |
HEAP_XMAX_COMMITTED |
@@ -4379,13 +4382,12 @@ failed:
}
/*
- * If this is the first possibly-multixact-able operation in the
- * current transaction, set my per-backend OldestMemberMXactId setting.
- * We can be certain that the transaction will never become a member of
- * any older MultiXactIds than that. (We have to do this even if we
- * end up just using our own TransactionId below, since some other
- * backend could incorporate our XID into a MultiXact immediately
- * afterwards.)
+ * If this is the first possibly-multixact-able operation in the current
+ * transaction, set my per-backend OldestMemberMXactId setting. We can be
+ * certain that the transaction will never become a member of any older
+ * MultiXactIds than that. (We have to do this even if we end up just
+ * using our own TransactionId below, since some other backend could
+ * incorporate our XID into a MultiXact immediately afterwards.)
*/
MultiXactIdSetOldestMember();
@@ -4419,11 +4421,11 @@ failed:
HeapTupleHeaderSetXmax(tuple->t_data, xid);
/*
- * Make sure there is no forward chain link in t_ctid. Note that in the
+ * Make sure there is no forward chain link in t_ctid. Note that in the
* cases where the tuple has been updated, we must not overwrite t_ctid,
* because it was set by the updater. Moreover, if the tuple has been
- * updated, we need to follow the update chain to lock the new versions
- * of the tuple as well.
+ * updated, we need to follow the update chain to lock the new versions of
+ * the tuple as well.
*/
if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
tuple->t_data->t_ctid = *tid;
@@ -4514,9 +4516,9 @@ compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
TransactionId *result_xmax, uint16 *result_infomask,
uint16 *result_infomask2)
{
- TransactionId new_xmax;
- uint16 new_infomask,
- new_infomask2;
+ TransactionId new_xmax;
+ uint16 new_infomask,
+ new_infomask2;
l5:
new_infomask = 0;
@@ -4562,11 +4564,11 @@ l5:
}
else if (old_infomask & HEAP_XMAX_IS_MULTI)
{
- MultiXactStatus new_status;
+ MultiXactStatus new_status;
/*
- * Currently we don't allow XMAX_COMMITTED to be set for multis,
- * so cross-check.
+ * Currently we don't allow XMAX_COMMITTED to be set for multis, so
+ * cross-check.
*/
Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
@@ -4587,10 +4589,11 @@ l5:
/*
* If the XMAX is already a MultiXactId, then we need to expand it to
- * include add_to_xmax; but if all the members were lockers and are all
- * gone, we can do away with the IS_MULTI bit and just set add_to_xmax
- * as the only locker/updater. If all lockers are gone and we have an
- * updater that aborted, we can also do without a multi.
+ * include add_to_xmax; but if all the members were lockers and are
+ * all gone, we can do away with the IS_MULTI bit and just set
+ * add_to_xmax as the only locker/updater. If all lockers are gone
+ * and we have an updater that aborted, we can also do without a
+ * multi.
*
* The cost of doing GetMultiXactIdMembers would be paid by
* MultiXactIdExpand if we weren't to do this, so this check is not
@@ -4624,8 +4627,8 @@ l5:
* It's a committed update, so we need to preserve him as updater of
* the tuple.
*/
- MultiXactStatus status;
- MultiXactStatus new_status;
+ MultiXactStatus status;
+ MultiXactStatus new_status;
if (old_infomask2 & HEAP_KEYS_UPDATED)
status = MultiXactStatusUpdate;
@@ -4633,6 +4636,7 @@ l5:
status = MultiXactStatusNoKeyUpdate;
new_status = get_mxact_status_for_lock(mode, is_update);
+
/*
* since it's not running, it's obviously impossible for the old
* updater to be identical to the current one, so we need not check
@@ -4648,8 +4652,8 @@ l5:
* create a new MultiXactId that includes both the old locker or
* updater and our own TransactionId.
*/
- MultiXactStatus status;
- MultiXactStatus new_status;
+ MultiXactStatus status;
+ MultiXactStatus new_status;
if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
{
@@ -4668,8 +4672,8 @@ l5:
{
/*
* LOCK_ONLY can be present alone only when a page has been
- * upgraded by pg_upgrade. But in that case,
- * TransactionIdIsInProgress() should have returned false. We
+ * upgraded by pg_upgrade. But in that case,
+ * TransactionIdIsInProgress() should have returned false. We
* assume it's no longer locked in this case.
*/
elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
@@ -4696,8 +4700,8 @@ l5:
*/
if (xmax == add_to_xmax)
{
- LockTupleMode old_mode = TUPLOCK_from_mxstatus(status);
- bool old_isupd = ISUPDATE_from_mxstatus(status);
+ LockTupleMode old_mode = TUPLOCK_from_mxstatus(status);
+ bool old_isupd = ISUPDATE_from_mxstatus(status);
/*
* We can do this if the new LockTupleMode is higher or equal than
@@ -4728,8 +4732,8 @@ l5:
* It's a committed update, so we gotta preserve him as updater of the
* tuple.
*/
- MultiXactStatus status;
- MultiXactStatus new_status;
+ MultiXactStatus status;
+ MultiXactStatus new_status;
if (old_infomask2 & HEAP_KEYS_UPDATED)
status = MultiXactStatusUpdate;
@@ -4737,6 +4741,7 @@ l5:
status = MultiXactStatusNoKeyUpdate;
new_status = get_mxact_status_for_lock(mode, is_update);
+
/*
* since it's not running, it's obviously impossible for the old
* updater to be identical to the current one, so we need not check
@@ -4774,14 +4779,14 @@ static HTSU_Result
heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
LockTupleMode mode)
{
- ItemPointerData tupid;
- HeapTupleData mytup;
- Buffer buf;
- uint16 new_infomask,
- new_infomask2,
- old_infomask;
- TransactionId xmax,
- new_xmax;
+ ItemPointerData tupid;
+ HeapTupleData mytup;
+ Buffer buf;
+ uint16 new_infomask,
+ new_infomask2,
+ old_infomask;
+ TransactionId xmax,
+ new_xmax;
ItemPointerCopy(tid, &tupid);
@@ -4802,16 +4807,17 @@ l4:
xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
/*
- * If this tuple is updated and the key has been modified (or deleted),
- * what we do depends on the status of the updating transaction: if
- * it's live, we sleep until it finishes; if it has committed, we have
- * to fail (i.e. return HeapTupleUpdated); if it aborted, we ignore it.
- * For updates that didn't touch the key, we can just plough ahead.
+ * If this tuple is updated and the key has been modified (or
+ * deleted), what we do depends on the status of the updating
+ * transaction: if it's live, we sleep until it finishes; if it has
+ * committed, we have to fail (i.e. return HeapTupleUpdated); if it
+ * aborted, we ignore it. For updates that didn't touch the key, we
+ * can just plough ahead.
*/
if (!(old_infomask & HEAP_XMAX_INVALID) &&
(mytup.t_data->t_infomask2 & HEAP_KEYS_UPDATED))
{
- TransactionId update_xid;
+ TransactionId update_xid;
/*
* Note: we *must* check TransactionIdIsInProgress before
@@ -4832,7 +4838,7 @@ l4:
goto l4;
}
else if (TransactionIdDidAbort(update_xid))
- ; /* okay to proceed */
+ ; /* okay to proceed */
else if (TransactionIdDidCommit(update_xid))
{
UnlockReleaseBuffer(buf);
@@ -4861,7 +4867,7 @@ l4:
{
xl_heap_lock_updated xlrec;
XLogRecPtr recptr;
- XLogRecData rdata[2];
+ XLogRecData rdata[2];
Page page = BufferGetPage(buf);
xlrec.target.node = rel->rd_node;
@@ -4889,7 +4895,7 @@ l4:
/* if we find the end of update chain, we're done. */
if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
- ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
+ ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
HeapTupleHeaderIsOnlyLocked(mytup.t_data))
{
UnlockReleaseBuffer(buf);
@@ -4904,13 +4910,13 @@ l4:
/*
* heap_lock_updated_tuple
- * Follow update chain when locking an updated tuple, acquiring locks (row
- * marks) on the updated versions.
+ * Follow update chain when locking an updated tuple, acquiring locks (row
+ * marks) on the updated versions.
*
* The initial tuple is assumed to be already locked.
*
* This function doesn't check visibility, it just inconditionally marks the
- * tuple(s) as locked. If any tuple in the updated chain is being deleted
+ * tuple(s) as locked. If any tuple in the updated chain is being deleted
* concurrently (or updated with the key being modified), sleep until the
* transaction doing it is finished.
*
@@ -4932,12 +4938,12 @@ heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
{
/*
* If this is the first possibly-multixact-able operation in the
- * current transaction, set my per-backend OldestMemberMXactId setting.
- * We can be certain that the transaction will never become a member of
- * any older MultiXactIds than that. (We have to do this even if we
- * end up just using our own TransactionId below, since some other
- * backend could incorporate our XID into a MultiXact immediately
- * afterwards.)
+ * current transaction, set my per-backend OldestMemberMXactId
+ * setting. We can be certain that the transaction will never become a
+ * member of any older MultiXactIds than that. (We have to do this
+ * even if we end up just using our own TransactionId below, since
+ * some other backend could incorporate our XID into a MultiXact
+ * immediately afterwards.)
*/
MultiXactIdSetOldestMember();
@@ -5117,9 +5123,9 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
HeapTupleHeaderSetXmax(tuple, InvalidTransactionId);
/*
- * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED
- * + LOCKED. Normalize to INVALID just to be sure no one gets
- * confused. Also get rid of the HEAP_KEYS_UPDATED bit.
+ * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
+ * LOCKED. Normalize to INVALID just to be sure no one gets confused.
+ * Also get rid of the HEAP_KEYS_UPDATED bit.
*/
tuple->t_infomask &= ~HEAP_XMAX_BITS;
tuple->t_infomask |= HEAP_XMAX_INVALID;
@@ -5172,13 +5178,13 @@ static void
GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
uint16 *new_infomask2)
{
- int nmembers;
- MultiXactMember *members;
- int i;
- uint16 bits = HEAP_XMAX_IS_MULTI;
- uint16 bits2 = 0;
- bool has_update = false;
- LockTupleMode strongest = LockTupleKeyShare;
+ int nmembers;
+ MultiXactMember *members;
+ int i;
+ uint16 bits = HEAP_XMAX_IS_MULTI;
+ uint16 bits2 = 0;
+ bool has_update = false;
+ LockTupleMode strongest = LockTupleKeyShare;
/*
* We only use this in multis we just created, so they cannot be values
@@ -5188,7 +5194,7 @@ GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
for (i = 0; i < nmembers; i++)
{
- LockTupleMode mode;
+ LockTupleMode mode;
/*
* Remember the strongest lock mode held by any member of the
@@ -5249,22 +5255,22 @@ GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
static TransactionId
MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
{
- TransactionId update_xact = InvalidTransactionId;
- MultiXactMember *members;
- int nmembers;
+ TransactionId update_xact = InvalidTransactionId;
+ MultiXactMember *members;
+ int nmembers;
Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
Assert(t_infomask & HEAP_XMAX_IS_MULTI);
/*
- * Since we know the LOCK_ONLY bit is not set, this cannot be a
- * multi from pre-pg_upgrade.
+ * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from
+ * pre-pg_upgrade.
*/
nmembers = GetMultiXactIdMembers(xmax, &members, false);
if (nmembers > 0)
{
- int i;
+ int i;
for (i = 0; i < nmembers; i++)
{
@@ -5284,6 +5290,7 @@ MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
members[i].status == MultiXactStatusUpdate);
update_xact = members[i].xid;
#ifndef USE_ASSERT_CHECKING
+
/*
* in an assert-enabled build, walk the whole array to ensure
* there's no other updater.
@@ -5300,7 +5307,7 @@ MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
/*
* HeapTupleGetUpdateXid
- * As above, but use a HeapTupleHeader
+ * As above, but use a HeapTupleHeader
*
* See also HeapTupleHeaderGetUpdateXid, which can be used without previously
* checking the hint bits.
@@ -5314,7 +5321,7 @@ HeapTupleGetUpdateXid(HeapTupleHeader tuple)
/*
* Do_MultiXactIdWait
- * Actual implementation for the two functions below.
+ * Actual implementation for the two functions below.
*
* We do this by sleeping on each member using XactLockTableWait. Any
* members that belong to the current backend are *not* waited for, however;
@@ -5432,7 +5439,7 @@ ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
* heap_tuple_needs_freeze
*
* Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
- * are older than the specified cutoff XID or MultiXactId. If so, return TRUE.
+ * are older than the specified cutoff XID or MultiXactId. If so, return TRUE.
*
* It doesn't matter whether the tuple is alive or dead, we are checking
* to see if a tuple needs to be removed or frozen to avoid wraparound.
@@ -6091,7 +6098,7 @@ heap_xlog_freeze(XLogRecPtr lsn, XLogRecord *record)
{
xl_heap_freeze *xlrec = (xl_heap_freeze *) XLogRecGetData(record);
TransactionId cutoff_xid = xlrec->cutoff_xid;
- MultiXactId cutoff_multi = xlrec->cutoff_multi;
+ MultiXactId cutoff_multi = xlrec->cutoff_multi;
Buffer buffer;
Page page;
@@ -6361,7 +6368,7 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
return;
page = (Page) BufferGetPage(buffer);
- if (lsn <= PageGetLSN(page)) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
@@ -6729,7 +6736,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
goto newt;
page = (Page) BufferGetPage(obuffer);
- if (lsn <= PageGetLSN(page)) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
if (samepage)
{
@@ -6931,7 +6938,7 @@ heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record)
return;
page = (Page) BufferGetPage(buffer);
- if (lsn <= PageGetLSN(page)) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
@@ -6962,7 +6969,7 @@ static void
heap_xlog_lock_updated(XLogRecPtr lsn, XLogRecord *record)
{
xl_heap_lock_updated *xlrec =
- (xl_heap_lock_updated *) XLogRecGetData(record);
+ (xl_heap_lock_updated *) XLogRecGetData(record);
Buffer buffer;
Page page;
OffsetNumber offnum;
@@ -6983,7 +6990,7 @@ heap_xlog_lock_updated(XLogRecPtr lsn, XLogRecord *record)
return;
page = (Page) BufferGetPage(buffer);
- if (lsn <= PageGetLSN(page)) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
@@ -7033,7 +7040,7 @@ heap_xlog_inplace(XLogRecPtr lsn, XLogRecord *record)
return;
page = (Page) BufferGetPage(buffer);
- if (lsn <= PageGetLSN(page)) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index a3aad3adf91..7105f0ab651 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -129,7 +129,7 @@ typedef struct RewriteStateData
* determine tuple visibility */
TransactionId rs_freeze_xid;/* Xid that will be used as freeze cutoff
* point */
- MultiXactId rs_freeze_multi;/* MultiXactId that will be used as freeze
+ MultiXactId rs_freeze_multi;/* MultiXactId that will be used as freeze
* cutoff point for multixacts */
MemoryContext rs_cxt; /* for hash tables and entries and tuples in
* them */
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
index af64fe97e89..ffec6cbcc0c 100644
--- a/src/backend/access/heap/visibilitymap.c
+++ b/src/backend/access/heap/visibilitymap.c
@@ -292,7 +292,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
*/
if (DataChecksumsEnabled())
{
- Page heapPage = BufferGetPage(heapBuf);
+ Page heapPage = BufferGetPage(heapBuf);
/* caller is expected to set PD_ALL_VISIBLE first */
Assert(PageIsAllVisible(heapPage));
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 1d9cb7d1a06..f4077533bf5 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -532,8 +532,8 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX
START_CRIT_SECTION();
/*
- * We don't do MarkBufferDirty here because we're about to initialise
- * the page, and nobody else can see it yet.
+ * We don't do MarkBufferDirty here because we're about to initialise the
+ * page, and nobody else can see it yet.
*/
/* XLOG stuff */
@@ -552,8 +552,8 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX
XLogInsert(RM_BTREE_ID, XLOG_BTREE_REUSE_PAGE, rdata);
/*
- * We don't do PageSetLSN here because we're about to initialise
- * the page, so no need.
+ * We don't do PageSetLSN here because we're about to initialise the
+ * page, so no need.
*/
}
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index 4aabdba3d9e..cb5867ee3e6 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -373,7 +373,7 @@ btree_xlog_split(bool onleft, bool isroot,
* Note that this code ensures that the items remaining on the
* left page are in the correct item number order, but it does not
* reproduce the physical order they would have had. Is this
- * worth changing? See also _bt_restore_page().
+ * worth changing? See also _bt_restore_page().
*/
Page lpage = (Page) BufferGetPage(lbuf);
BTPageOpaque lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
@@ -606,18 +606,18 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec)
/*
* In what follows, we have to examine the previous state of the index
- * page, as well as the heap page(s) it points to. This is only valid if
+ * page, as well as the heap page(s) it points to. This is only valid if
* WAL replay has reached a consistent database state; which means that
- * the preceding check is not just an optimization, but is *necessary*.
- * We won't have let in any user sessions before we reach consistency.
+ * the preceding check is not just an optimization, but is *necessary*. We
+ * won't have let in any user sessions before we reach consistency.
*/
if (!reachedConsistency)
elog(PANIC, "btree_xlog_delete_get_latestRemovedXid: cannot operate with inconsistent data");
/*
- * Get index page. If the DB is consistent, this should not fail, nor
+ * Get index page. If the DB is consistent, this should not fail, nor
* should any of the heap page fetches below. If one does, we return
- * InvalidTransactionId to cancel all HS transactions. That's probably
+ * InvalidTransactionId to cancel all HS transactions. That's probably
* overkill, but it's safe, and certainly better than panicking here.
*/
ibuffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
@@ -701,10 +701,10 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec)
/*
* XXX If all heap tuples were LP_DEAD then we will be returning
- * InvalidTransactionId here, causing conflict for all HS
- * transactions. That should happen very rarely (reasoning please?). Also
- * note that caller can't tell the difference between this case and the
- * fast path exit above. May need to change that in future.
+ * InvalidTransactionId here, causing conflict for all HS transactions.
+ * That should happen very rarely (reasoning please?). Also note that
+ * caller can't tell the difference between this case and the fast path
+ * exit above. May need to change that in future.
*/
return latestRemovedXid;
}
@@ -721,7 +721,7 @@ btree_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
* If we have any conflict processing to do, it must happen before we
* update the page.
*
- * Btree delete records can conflict with standby queries. You might
+ * Btree delete records can conflict with standby queries. You might
* think that vacuum records would conflict as well, but we've handled
* that already. XLOG_HEAP2_CLEANUP_INFO records provide the highest xid
* cleaned by the vacuum of the heap and so we can resolve any conflicts
diff --git a/src/backend/access/rmgrdesc/clogdesc.c b/src/backend/access/rmgrdesc/clogdesc.c
index 92be7130382..2655f083bdc 100644
--- a/src/backend/access/rmgrdesc/clogdesc.c
+++ b/src/backend/access/rmgrdesc/clogdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* clogdesc.c
- * rmgr descriptor routines for access/transam/clog.c
+ * rmgr descriptor routines for access/transam/clog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/clogdesc.c
+ * src/backend/access/rmgrdesc/clogdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/dbasedesc.c b/src/backend/access/rmgrdesc/dbasedesc.c
index 55d435248f3..2354c5a5d83 100644
--- a/src/backend/access/rmgrdesc/dbasedesc.c
+++ b/src/backend/access/rmgrdesc/dbasedesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* dbasedesc.c
- * rmgr descriptor routines for commands/dbcommands.c
+ * rmgr descriptor routines for commands/dbcommands.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/dbasedesc.c
+ * src/backend/access/rmgrdesc/dbasedesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/gindesc.c b/src/backend/access/rmgrdesc/gindesc.c
index 53bc482ec21..5400c8628fc 100644
--- a/src/backend/access/rmgrdesc/gindesc.c
+++ b/src/backend/access/rmgrdesc/gindesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* gindesc.c
- * rmgr descriptor routines for access/transam/gin/ginxlog.c
+ * rmgr descriptor routines for access/transam/gin/ginxlog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/gindesc.c
+ * src/backend/access/rmgrdesc/gindesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/gistdesc.c b/src/backend/access/rmgrdesc/gistdesc.c
index da81595fd41..c58c8a261ad 100644
--- a/src/backend/access/rmgrdesc/gistdesc.c
+++ b/src/backend/access/rmgrdesc/gistdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* gistdesc.c
- * rmgr descriptor routines for access/gist/gistxlog.c
+ * rmgr descriptor routines for access/gist/gistxlog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/gistdesc.c
+ * src/backend/access/rmgrdesc/gistdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/hashdesc.c b/src/backend/access/rmgrdesc/hashdesc.c
index a50008478e2..6d4a278adc2 100644
--- a/src/backend/access/rmgrdesc/hashdesc.c
+++ b/src/backend/access/rmgrdesc/hashdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* hashdesc.c
- * rmgr descriptor routines for access/hash/hash.c
+ * rmgr descriptor routines for access/hash/hash.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/hashdesc.c
+ * src/backend/access/rmgrdesc/hashdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/heapdesc.c b/src/backend/access/rmgrdesc/heapdesc.c
index 272208417a3..bc8b98528d6 100644
--- a/src/backend/access/rmgrdesc/heapdesc.c
+++ b/src/backend/access/rmgrdesc/heapdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* heapdesc.c
- * rmgr descriptor routines for access/heap/heapam.c
+ * rmgr descriptor routines for access/heap/heapam.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/heapdesc.c
+ * src/backend/access/rmgrdesc/heapdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/mxactdesc.c b/src/backend/access/rmgrdesc/mxactdesc.c
index 3e6cba062d3..b2466a1e2b6 100644
--- a/src/backend/access/rmgrdesc/mxactdesc.c
+++ b/src/backend/access/rmgrdesc/mxactdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* mxactdesc.c
- * rmgr descriptor routines for access/transam/multixact.c
+ * rmgr descriptor routines for access/transam/multixact.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/mxactdesc.c
+ * src/backend/access/rmgrdesc/mxactdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/nbtdesc.c b/src/backend/access/rmgrdesc/nbtdesc.c
index 400e11b0506..b8f0d69df0c 100644
--- a/src/backend/access/rmgrdesc/nbtdesc.c
+++ b/src/backend/access/rmgrdesc/nbtdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* nbtdesc.c
- * rmgr descriptor routines for access/nbtree/nbtxlog.c
+ * rmgr descriptor routines for access/nbtree/nbtxlog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/nbtdesc.c
+ * src/backend/access/rmgrdesc/nbtdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/relmapdesc.c b/src/backend/access/rmgrdesc/relmapdesc.c
index 4c731c9b568..d3fe2674356 100644
--- a/src/backend/access/rmgrdesc/relmapdesc.c
+++ b/src/backend/access/rmgrdesc/relmapdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* relmapdesc.c
- * rmgr descriptor routines for utils/cache/relmapper.c
+ * rmgr descriptor routines for utils/cache/relmapper.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/relmapdesc.c
+ * src/backend/access/rmgrdesc/relmapdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/seqdesc.c b/src/backend/access/rmgrdesc/seqdesc.c
index 4d6a16adae1..90400e201a9 100644
--- a/src/backend/access/rmgrdesc/seqdesc.c
+++ b/src/backend/access/rmgrdesc/seqdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* seqdesc.c
- * rmgr descriptor routines for commands/sequence.c
+ * rmgr descriptor routines for commands/sequence.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/seqdesc.c
+ * src/backend/access/rmgrdesc/seqdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/smgrdesc.c b/src/backend/access/rmgrdesc/smgrdesc.c
index 176d8142a60..355153c613e 100644
--- a/src/backend/access/rmgrdesc/smgrdesc.c
+++ b/src/backend/access/rmgrdesc/smgrdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* smgrdesc.c
- * rmgr descriptor routines for catalog/storage.c
+ * rmgr descriptor routines for catalog/storage.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/smgrdesc.c
+ * src/backend/access/rmgrdesc/smgrdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/spgdesc.c b/src/backend/access/rmgrdesc/spgdesc.c
index aca22600d42..fa71a4d637a 100644
--- a/src/backend/access/rmgrdesc/spgdesc.c
+++ b/src/backend/access/rmgrdesc/spgdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* spgdesc.c
- * rmgr descriptor routines for access/spgist/spgxlog.c
+ * rmgr descriptor routines for access/spgist/spgxlog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/spgdesc.c
+ * src/backend/access/rmgrdesc/spgdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/standbydesc.c b/src/backend/access/rmgrdesc/standbydesc.c
index 5fb6f54b3b6..8e0c37d2f51 100644
--- a/src/backend/access/rmgrdesc/standbydesc.c
+++ b/src/backend/access/rmgrdesc/standbydesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* standbydesc.c
- * rmgr descriptor routines for storage/ipc/standby.c
+ * rmgr descriptor routines for storage/ipc/standby.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/standbydesc.c
+ * src/backend/access/rmgrdesc/standbydesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/tblspcdesc.c b/src/backend/access/rmgrdesc/tblspcdesc.c
index c2c88cd6937..76f7ca71f24 100644
--- a/src/backend/access/rmgrdesc/tblspcdesc.c
+++ b/src/backend/access/rmgrdesc/tblspcdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* tblspcdesc.c
- * rmgr descriptor routines for commands/tablespace.c
+ * rmgr descriptor routines for commands/tablespace.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/tblspcdesc.c
+ * src/backend/access/rmgrdesc/tblspcdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/xactdesc.c b/src/backend/access/rmgrdesc/xactdesc.c
index 11c6912753a..c9c7b4a2082 100644
--- a/src/backend/access/rmgrdesc/xactdesc.c
+++ b/src/backend/access/rmgrdesc/xactdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* xactdesc.c
- * rmgr descriptor routines for access/transam/xact.c
+ * rmgr descriptor routines for access/transam/xact.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/xactdesc.c
+ * src/backend/access/rmgrdesc/xactdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/xlogdesc.c b/src/backend/access/rmgrdesc/xlogdesc.c
index 4c68b6ae0a3..2bad52748a3 100644
--- a/src/backend/access/rmgrdesc/xlogdesc.c
+++ b/src/backend/access/rmgrdesc/xlogdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* xlogdesc.c
- * rmgr descriptor routines for access/transam/xlog.c
+ * rmgr descriptor routines for access/transam/xlog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/xlogdesc.c
+ * src/backend/access/rmgrdesc/xlogdesc.c
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ xlog_desc(StringInfo buf, uint8 xl_info, char *rec)
"tli %u; prev tli %u; fpw %s; xid %u/%u; oid %u; multi %u; offset %u; "
"oldest xid %u in DB %u; oldest multi %u in DB %u; "
"oldest running xid %u; %s",
- (uint32) (checkpoint->redo >> 32), (uint32) checkpoint->redo,
+ (uint32) (checkpoint->redo >> 32), (uint32) checkpoint->redo,
checkpoint->ThisTimeLineID,
checkpoint->PrevTimeLineID,
checkpoint->fullPageWrites ? "true" : "false",
@@ -84,7 +84,8 @@ xlog_desc(StringInfo buf, uint8 xl_info, char *rec)
}
else if (info == XLOG_HINT)
{
- BkpBlock *bkp = (BkpBlock *) rec;
+ BkpBlock *bkp = (BkpBlock *) rec;
+
appendStringInfo(buf, "page hint: %s block %u",
relpathperm(bkp->node, bkp->fork),
bkp->block);
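A note on the "%X/%X" idiom that recurs throughout this patch: an XLogRecPtr is a single 64-bit WAL position, and these call sites print it as its high and low 32-bit halves. A minimal standalone sketch of the same formatting (the function name here is hypothetical, not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t XLogRecPtr;

    /* Print a WAL position the way the descriptor routines do:
     * high 32 bits, a slash, then the low 32 bits, both in hex. */
    static void
    print_lsn(XLogRecPtr lsn)
    {
        printf("%X/%X\n", (uint32_t) (lsn >> 32), (uint32_t) lsn);
    }

    int
    main(void)
    {
        print_lsn((XLogRecPtr) 0x123456789ABCDEFULL);   /* prints 1234567/89ABCDEF */
        return 0;
    }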
diff --git a/src/backend/access/spgist/spgtextproc.c b/src/backend/access/spgist/spgtextproc.c
index 8d50dcc6183..e430d9c1ace 100644
--- a/src/backend/access/spgist/spgtextproc.c
+++ b/src/backend/access/spgist/spgtextproc.c
@@ -30,7 +30,7 @@
* imposed by page headers, tuple headers, etc, we leave 100 bytes for that
* (the actual overhead should be no more than 56 bytes at this writing, so
* there is slop in this number). So we can safely create prefixes up to
- * BLCKSZ - 256 * 16 - 100 bytes long. Unfortunately, because 256 * 16 is
+ * BLCKSZ - 256 * 16 - 100 bytes long. Unfortunately, because 256 * 16 is
* already 4K, there is no safe prefix length when BLCKSZ is less than 8K;
* it is always possible to get "SPGiST inner tuple size exceeds maximum"
* if there are too many distinct next-byte values at a given place in the
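For reference, the arithmetic in that comment works out as follows for the default 8K block size (a worked example only, reusing the constants the comment names):

    /* Max safe prefix with BLCKSZ = 8192: room must remain for up to 256
     * downlink node tuples of 16 bytes each, plus ~100 bytes of overhead. */
    #define BLCKSZ     8192
    #define MAX_PREFIX (BLCKSZ - 256 * 16 - 100)    /* = 3996 bytes */
    /* With BLCKSZ = 4096 the same formula gives 4096 - 4096 - 100 = -100,
     * i.e. no prefix length is safe, matching the comment's caveat. */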
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index 69e85463996..a74678d967f 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -5,7 +5,7 @@
*
* The pg_multixact manager is a pg_clog-like manager that stores an array of
* MultiXactMember for each MultiXactId. It is a fundamental part of the
- * shared-row-lock implementation. Each MultiXactMember is comprised of a
+ * shared-row-lock implementation. Each MultiXactMember is comprised of a
* TransactionId and a set of flag bits. The name is a bit historical:
* originally, a MultiXactId consisted of more than one TransactionId (except
* in rare corner cases), hence "multi". Nowadays, however, it's perfectly
@@ -50,7 +50,7 @@
* The minimum value in each database is stored in pg_database, and the
* global minimum is part of pg_control. Any vacuum that is able to
* advance its database's minimum value also computes a new global minimum,
- * and uses this value to truncate older segments. When new multixactid
+ * and uses this value to truncate older segments. When new multixactid
* values are to be created, care is taken that the counter does not
* fall within the wraparound horizon considering the global minimum value.
*
@@ -108,7 +108,7 @@
* additional flag bits for each TransactionId. To do this without getting
* into alignment issues, we store four bytes of flags, and then the
* corresponding 4 Xids. Each such 5-word (20-byte) set we call a "group", and
- * are stored as a whole in pages. Thus, with 8kB BLCKSZ, we keep 409 groups
+ * are stored as a whole in pages. Thus, with 8kB BLCKSZ, we keep 409 groups
* per page. This wastes 12 bytes per page, but that's OK -- simplicity (and
* performance) trumps space efficiency here.
*
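The group layout that comment describes can be sanity-checked in a few lines (a standalone sketch; these macro names are illustrative, not the ones multixact.c uses):

    #include <stdio.h>

    #define BLCKSZ            8192
    #define MEMBERS_PER_GROUP 4
    #define GROUP_SIZE        (4 + 4 * MEMBERS_PER_GROUP)   /* flag bytes + 4 xids = 20 */
    #define GROUPS_PER_PAGE   (BLCKSZ / GROUP_SIZE)

    int
    main(void)
    {
        /* prints: 409 groups/page, 12 bytes wasted */
        printf("%d groups/page, %d bytes wasted\n",
               GROUPS_PER_PAGE, BLCKSZ - GROUPS_PER_PAGE * GROUP_SIZE);
        return 0;
    }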
@@ -177,17 +177,17 @@ typedef struct MultiXactStateData
MultiXactId lastTruncationPoint;
/*
- * oldest multixact that is still on disk. Anything older than this should
- * not be consulted.
+ * oldest multixact that is still on disk. Anything older than this
+ * should not be consulted.
*/
- MultiXactId oldestMultiXactId;
- Oid oldestMultiXactDB;
+ MultiXactId oldestMultiXactId;
+ Oid oldestMultiXactDB;
/* support for anti-wraparound measures */
- MultiXactId multiVacLimit;
- MultiXactId multiWarnLimit;
- MultiXactId multiStopLimit;
- MultiXactId multiWrapLimit;
+ MultiXactId multiVacLimit;
+ MultiXactId multiWarnLimit;
+ MultiXactId multiStopLimit;
+ MultiXactId multiWrapLimit;
/*
* Per-backend data starts here. We have two arrays stored in the area
@@ -252,7 +252,7 @@ static MultiXactId *OldestVisibleMXactId;
* so they will be uninteresting by the time our next transaction starts.
* (XXX not clear that this is correct --- other members of the MultiXact
* could hang around longer than we did. However, it's not clear what a
- * better policy for flushing old cache entries would be.) FIXME actually
+ * better policy for flushing old cache entries would be.) FIXME actually
* this is plain wrong now that multixact's may contain update Xids.
*
* We allocate the cache entries in a memory context that is deleted at
@@ -291,7 +291,7 @@ static void RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset,
static MultiXactId GetNewMultiXactId(int nmembers, MultiXactOffset *offset);
/* MultiXact cache management */
-static int mxactMemberComparator(const void *arg1, const void *arg2);
+static int mxactMemberComparator(const void *arg1, const void *arg2);
static MultiXactId mXactCacheGetBySet(int nmembers, MultiXactMember *members);
static int mXactCacheGetById(MultiXactId multi, MultiXactMember **members);
static void mXactCachePut(MultiXactId multi, int nmembers,
@@ -387,15 +387,15 @@ MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
multi, xid, mxstatus_to_string(status));
/*
- * Note: we don't allow for old multis here. The reason is that the
- * only caller of this function does a check that the multixact is
- * no longer running.
+ * Note: we don't allow for old multis here. The reason is that the only
+ * caller of this function does a check that the multixact is no longer
+ * running.
*/
nmembers = GetMultiXactIdMembers(multi, &members, false);
if (nmembers < 0)
{
- MultiXactMember member;
+ MultiXactMember member;
/*
* The MultiXactId is obsolete. This can only happen if all the
@@ -430,14 +430,14 @@ MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
}
/*
- * Determine which of the members of the MultiXactId are still of interest.
- * This is any running transaction, and also any transaction that grabbed
- * something stronger than just a lock and was committed. (An update that
- * aborted is of no interest here.)
+ * Determine which of the members of the MultiXactId are still of
+ * interest. This is any running transaction, and also any transaction
+ * that grabbed something stronger than just a lock and was committed.
+ * (An update that aborted is of no interest here.)
*
- * (Removing dead members is just an optimization, but a useful one.
- * Note we have the same race condition here as above: j could be 0 at the
- * end of the loop.)
+ * (Removing dead members is just an optimization, but a useful one. Note
+ * we have the same race condition here as above: j could be 0 at the end
+ * of the loop.)
*/
newMembers = (MultiXactMember *)
palloc(sizeof(MultiXactMember) * (nmembers + 1));
@@ -641,12 +641,12 @@ MultiXactIdSetOldestVisible(void)
/*
* ReadNextMultiXactId
- * Return the next MultiXactId to be assigned, but don't allocate it
+ * Return the next MultiXactId to be assigned, but don't allocate it
*/
MultiXactId
ReadNextMultiXactId(void)
{
- MultiXactId mxid;
+ MultiXactId mxid;
/* XXX we could presumably do this without a lock. */
LWLockAcquire(MultiXactGenLock, LW_SHARED);
@@ -722,9 +722,9 @@ CreateMultiXactId(int nmembers, MultiXactMember *members)
/*
* XXX Note: there's a lot of padding space in MultiXactMember. We could
- * find a more compact representation of this Xlog record -- perhaps all the
- * status flags in one XLogRecData, then all the xids in another one? Not
- * clear that it's worth the trouble though.
+ * find a more compact representation of this Xlog record -- perhaps all
+ * the status flags in one XLogRecData, then all the xids in another one?
+ * Not clear that it's worth the trouble though.
*/
rdata[0].data = (char *) (&xlrec);
rdata[0].len = SizeOfMultiXactCreate;
@@ -878,7 +878,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
/*----------
* Check to see if it's safe to assign another MultiXactId. This protects
- * against catastrophic data loss due to multixact wraparound. The basic
+ * against catastrophic data loss due to multixact wraparound. The basic
* rules are:
*
* If we're past multiVacLimit, start trying to force autovacuum cycles.
@@ -892,7 +892,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
{
/*
* For safety's sake, we release MultiXactGenLock while sending
- * signals, warnings, etc. This is not so much because we care about
+ * signals, warnings, etc. This is not so much because we care about
* preserving concurrency in this situation, as to avoid any
* possibility of deadlock while doing get_database_name(). First,
* copy all the shared values we'll need in this path.
@@ -923,15 +923,15 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("database is not accepting commands that generate new MultiXactIds to avoid wraparound data loss in database \"%s\"",
oldest_datname),
- errhint("Execute a database-wide VACUUM in that database.\n"
- "You might also need to commit or roll back old prepared transactions.")));
+ errhint("Execute a database-wide VACUUM in that database.\n"
+ "You might also need to commit or roll back old prepared transactions.")));
else
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("database is not accepting commands that generate new MultiXactIds to avoid wraparound data loss in database with OID %u",
oldest_datoid),
- errhint("Execute a database-wide VACUUM in that database.\n"
- "You might also need to commit or roll back old prepared transactions.")));
+ errhint("Execute a database-wide VACUUM in that database.\n"
+ "You might also need to commit or roll back old prepared transactions.")));
}
else if (!MultiXactIdPrecedes(result, multiWarnLimit))
{
@@ -943,15 +943,15 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
(errmsg("database \"%s\" must be vacuumed before %u more MultiXactIds are used",
oldest_datname,
multiWrapLimit - result),
- errhint("Execute a database-wide VACUUM in that database.\n"
- "You might also need to commit or roll back old prepared transactions.")));
+ errhint("Execute a database-wide VACUUM in that database.\n"
+ "You might also need to commit or roll back old prepared transactions.")));
else
ereport(WARNING,
(errmsg("database with OID %u must be vacuumed before %u more MultiXactIds are used",
oldest_datoid,
multiWrapLimit - result),
- errhint("Execute a database-wide VACUUM in that database.\n"
- "You might also need to commit or roll back old prepared transactions.")));
+ errhint("Execute a database-wide VACUUM in that database.\n"
+ "You might also need to commit or roll back old prepared transactions.")));
}
/* Re-acquire lock and start over */
@@ -995,10 +995,10 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
*
* We don't care about MultiXactId wraparound here; it will be handled by
* the next iteration. But note that nextMXact may be InvalidMultiXactId
- * or the first value on a segment-beginning page after this routine exits,
- * so anyone else looking at the variable must be prepared to deal with
- * either case. Similarly, nextOffset may be zero, but we won't use that
- * as the actual start offset of the next multixact.
+ * or the first value on a segment-beginning page after this routine
+ * exits, so anyone else looking at the variable must be prepared to deal
+ * with either case. Similarly, nextOffset may be zero, but we won't use
+ * that as the actual start offset of the next multixact.
*/
(MultiXactState->nextMXact)++;
@@ -1066,18 +1066,18 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
*
* An ID older than MultiXactState->oldestMultiXactId cannot possibly be
* useful; it should have already been frozen by vacuum. We've truncated
- * the on-disk structures anyway. Returning the wrong values could lead to
- * an incorrect visibility result. However, to support pg_upgrade we need
- * to allow an empty set to be returned regardless, if the caller is
+ * the on-disk structures anyway. Returning the wrong values could lead
+ * to an incorrect visibility result. However, to support pg_upgrade we
+ * need to allow an empty set to be returned regardless, if the caller is
* willing to accept it; the caller is expected to check that it's an
* allowed condition (such as ensuring that the infomask bits set on the
- * tuple are consistent with the pg_upgrade scenario). If the caller is
+ * tuple are consistent with the pg_upgrade scenario). If the caller is
* expecting this to be called only on recently created multis, then we
* raise an error.
*
* Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
- * seen, it implies undetected ID wraparound has occurred. This raises
- * a hard error.
+ * seen, it implies undetected ID wraparound has occurred. This raises a
+ * hard error.
*
* Shared lock is enough here since we aren't modifying any global state.
* Acquire it just long enough to grab the current counter values. We may
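The precedes/follows tests this comment leans on are circular, modulo-2^32 comparisons. A standalone sketch of the idiom (this definition illustrates the technique and is not copied from the patched file):

    #include <stdint.h>
    #include <stdbool.h>

    typedef uint32_t MultiXactId;

    /* "a precedes b" iff a lies within 2^31 IDs behind b, so ordering
     * remains meaningful after the 32-bit counter wraps around. */
    static bool
    multi_precedes(MultiXactId a, MultiXactId b)
    {
        return (int32_t) (a - b) < 0;
    }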
@@ -1095,8 +1095,8 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
{
ereport(allow_old ? DEBUG1 : ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("MultiXactId %u does no longer exist -- apparent wraparound",
- multi)));
+ errmsg("MultiXactId %u does no longer exist -- apparent wraparound",
+ multi)));
return -1;
}
@@ -1349,7 +1349,7 @@ mXactCacheGetById(MultiXactId multi, MultiXactMember **members)
memcpy(ptr, entry->members, size);
debug_elog3(DEBUG2, "CacheGet: found %s",
- mxid_to_string(multi, entry->nmembers, entry->members));
+ mxid_to_string(multi, entry->nmembers, entry->members));
return entry->nmembers;
}
}
@@ -1423,8 +1423,8 @@ mxstatus_to_string(MultiXactStatus status)
char *
mxid_to_string(MultiXactId multi, int nmembers, MultiXactMember *members)
{
- static char *str = NULL;
- StringInfoData buf;
+ static char *str = NULL;
+ StringInfoData buf;
int i;
if (str != NULL)
@@ -1721,7 +1721,7 @@ ZeroMultiXactMemberPage(int pageno, bool writeXlog)
*
* StartupXLOG has already established nextMXact/nextOffset by calling
* MultiXactSetNextMXact and/or MultiXactAdvanceNextMXact, and the oldestMulti
- * info from pg_control and/or MultiXactAdvanceOldest. Note that we may
+ * info from pg_control and/or MultiXactAdvanceOldest. Note that we may
* already have replayed WAL data into the SLRU files.
*
* We don't need any locks here, really; the SLRU locks are taken
@@ -1883,17 +1883,17 @@ MultiXactSetNextMXact(MultiXactId nextMulti,
void
SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
{
- MultiXactId multiVacLimit;
- MultiXactId multiWarnLimit;
- MultiXactId multiStopLimit;
- MultiXactId multiWrapLimit;
- MultiXactId curMulti;
+ MultiXactId multiVacLimit;
+ MultiXactId multiWarnLimit;
+ MultiXactId multiStopLimit;
+ MultiXactId multiWrapLimit;
+ MultiXactId curMulti;
Assert(MultiXactIdIsValid(oldest_datminmxid));
/*
* The place where we actually get into deep trouble is halfway around
- * from the oldest potentially-existing XID/multi. (This calculation is
+ * from the oldest potentially-existing XID/multi. (This calculation is
* probably off by one or two counts for Xids, because the special XIDs
* reduce the size of the loop a little bit. But we throw in plenty of
* slop below, so it doesn't matter.)
@@ -1911,11 +1911,11 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
multiStopLimit -= FirstMultiXactId;
/*
- * We'll start complaining loudly when we get within 10M multis of the stop
- * point. This is kind of arbitrary, but if you let your gas gauge get
- * down to 1% of full, would you be looking for the next gas station? We
- * need to be fairly liberal about this number because there are lots of
- * scenarios where most transactions are done by automatic clients that
+ * We'll start complaining loudly when we get within 10M multis of the
+ * stop point. This is kind of arbitrary, but if you let your gas gauge
+ * get down to 1% of full, would you be looking for the next gas station?
+ * We need to be fairly liberal about this number because there are lots
+ * of scenarios where most transactions are done by automatic clients that
* won't pay attention to warnings. (No, we're not gonna make this
* configurable. If you know enough to configure it, you know enough to
* not get in this kind of trouble in the first place.)
@@ -1925,8 +1925,8 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
multiWarnLimit -= FirstMultiXactId;
/*
- * We'll start trying to force autovacuums when oldest_datminmxid gets
- * to be more than 200 million transactions old.
+ * We'll start trying to force autovacuums when oldest_datminmxid gets to
+ * be more than 200 million transactions old.
*/
multiVacLimit = oldest_datminmxid + 200000000;
if (multiVacLimit < FirstMultiXactId)
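Taken together, the hunks in this function compute a ladder of limits from the oldest datminmxid. A simplified sketch (the halfway-around, 10M, and 200M figures come from the comments above; the exact stop margin is an assumption, and the clamping past FirstMultiXactId shown in the surrounding context is elided):

    #include <stdint.h>

    typedef uint32_t MultiXactId;

    static void
    compute_multixact_limits(MultiXactId oldest, MultiXactId *vac,
                             MultiXactId *warn, MultiXactId *stop,
                             MultiXactId *wrap)
    {
        *wrap = oldest + 0x7FFFFFFF;    /* halfway around the 32-bit counter */
        *stop = *wrap - 100;            /* small safety margin (assumed here) */
        *warn = *stop - 10000000;       /* start complaining 10M multis early */
        *vac  = oldest + 200000000;     /* start forcing autovacuum at 200M */
    }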
@@ -1945,8 +1945,8 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
/* Log the info */
ereport(DEBUG1,
- (errmsg("MultiXactId wrap limit is %u, limited by database with OID %u",
- multiWrapLimit, oldest_datoid)));
+ (errmsg("MultiXactId wrap limit is %u, limited by database with OID %u",
+ multiWrapLimit, oldest_datoid)));
/*
* If past the autovacuum force point, immediately signal an autovac
@@ -2127,9 +2127,9 @@ ExtendMultiXactMember(MultiXactOffset offset, int nmembers)
MultiXactId
GetOldestMultiXactId(void)
{
- MultiXactId oldestMXact;
- MultiXactId nextMXact;
- int i;
+ MultiXactId oldestMXact;
+ MultiXactId nextMXact;
+ int i;
/*
* This is the oldest valid value among all the OldestMemberMXactId[] and
@@ -2168,17 +2168,17 @@ GetOldestMultiXactId(void)
typedef struct mxtruncinfo
{
- int earliestExistingPage;
+ int earliestExistingPage;
} mxtruncinfo;
/*
* SlruScanDirectory callback
- * This callback determines the earliest existing page number.
+ * This callback determines the earliest existing page number.
*/
static bool
SlruScanDirCbFindEarliest(SlruCtl ctl, char *filename, int segpage, void *data)
{
- mxtruncinfo *trunc = (mxtruncinfo *) data;
+ mxtruncinfo *trunc = (mxtruncinfo *) data;
if (trunc->earliestExistingPage == -1 ||
ctl->PagePrecedes(segpage, trunc->earliestExistingPage))
@@ -2186,7 +2186,7 @@ SlruScanDirCbFindEarliest(SlruCtl ctl, char *filename, int segpage, void *data)
trunc->earliestExistingPage = segpage;
}
- return false; /* keep going */
+ return false; /* keep going */
}
/*
@@ -2200,16 +2200,16 @@ SlruScanDirCbFindEarliest(SlruCtl ctl, char *filename, int segpage, void *data)
void
TruncateMultiXact(MultiXactId oldestMXact)
{
- MultiXactOffset oldestOffset;
- mxtruncinfo trunc;
- MultiXactId earliest;
+ MultiXactOffset oldestOffset;
+ mxtruncinfo trunc;
+ MultiXactId earliest;
/*
* Note we can't just plow ahead with the truncation; it's possible that
* there are no segments to truncate, which is a problem because we are
- * going to attempt to read the offsets page to determine where to truncate
- * the members SLRU. So we first scan the directory to determine the
- * earliest offsets page number that we can read without error.
+ * going to attempt to read the offsets page to determine where to
+ * truncate the members SLRU. So we first scan the directory to determine
+ * the earliest offsets page number that we can read without error.
*/
trunc.earliestExistingPage = -1;
SlruScanDirectory(MultiXactOffsetCtl, SlruScanDirCbFindEarliest, &trunc);
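The callback pattern used here, reduced to a standalone sketch that mirrors SlruScanDirCbFindEarliest above (types simplified; a plain `<` stands in for the circular PagePrecedes comparison):

    #include <stdbool.h>

    typedef struct
    {
        int earliestExistingPage;   /* -1 until the first segment is seen */
    } mxtruncinfo;

    /* Called once per segment file found in the SLRU directory. */
    static bool
    find_earliest_cb(int segpage, void *data)
    {
        mxtruncinfo *trunc = (mxtruncinfo *) data;

        if (trunc->earliestExistingPage == -1 ||
            segpage < trunc->earliestExistingPage)
            trunc->earliestExistingPage = segpage;

        return false;               /* false = keep scanning */
    }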
@@ -2220,9 +2220,9 @@ TruncateMultiXact(MultiXactId oldestMXact)
return;
/*
- * First, compute the safe truncation point for MultiXactMember.
- * This is the starting offset of the multixact we were passed
- * as MultiXactOffset cutoff.
+ * First, compute the safe truncation point for MultiXactMember. This is
+ * the starting offset of the multixact we were passed as MultiXactOffset
+ * cutoff.
*/
{
int pageno;
@@ -2380,7 +2380,7 @@ multixact_redo(XLogRecPtr lsn, XLogRecord *record)
else if (info == XLOG_MULTIXACT_CREATE_ID)
{
xl_multixact_create *xlrec =
- (xl_multixact_create *) XLogRecGetData(record);
+ (xl_multixact_create *) XLogRecGetData(record);
TransactionId max_xid;
int i;
@@ -2427,12 +2427,12 @@ pg_get_multixact_members(PG_FUNCTION_ARGS)
{
typedef struct
{
- MultiXactMember *members;
- int nmembers;
- int iter;
+ MultiXactMember *members;
+ int nmembers;
+ int iter;
} mxact;
- MultiXactId mxid = PG_GETARG_UINT32(0);
- mxact *multi;
+ MultiXactId mxid = PG_GETARG_UINT32(0);
+ mxact *multi;
FuncCallContext *funccxt;
if (mxid < FirstMultiXactId)
diff --git a/src/backend/access/transam/timeline.c b/src/backend/access/transam/timeline.c
index 921da62c22a..7bb523a4fb4 100644
--- a/src/backend/access/transam/timeline.c
+++ b/src/backend/access/transam/timeline.c
@@ -15,7 +15,7 @@
* <parentTLI> <switchpoint> <reason>
*
* parentTLI ID of the parent timeline
- * switchpoint XLogRecPtr of the WAL position where the switch happened
+ * switchpoint XLogRecPtr of the WAL position where the switch happened
* reason human-readable explanation of why the timeline was changed
*
* The fields are separated by tabs. Lines beginning with # are comments, and
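For illustration, one data line in this format can be consumed as below (a sketch; the real reader also skips comment lines and checks that TLIs increase):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t TimeLineID;
    typedef uint64_t XLogRecPtr;

    /* Parse "<parentTLI>\t<hi>/<lo> <reason>"; the trailing reason text is
     * ignored here, and -1 is returned for comments or malformed lines. */
    static int
    parse_history_line(const char *line, TimeLineID *tli, XLogRecPtr *switchpoint)
    {
        unsigned int t, hi, lo;

        if (sscanf(line, "%u\t%X/%X", &t, &hi, &lo) != 3)
            return -1;
        *tli = (TimeLineID) t;
        *switchpoint = ((XLogRecPtr) hi << 32) | lo;
        return 0;
    }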
@@ -49,7 +49,7 @@ restoreTimeLineHistoryFiles(TimeLineID begin, TimeLineID end)
{
char path[MAXPGPATH];
char histfname[MAXFNAMELEN];
- TimeLineID tli;
+ TimeLineID tli;
for (tli = begin; tli < end; tli++)
{
@@ -179,8 +179,8 @@ readTimeLineHistory(TimeLineID targetTLI)
errhint("Timeline IDs must be less than child timeline's ID.")));
/*
- * Create one more entry for the "tip" of the timeline, which has no
- * entry in the history file.
+ * Create one more entry for the "tip" of the timeline, which has no entry
+ * in the history file.
*/
entry = (TimeLineHistoryEntry *) palloc(sizeof(TimeLineHistoryEntry));
entry->tli = targetTLI;
@@ -418,7 +418,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI,
/*
* Prefer link() to rename() here just to be really sure that we don't
- * overwrite an existing file. However, there shouldn't be one, so
+ * overwrite an existing file. However, there shouldn't be one, so
* rename() is an acceptable substitute except for the truly paranoid.
*/
#if HAVE_WORKING_LINK
@@ -530,7 +530,7 @@ writeTimeLineHistoryFile(TimeLineID tli, char *content, int size)
bool
tliInHistory(TimeLineID tli, List *expectedTLEs)
{
- ListCell *cell;
+ ListCell *cell;
foreach(cell, expectedTLEs)
{
@@ -548,11 +548,12 @@ tliInHistory(TimeLineID tli, List *expectedTLEs)
TimeLineID
tliOfPointInHistory(XLogRecPtr ptr, List *history)
{
- ListCell *cell;
+ ListCell *cell;
foreach(cell, history)
{
TimeLineHistoryEntry *tle = (TimeLineHistoryEntry *) lfirst(cell);
+
if ((XLogRecPtrIsInvalid(tle->begin) || tle->begin <= ptr) &&
(XLogRecPtrIsInvalid(tle->end) || ptr < tle->end))
{
@@ -563,7 +564,7 @@ tliOfPointInHistory(XLogRecPtr ptr, List *history)
/* shouldn't happen. */
elog(ERROR, "timeline history was not contiguous");
- return 0; /* keep compiler quiet */
+ return 0; /* keep compiler quiet */
}
/*
@@ -579,7 +580,7 @@ tliSwitchPoint(TimeLineID tli, List *history, TimeLineID *nextTLI)
if (nextTLI)
*nextTLI = 0;
- foreach (cell, history)
+ foreach(cell, history)
{
TimeLineHistoryEntry *tle = (TimeLineHistoryEntry *) lfirst(cell);
@@ -592,5 +593,5 @@ tliSwitchPoint(TimeLineID tli, List *history, TimeLineID *nextTLI)
ereport(ERROR,
(errmsg("requested timeline %u is not in this server's history",
tli)));
- return InvalidXLogRecPtr; /* keep compiler quiet */
+ return InvalidXLogRecPtr; /* keep compiler quiet */
}
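To summarize the lookups touched in this file: tliOfPointInHistory() returns the entry whose half-open range contains a WAL position, with an invalid begin or end standing for "since the beginning" or "still current". The containment test, as a standalone sketch (hypothetical struct, same predicate as the hunk above):

    #include <stdint.h>
    #include <stdbool.h>

    typedef uint64_t XLogRecPtr;
    typedef uint32_t TimeLineID;

    #define InvalidXLogRecPtr ((XLogRecPtr) 0)

    typedef struct
    {
        TimeLineID  tli;
        XLogRecPtr  begin;      /* inclusive; invalid = from the start */
        XLogRecPtr  end;        /* exclusive; invalid = current tip */
    } HistoryEntry;

    static bool
    entry_contains(const HistoryEntry *e, XLogRecPtr ptr)
    {
        return (e->begin == InvalidXLogRecPtr || e->begin <= ptr) &&
               (e->end == InvalidXLogRecPtr || ptr < e->end);
    }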
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index e62286f9f98..31e868d4bc7 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -1024,8 +1024,8 @@ RecordTransactionCommit(void)
*
* It's safe to change the delayChkpt flag of our own backend without
* holding the ProcArrayLock, since we're the only one modifying it.
- * This makes checkpoint's determination of which xacts are delayChkpt a
- * bit fuzzy, but it doesn't matter.
+ * This makes checkpoint's determination of which xacts are delayChkpt
+ * a bit fuzzy, but it doesn't matter.
*/
START_CRIT_SECTION();
MyPgXact->delayChkpt = true;
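The ordering constraint behind delayChkpt, sketched as a pseudo-flow in comment form (a simplification; the exact interleaving of WAL insertion and clog updates is in the surrounding code, not reproduced here):

    /*
     * Commit side (this function):
     *     START_CRIT_SECTION();
     *     MyPgXact->delayChkpt = true;    -- "don't checkpoint past me yet"
     *     insert the commit record into WAL;
     *     update pg_clog;
     *     MyPgXact->delayChkpt = false;
     *     END_CRIT_SECTION();
     *
     * Checkpoint side (see the CreateCheckPoint hunks later in this patch):
     * after fixing the REDO point, collect the virtual xids still flagged,
     * via GetVirtualXIDsDelayingChkpt(), and wait for them to clear, so a
     * commit record inserted just before REDO cannot be flushed without its
     * clog update.
     */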
@@ -4683,12 +4683,11 @@ xact_redo_commit_internal(TransactionId xid, XLogRecPtr lsn,
* from the template database, and then commit the transaction. If we
* crash after all the files have been copied but before the commit, you
* have files in the data directory without an entry in pg_database. To
- * minimize the window
- * for that, we use ForceSyncCommit() to rush the commit record to disk as
- * quick as possible. We have the same window during recovery, and forcing
- * an XLogFlush() (which updates minRecoveryPoint during recovery) helps
- * to reduce that problem window, for any user that requested
- * ForceSyncCommit().
+ * minimize the window for that, we use ForceSyncCommit() to rush the
+ * commit record to disk as quick as possible. We have the same window
+ * during recovery, and forcing an XLogFlush() (which updates
+ * minRecoveryPoint during recovery) helps to reduce that problem window,
+ * for any user that requested ForceSyncCommit().
*/
if (XactCompletionForceSyncCommit(xinfo))
XLogFlush(lsn);
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 76b52fb1dcb..dcd33c931c0 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -200,14 +200,14 @@ static int LocalXLogInsertAllowed = -1;
* will switch to using offline XLOG archives as soon as we reach the end of
* WAL in pg_xlog.
*/
-bool ArchiveRecoveryRequested = false;
-bool InArchiveRecovery = false;
+bool ArchiveRecoveryRequested = false;
+bool InArchiveRecovery = false;
/* Was the last xlog file restored from archive, or local? */
static bool restoredFromArchive = false;
/* options taken from recovery.conf for archive recovery */
-char *recoveryRestoreCommand = NULL;
+char *recoveryRestoreCommand = NULL;
static char *recoveryEndCommand = NULL;
static char *archiveCleanupCommand = NULL;
static RecoveryTargetType recoveryTarget = RECOVERY_TARGET_UNSET;
@@ -223,7 +223,7 @@ static char *PrimaryConnInfo = NULL;
static char *TriggerFile = NULL;
/* are we currently in standby mode? */
-bool StandbyMode = false;
+bool StandbyMode = false;
/* whether request for fast promotion has been made yet */
static bool fast_promote = false;
@@ -403,10 +403,11 @@ typedef struct XLogCtlData
uint32 ckptXidEpoch; /* nextXID & epoch of latest checkpoint */
TransactionId ckptXid;
XLogRecPtr asyncXactLSN; /* LSN of newest async commit/abort */
- XLogSegNo lastRemovedSegNo; /* latest removed/recycled XLOG segment */
+ XLogSegNo lastRemovedSegNo; /* latest removed/recycled XLOG
+ * segment */
/* Fake LSN counter, for unlogged relations. Protected by ulsn_lck */
- XLogRecPtr unloggedLSN;
+ XLogRecPtr unloggedLSN;
slock_t ulsn_lck;
/* Protected by WALWriteLock: */
@@ -548,14 +549,14 @@ static XLogwrtResult LogwrtResult = {0, 0};
*/
typedef enum
{
- XLOG_FROM_ANY = 0, /* request to read WAL from any source */
- XLOG_FROM_ARCHIVE, /* restored using restore_command */
- XLOG_FROM_PG_XLOG, /* existing file in pg_xlog */
- XLOG_FROM_STREAM, /* streamed from master */
+ XLOG_FROM_ANY = 0, /* request to read WAL from any source */
+ XLOG_FROM_ARCHIVE, /* restored using restore_command */
+ XLOG_FROM_PG_XLOG, /* existing file in pg_xlog */
+ XLOG_FROM_STREAM, /* streamed from master */
} XLogSource;
/* human-readable names for XLogSources, for debugging output */
-static const char *xlogSourceNames[] = { "any", "archive", "pg_xlog", "stream" };
+static const char *xlogSourceNames[] = {"any", "archive", "pg_xlog", "stream"};
/*
* openLogFile is -1 or a kernel FD for an open log file segment.
@@ -589,7 +590,7 @@ static XLogSource readSource = 0; /* XLOG_FROM_* code */
* next.
*/
static XLogSource currentSource = 0; /* XLOG_FROM_* code */
-static bool lastSourceFailed = false;
+static bool lastSourceFailed = false;
typedef struct XLogPageReadPrivate
{
@@ -607,7 +608,7 @@ typedef struct XLogPageReadPrivate
* XLogReceiptSource tracks where we last successfully read some WAL.)
*/
static TimestampTz XLogReceiptTime = 0;
-static XLogSource XLogReceiptSource = 0; /* XLOG_FROM_* code */
+static XLogSource XLogReceiptSource = 0; /* XLOG_FROM_* code */
/* State information for XLOG reading */
static XLogRecPtr ReadRecPtr; /* start of last record read */
@@ -649,7 +650,7 @@ static void KeepLogSeg(XLogRecPtr recptr, XLogSegNo *logSegNo);
static bool XLogCheckBuffer(XLogRecData *rdata, bool holdsExclusiveLock,
XLogRecPtr *lsn, BkpBlock *bkpb);
static Buffer RestoreBackupBlockContents(XLogRecPtr lsn, BkpBlock bkpb,
- char *blk, bool get_cleanup_lock, bool keep_buffer);
+ char *blk, bool get_cleanup_lock, bool keep_buffer);
static bool AdvanceXLInsertBuffer(bool new_segment);
static bool XLogCheckpointNeeded(XLogSegNo new_segno);
static void XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch);
@@ -658,7 +659,7 @@ static bool InstallXLogFileSegment(XLogSegNo *segno, char *tmppath,
bool use_lock);
static int XLogFileRead(XLogSegNo segno, int emode, TimeLineID tli,
int source, bool notexistOk);
-static int XLogFileReadAnyTLI(XLogSegNo segno, int emode, int source);
+static int XLogFileReadAnyTLI(XLogSegNo segno, int emode, int source);
static int XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
int reqLen, XLogRecPtr targetRecPtr, char *readBuf,
TimeLineID *readTLI);
@@ -823,7 +824,7 @@ begin:;
/* OK, put it in this slot */
dtbuf[i] = rdt->buffer;
if (doPageWrites && XLogCheckBuffer(rdt, true,
- &(dtbuf_lsn[i]), &(dtbuf_xlg[i])))
+ &(dtbuf_lsn[i]), &(dtbuf_xlg[i])))
{
dtbuf_bkp[i] = true;
rdt->data = NULL;
@@ -1251,10 +1252,10 @@ XLogCheckBuffer(XLogRecData *rdata, bool holdsExclusiveLock,
page = BufferGetPage(rdata->buffer);
/*
- * We assume page LSN is first data on *every* page that can be passed
- * to XLogInsert, whether it has the standard page layout or not. We
- * don't need to take the buffer header lock for PageGetLSN if we hold
- * an exclusive lock on the page and/or the relation.
+ * We assume page LSN is first data on *every* page that can be passed to
+ * XLogInsert, whether it has the standard page layout or not. We don't
+ * need to take the buffer header lock for PageGetLSN if we hold an
+ * exclusive lock on the page and/or the relation.
*/
if (holdsExclusiveLock)
*lsn = PageGetLSN(page);
@@ -1545,7 +1546,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
*/
if (LogwrtResult.Write >= XLogCtl->xlblocks[curridx])
elog(PANIC, "xlog write request %X/%X is past end of log %X/%X",
- (uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write,
+ (uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write,
(uint32) (XLogCtl->xlblocks[curridx] >> 32),
(uint32) XLogCtl->xlblocks[curridx]);
@@ -1611,9 +1612,9 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
if (lseek(openLogFile, (off_t) startoffset, SEEK_SET) < 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not seek in log file %s to offset %u: %m",
- XLogFileNameP(ThisTimeLineID, openLogSegNo),
- startoffset)));
+ errmsg("could not seek in log file %s to offset %u: %m",
+ XLogFileNameP(ThisTimeLineID, openLogSegNo),
+ startoffset)));
openLogOff = startoffset;
}
@@ -1858,7 +1859,7 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
if (!force && newMinRecoveryPoint < lsn)
elog(WARNING,
"xlog min recovery request %X/%X is past current point %X/%X",
- (uint32) (lsn >> 32) , (uint32) lsn,
+ (uint32) (lsn >> 32), (uint32) lsn,
(uint32) (newMinRecoveryPoint >> 32),
(uint32) newMinRecoveryPoint);
@@ -1872,10 +1873,10 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
minRecoveryPointTLI = newMinRecoveryPointTLI;
ereport(DEBUG2,
- (errmsg("updated min recovery point to %X/%X on timeline %u",
- (uint32) (minRecoveryPoint >> 32),
- (uint32) minRecoveryPoint,
- newMinRecoveryPointTLI)));
+ (errmsg("updated min recovery point to %X/%X on timeline %u",
+ (uint32) (minRecoveryPoint >> 32),
+ (uint32) minRecoveryPoint,
+ newMinRecoveryPointTLI)));
}
}
LWLockRelease(ControlFileLock);
@@ -1915,7 +1916,7 @@ XLogFlush(XLogRecPtr record)
elog(LOG, "xlog flush request %X/%X; write %X/%X; flush %X/%X",
(uint32) (record >> 32), (uint32) record,
(uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write,
- (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
+ (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
#endif
START_CRIT_SECTION();
@@ -1979,8 +1980,8 @@ XLogFlush(XLogRecPtr record)
/*
* Sleep before flush! By adding a delay here, we may give further
* backends the opportunity to join the backlog of group commit
- * followers; this can significantly improve transaction throughput, at
- * the risk of increasing transaction latency.
+ * followers; this can significantly improve transaction throughput,
+ * at the risk of increasing transaction latency.
*
* We do not sleep if enableFsync is not turned on, nor if there are
* fewer than CommitSiblings other backends with active transactions.
@@ -1995,7 +1996,7 @@ XLogFlush(XLogRecPtr record)
XLogCtlInsert *Insert = &XLogCtl->Insert;
uint32 freespace = INSERT_FREESPACE(Insert);
- if (freespace == 0) /* buffer is full */
+ if (freespace == 0) /* buffer is full */
WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
else
{
@@ -2048,7 +2049,7 @@ XLogFlush(XLogRecPtr record)
elog(ERROR,
"xlog flush request %X/%X is not satisfied --- flushed only to %X/%X",
(uint32) (record >> 32), (uint32) record,
- (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
+ (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
}
/*
@@ -2127,7 +2128,7 @@ XLogBackgroundFlush(void)
elog(LOG, "xlog bg flush request %X/%X; write %X/%X; flush %X/%X",
(uint32) (WriteRqstPtr >> 32), (uint32) WriteRqstPtr,
(uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write,
- (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
+ (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
#endif
START_CRIT_SECTION();
@@ -2379,7 +2380,7 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock)
if (fd < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\": %m", path)));
+ errmsg("could not open file \"%s\": %m", path)));
elog(DEBUG2, "done creating and filling new WAL file");
@@ -2719,7 +2720,7 @@ XLogFileReadAnyTLI(XLogSegNo segno, int emode, int source)
* want to read.
*
* If we haven't read the timeline history file yet, read it now, so that
- * we know which TLIs to scan. We don't save the list in expectedTLEs,
+ * we know which TLIs to scan. We don't save the list in expectedTLEs,
* however, unless we actually find a valid segment. That way if there is
* neither a timeline history file nor a WAL segment in the archive, and
* streaming replication is set up, we'll read the timeline history file
@@ -3215,8 +3216,8 @@ RestoreBackupBlockContents(XLogRecPtr lsn, BkpBlock bkpb, char *blk,
}
/*
- * The checksum value on this page is currently invalid. We don't
- * need to reset it here since it will be set before being written.
+ * The checksum value on this page is currently invalid. We don't need to
+ * reset it here since it will be set before being written.
*/
PageSetLSN(page, lsn);
@@ -3258,7 +3259,7 @@ ReadRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, int emode,
for (;;)
{
- char *errormsg;
+ char *errormsg;
record = XLogReadRecord(xlogreader, RecPtr, &errormsg);
ReadRecPtr = xlogreader->ReadRecPtr;
@@ -3272,34 +3273,35 @@ ReadRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, int emode,
}
/*
- * We only end up here without a message when XLogPageRead() failed
- * - in that case we already logged something.
- * In StandbyMode that only happens if we have been triggered, so
- * we shouldn't loop anymore in that case.
+ * We only end up here without a message when XLogPageRead()
+ * failed - in that case we already logged something. In
+ * StandbyMode that only happens if we have been triggered, so we
+ * shouldn't loop anymore in that case.
*/
if (errormsg)
ereport(emode_for_corrupt_record(emode,
RecPtr ? RecPtr : EndRecPtr),
- (errmsg_internal("%s", errormsg) /* already translated */));
+ (errmsg_internal("%s", errormsg) /* already translated */ ));
}
+
/*
* Check page TLI is one of the expected values.
*/
else if (!tliInHistory(xlogreader->latestPageTLI, expectedTLEs))
{
char fname[MAXFNAMELEN];
- XLogSegNo segno;
- int32 offset;
+ XLogSegNo segno;
+ int32 offset;
XLByteToSeg(xlogreader->latestPagePtr, segno);
offset = xlogreader->latestPagePtr % XLogSegSize;
XLogFileName(fname, xlogreader->readPageTLI, segno);
ereport(emode_for_corrupt_record(emode,
RecPtr ? RecPtr : EndRecPtr),
- (errmsg("unexpected timeline ID %u in log segment %s, offset %u",
- xlogreader->latestPageTLI,
- fname,
- offset)));
+ (errmsg("unexpected timeline ID %u in log segment %s, offset %u",
+ xlogreader->latestPageTLI,
+ fname,
+ offset)));
record = NULL;
}
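The segment and file-name derivation used by this error path, as a standalone sketch (assumes the default 16MB segment size; the macro spellings here are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t XLogRecPtr;
    typedef uint32_t TimeLineID;

    #define XLOG_SEG_SIZE   (16 * 1024 * 1024)
    #define SEGS_PER_XLOGID (UINT64_C(0x100000000) / XLOG_SEG_SIZE)     /* 256 */

    /* Report an LSN as "segment file, offset", the shape of the message above:
     * the 24-hex-digit name is timeline, then segno split into hi/lo parts. */
    static void
    report_position(TimeLineID tli, XLogRecPtr lsn)
    {
        uint64_t segno  = lsn / XLOG_SEG_SIZE;
        uint32_t offset = (uint32_t) (lsn % XLOG_SEG_SIZE);
        char     fname[25];

        snprintf(fname, sizeof(fname), "%08X%08X%08X", tli,
                 (uint32_t) (segno / SEGS_PER_XLOGID),
                 (uint32_t) (segno % SEGS_PER_XLOGID));
        printf("log segment %s, offset %u\n", fname, offset);
    }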
@@ -3314,10 +3316,10 @@ ReadRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, int emode,
lastSourceFailed = true;
/*
- * If archive recovery was requested, but we were still doing crash
- * recovery, switch to archive recovery and retry using the offline
- * archive. We have now replayed all the valid WAL in pg_xlog, so
- * we are presumably now consistent.
+ * If archive recovery was requested, but we were still doing
+ * crash recovery, switch to archive recovery and retry using the
+ * offline archive. We have now replayed all the valid WAL in
+ * pg_xlog, so we are presumably now consistent.
*
* We require that there's at least some valid WAL present in
* pg_xlog, however (!fetch_ckpt). We could recover using the WAL
@@ -3401,11 +3403,11 @@ rescanLatestTimeLine(void)
newExpectedTLEs = readTimeLineHistory(newtarget);
/*
- * If the current timeline is not part of the history of the new
- * timeline, we cannot proceed to it.
+ * If the current timeline is not part of the history of the new timeline,
+ * we cannot proceed to it.
*/
found = false;
- foreach (cell, newExpectedTLEs)
+ foreach(cell, newExpectedTLEs)
{
currentTle = (TimeLineHistoryEntry *) lfirst(cell);
@@ -3812,7 +3814,7 @@ DataChecksumsEnabled(void)
XLogRecPtr
GetFakeLSNForUnloggedRel(void)
{
- XLogRecPtr nextUnloggedLSN;
+ XLogRecPtr nextUnloggedLSN;
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
@@ -4991,15 +4993,15 @@ StartupXLOG(void)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory"),
- errdetail("Failed while allocating an XLog reading processor")));
+ errdetail("Failed while allocating an XLog reading processor")));
xlogreader->system_identifier = ControlFile->system_identifier;
if (read_backup_label(&checkPointLoc, &backupEndRequired,
&backupFromStandby))
{
/*
- * Archive recovery was requested, and thanks to the backup label file,
- * we know how far we need to replay to reach consistency. Enter
+ * Archive recovery was requested, and thanks to the backup label
+ * file, we know how far we need to replay to reach consistency. Enter
* archive recovery directly.
*/
InArchiveRecovery = true;
@@ -5017,7 +5019,7 @@ StartupXLOG(void)
wasShutdown = (record->xl_info == XLOG_CHECKPOINT_SHUTDOWN);
ereport(DEBUG1,
(errmsg("checkpoint record is at %X/%X",
- (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
+ (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
InRecovery = true; /* force recovery even if SHUTDOWNED */
/*
@@ -5049,8 +5051,8 @@ StartupXLOG(void)
/*
* It's possible that archive recovery was requested, but we don't
* know how far we need to replay the WAL before we reach consistency.
- * This can happen for example if a base backup is taken from a running
- * server using an atomic filesystem snapshot, without calling
+ * This can happen for example if a base backup is taken from a
+ * running server using an atomic filesystem snapshot, without calling
* pg_start/stop_backup. Or if you just kill a running master server
* and put it into archive recovery by creating a recovery.conf file.
*
@@ -5058,8 +5060,8 @@ StartupXLOG(void)
* replaying all the WAL present in pg_xlog, and only enter archive
* recovery after that.
*
- * But usually we already know how far we need to replay the WAL (up to
- * minRecoveryPoint, up to backupEndPoint, or until we see an
+ * But usually we already know how far we need to replay the WAL (up
+ * to minRecoveryPoint, up to backupEndPoint, or until we see an
* end-of-backup record), and we can enter archive recovery directly.
*/
if (ArchiveRecoveryRequested &&
@@ -5084,7 +5086,7 @@ StartupXLOG(void)
{
ereport(DEBUG1,
(errmsg("checkpoint record is at %X/%X",
- (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
+ (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
}
else if (StandbyMode)
{
@@ -5103,7 +5105,7 @@ StartupXLOG(void)
{
ereport(LOG,
(errmsg("using previous checkpoint record at %X/%X",
- (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
+ (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
InRecovery = true; /* force recovery even if SHUTDOWNED */
}
else
@@ -5119,15 +5121,16 @@ StartupXLOG(void)
* timeline in the history of the requested timeline, we cannot proceed:
* the backup is not part of the history of the requested timeline.
*/
- Assert(expectedTLEs); /* was initialized by reading checkpoint record */
+ Assert(expectedTLEs); /* was initialized by reading checkpoint
+ * record */
if (tliOfPointInHistory(checkPointLoc, expectedTLEs) !=
- checkPoint.ThisTimeLineID)
+ checkPoint.ThisTimeLineID)
{
- XLogRecPtr switchpoint;
+ XLogRecPtr switchpoint;
/*
- * tliSwitchPoint will throw an error if the checkpoint's timeline
- * is not in expectedTLEs at all.
+ * tliSwitchPoint will throw an error if the checkpoint's timeline is
+ * not in expectedTLEs at all.
*/
switchpoint = tliSwitchPoint(ControlFile->checkPointCopy.ThisTimeLineID, expectedTLEs, NULL);
ereport(FATAL,
@@ -5146,8 +5149,8 @@ StartupXLOG(void)
* history, too.
*/
if (!XLogRecPtrIsInvalid(ControlFile->minRecoveryPoint) &&
- tliOfPointInHistory(ControlFile->minRecoveryPoint - 1, expectedTLEs) !=
- ControlFile->minRecoveryPointTLI)
+ tliOfPointInHistory(ControlFile->minRecoveryPoint - 1, expectedTLEs) !=
+ ControlFile->minRecoveryPointTLI)
ereport(FATAL,
(errmsg("requested timeline %u does not contain minimum recovery point %X/%X on timeline %u",
recoveryTargetTLI,
@@ -5159,7 +5162,7 @@ StartupXLOG(void)
ereport(DEBUG1,
(errmsg("redo record is at %X/%X; shutdown %s",
- (uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo,
+ (uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo,
wasShutdown ? "TRUE" : "FALSE")));
ereport(DEBUG1,
(errmsg("next transaction ID: %u/%u; next OID: %u",
@@ -5206,16 +5209,16 @@ StartupXLOG(void)
ThisTimeLineID = checkPoint.ThisTimeLineID;
/*
- * Copy any missing timeline history files between 'now' and the
- * recovery target timeline from archive to pg_xlog. While we don't need
- * those files ourselves - the history file of the recovery target
- * timeline covers all the previous timelines in the history too - a
- * cascading standby server might be interested in them. Or, if you
- * archive the WAL from this server to a different archive than the
- * master, it'd be good for all the history files to get archived there
- * after failover, so that you can use one of the old timelines as a
- * PITR target. Timeline history files are small, so it's better to copy
- * them unnecessarily than not copy them and regret later.
+ * Copy any missing timeline history files between 'now' and the recovery
+ * target timeline from archive to pg_xlog. While we don't need those
+ * files ourselves - the history file of the recovery target timeline
+ * covers all the previous timelines in the history too - a cascading
+ * standby server might be interested in them. Or, if you archive the WAL
+ * from this server to a different archive than the master, it'd be good
+ * for all the history files to get archived there after failover, so that
+ * you can use one of the old timelines as a PITR target. Timeline history
+ * files are small, so it's better to copy them unnecessarily than not
+ * copy them and regret later.
*/
restoreTimeLineHistoryFiles(ThisTimeLineID, recoveryTargetTLI);
@@ -5271,10 +5274,10 @@ StartupXLOG(void)
"automatic recovery in progress")));
if (recoveryTargetTLI > ControlFile->checkPointCopy.ThisTimeLineID)
ereport(LOG,
- (errmsg("crash recovery starts in timeline %u "
- "and has target timeline %u",
- ControlFile->checkPointCopy.ThisTimeLineID,
- recoveryTargetTLI)));
+ (errmsg("crash recovery starts in timeline %u "
+ "and has target timeline %u",
+ ControlFile->checkPointCopy.ThisTimeLineID,
+ recoveryTargetTLI)));
ControlFile->state = DB_IN_CRASH_RECOVERY;
}
ControlFile->prevCheckPoint = ControlFile->checkPoint;
@@ -5509,14 +5512,15 @@ StartupXLOG(void)
ereport(LOG,
(errmsg("redo starts at %X/%X",
- (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr)));
+ (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr)));
/*
* main redo apply loop
*/
do
{
- bool switchedTLI = false;
+ bool switchedTLI = false;
+
#ifdef WAL_DEBUG
if (XLOG_DEBUG ||
(rmid == RM_XACT_ID && trace_recovery_messages <= DEBUG2) ||
@@ -5526,8 +5530,8 @@ StartupXLOG(void)
initStringInfo(&buf);
appendStringInfo(&buf, "REDO @ %X/%X; LSN %X/%X: ",
- (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr,
- (uint32) (EndRecPtr >> 32), (uint32) EndRecPtr);
+ (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr,
+ (uint32) (EndRecPtr >> 32), (uint32) EndRecPtr);
xlog_outrec(&buf, record);
appendStringInfo(&buf, " - ");
RmgrTable[record->xl_rmid].rm_desc(&buf,
@@ -5598,13 +5602,13 @@ StartupXLOG(void)
}
/*
- * Before replaying this record, check if this record
- * causes the current timeline to change. The record is
- * already considered to be part of the new timeline,
- * so we update ThisTimeLineID before replaying it.
- * That's important so that replayEndTLI, which is
- * recorded as the minimum recovery point's TLI if
- * recovery stops after this record, is set correctly.
+ * Before replaying this record, check if this record causes
+ * the current timeline to change. The record is already
+ * considered to be part of the new timeline, so we update
+ * ThisTimeLineID before replaying it. That's important so
+ * that replayEndTLI, which is recorded as the minimum
+ * recovery point's TLI if recovery stops after this record,
+ * is set correctly.
*/
if (record->xl_rmid == RM_XLOG_ID)
{
@@ -5622,7 +5626,7 @@ StartupXLOG(void)
}
else if (info == XLOG_END_OF_RECOVERY)
{
- xl_end_of_recovery xlrec;
+ xl_end_of_recovery xlrec;
memcpy(&xlrec, XLogRecGetData(record), sizeof(xl_end_of_recovery));
newTLI = xlrec.ThisTimeLineID;
@@ -5699,7 +5703,7 @@ StartupXLOG(void)
ereport(LOG,
(errmsg("redo done at %X/%X",
- (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr)));
+ (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr)));
xtime = GetLatestXTime();
if (xtime)
ereport(LOG,
@@ -5804,7 +5808,7 @@ StartupXLOG(void)
PrevTimeLineID = ThisTimeLineID;
if (ArchiveRecoveryRequested)
{
- char reason[200];
+ char reason[200];
Assert(InArchiveRecovery);
@@ -5952,8 +5956,9 @@ StartupXLOG(void)
* allows some extra error checking in xlog_redo.
*
* In fast promotion, only create a lightweight end-of-recovery record
- * instead of a full checkpoint. A checkpoint is requested later, after
- * we're fully out of recovery mode and already accepting queries.
+ * instead of a full checkpoint. A checkpoint is requested later,
+ * after we're fully out of recovery mode and already accepting
+ * queries.
*/
if (bgwriterLaunched)
{
@@ -5972,14 +5977,15 @@ StartupXLOG(void)
fast_promoted = true;
/*
- * Insert a special WAL record to mark the end of recovery,
- * since we aren't doing a checkpoint. That means that the
- * checkpointer process may likely be in the middle of a
- * time-smoothed restartpoint and could continue to be for
- * minutes after this. That sounds strange, but the effect
- * is roughly the same and it would be stranger to try to
- * come out of the restartpoint and then checkpoint.
- * We request a checkpoint later anyway, just for safety.
+ * Insert a special WAL record to mark the end of
+ * recovery, since we aren't doing a checkpoint. That
+ * means that the checkpointer process may likely be in
+ * the middle of a time-smoothed restartpoint and could
+ * continue to be for minutes after this. That sounds
+ * strange, but the effect is roughly the same and it
+ * would be stranger to try to come out of the
+ * restartpoint and then checkpoint. We request a
+ * checkpoint later anyway, just for safety.
*/
CreateEndOfRecoveryRecord();
}
@@ -5987,8 +5993,8 @@ StartupXLOG(void)
if (!fast_promoted)
RequestCheckpoint(CHECKPOINT_END_OF_RECOVERY |
- CHECKPOINT_IMMEDIATE |
- CHECKPOINT_WAIT);
+ CHECKPOINT_IMMEDIATE |
+ CHECKPOINT_WAIT);
}
else
CreateCheckPoint(CHECKPOINT_END_OF_RECOVERY | CHECKPOINT_IMMEDIATE);
@@ -6092,8 +6098,8 @@ StartupXLOG(void)
}
/*
- * If there were cascading standby servers connected to us, nudge any
- * wal sender processes to notice that we've been promoted.
+ * If there were cascading standby servers connected to us, nudge any wal
+ * sender processes to notice that we've been promoted.
*/
WalSndWakeup();
@@ -6151,9 +6157,9 @@ CheckRecoveryConsistency(void)
}
/*
- * Have we passed our safe starting point? Note that minRecoveryPoint
- * is known to be incorrectly set if ControlFile->backupEndRequired,
- * until the XLOG_BACKUP_RECORD arrives to advise us of the correct
+ * Have we passed our safe starting point? Note that minRecoveryPoint is
+ * known to be incorrectly set if ControlFile->backupEndRequired, until
+ * the XLOG_BACKUP_RECORD arrives to advise us of the correct
* minRecoveryPoint. All we know prior to that is that we're not
* consistent yet.
*/
@@ -6770,7 +6776,7 @@ CreateCheckPoint(int flags)
uint32 freespace;
XLogSegNo _logSegNo;
VirtualTransactionId *vxids;
- int nvxids;
+ int nvxids;
/*
* An end-of-recovery checkpoint is really a shutdown checkpoint, just
@@ -6946,13 +6952,13 @@ CreateCheckPoint(int flags)
TRACE_POSTGRESQL_CHECKPOINT_START(flags);
/*
- * In some cases there are groups of actions that must all occur on
- * one side or the other of a checkpoint record. Before flushing the
+ * In some cases there are groups of actions that must all occur on one
+ * side or the other of a checkpoint record. Before flushing the
* checkpoint record we must explicitly wait for any backend currently
* performing those groups of actions.
*
* One example is end of transaction, so we must wait for any transactions
- * that are currently in commit critical sections. If an xact inserted
+ * that are currently in commit critical sections. If an xact inserted
* its commit record into XLOG just before the REDO point, then a crash
* restart from the REDO point would not replay that record, which means
* that our flushing had better include the xact's update of pg_clog. So
@@ -6977,7 +6983,7 @@ CreateCheckPoint(int flags)
vxids = GetVirtualXIDsDelayingChkpt(&nvxids);
if (nvxids > 0)
{
- uint32 nwaits = 0;
+ uint32 nwaits = 0;
do
{
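/*
 * A minimal standalone sketch of the wait loop this hunk reindents: poll
 * in short sleeps until no backend in the captured vxid list is still
 * inside its commit critical section.  have_delaying_xids() is a
 * hypothetical stand-in for the backend's HaveVirtualXIDsDelayingChkpt();
 * the 10 ms interval is illustrative.
 */
#include <stdbool.h>
#include <unistd.h>

static int  pending = 3;        /* pretend three rechecks are needed */
static bool have_delaying_xids(void) { return --pending > 0; }

static void
wait_for_delaying_xids(void)
{
    do
        usleep(10000);          /* 10 ms between rechecks */
    while (have_delaying_xids());
}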
@@ -7182,9 +7188,9 @@ CreateCheckPoint(int flags)
void
CreateEndOfRecoveryRecord(void)
{
- xl_end_of_recovery xlrec;
- XLogRecData rdata;
- XLogRecPtr recptr;
+ xl_end_of_recovery xlrec;
+ XLogRecData rdata;
+ XLogRecPtr recptr;
/* sanity check */
if (!RecoveryInProgress())
@@ -7211,8 +7217,8 @@ CreateEndOfRecoveryRecord(void)
XLogFlush(recptr);
/*
- * Update the control file so that crash recovery can follow
- * the timeline changes to this point.
+ * Update the control file so that crash recovery can follow the timeline
+ * changes to this point.
*/
LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
ControlFile->time = (pg_time_t) xlrec.end_time;
@@ -7223,7 +7229,7 @@ CreateEndOfRecoveryRecord(void)
END_CRIT_SECTION();
- LocalXLogInsertAllowed = -1; /* return to "check" state */
+ LocalXLogInsertAllowed = -1; /* return to "check" state */
}
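/*
 * A sketch of the payload this record carries, modelled on the
 * xl_end_of_recovery fields: a timestamp plus the new and previous
 * timeline IDs, which is all crash recovery needs to follow the timeline
 * switch without waiting for a full checkpoint.  Types are local
 * stand-ins, not the backend's declarations.
 */
#include <stdint.h>

typedef struct
{
    int64_t  end_time;         /* TimestampTz: when recovery ended */
    uint32_t ThisTimeLineID;   /* timeline being promoted onto */
    uint32_t PrevTimeLineID;   /* timeline that was being replayed */
} end_of_recovery_sketch;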
/*
@@ -7375,7 +7381,7 @@ CreateRestartPoint(int flags)
{
ereport(DEBUG2,
(errmsg("skipping restartpoint, already performed at %X/%X",
- (uint32) (lastCheckPoint.redo >> 32), (uint32) lastCheckPoint.redo)));
+ (uint32) (lastCheckPoint.redo >> 32), (uint32) lastCheckPoint.redo)));
UpdateMinRecoveryPoint(InvalidXLogRecPtr, true);
if (flags & CHECKPOINT_IS_SHUTDOWN)
@@ -7458,7 +7464,8 @@ CreateRestartPoint(int flags)
XLogRecPtr endptr;
/*
- * Get the current end of xlog replayed or received, whichever is later.
+ * Get the current end of xlog replayed or received, whichever is
+ * later.
*/
receivePtr = GetWalRcvWriteRecPtr(NULL, NULL);
replayPtr = GetXLogReplayRecPtr(NULL);
@@ -7468,8 +7475,8 @@ CreateRestartPoint(int flags)
_logSegNo--;
/*
- * Update ThisTimeLineID to the timeline we're currently replaying,
- * so that we install any recycled segments on that timeline.
+ * Update ThisTimeLineID to the timeline we're currently replaying, so
+ * that we install any recycled segments on that timeline.
*
* There is no guarantee that the WAL segments will be useful on the
* current timeline; if recovery proceeds to a new timeline right
@@ -7480,13 +7487,13 @@ CreateRestartPoint(int flags)
* It's possible or perhaps even likely that we finish recovery while
* a restartpoint is in progress. That means we may get to this point
* some minutes afterwards. Setting ThisTimeLineID at that time would
- * actually set it backwards, so we don't want that to persist; if
- * we do reset it here, make sure to reset it back afterwards. This
+ * actually set it backwards, so we don't want that to persist; if we
+ * do reset it here, make sure to reset it back afterwards. This
* doesn't look very clean or principled, but it's the best of about
* five different ways of handling this edge case.
*/
if (RecoveryInProgress())
- (void) GetXLogReplayRecPtr(&ThisTimeLineID);
+ (void) GetXLogReplayRecPtr(&ThisTimeLineID);
RemoveOldXlogFiles(_logSegNo, endptr);
@@ -7519,7 +7526,7 @@ CreateRestartPoint(int flags)
xtime = GetLatestXTime();
ereport((log_checkpoints ? LOG : DEBUG2),
(errmsg("recovery restart point at %X/%X",
- (uint32) (lastCheckPoint.redo >> 32), (uint32) lastCheckPoint.redo),
+ (uint32) (lastCheckPoint.redo >> 32), (uint32) lastCheckPoint.redo),
xtime ? errdetail("last completed transaction was at log time %s",
timestamptz_to_str(xtime)) : 0));
@@ -7677,10 +7684,10 @@ XLogRestorePoint(const char *rpName)
XLogRecPtr
XLogSaveBufferForHint(Buffer buffer)
{
- XLogRecPtr recptr = InvalidXLogRecPtr;
- XLogRecPtr lsn;
+ XLogRecPtr recptr = InvalidXLogRecPtr;
+ XLogRecPtr lsn;
XLogRecData rdata[2];
- BkpBlock bkpb;
+ BkpBlock bkpb;
/*
* Ensure no checkpoint can change our view of RedoRecPtr.
@@ -7693,8 +7700,8 @@ XLogSaveBufferForHint(Buffer buffer)
GetRedoRecPtr();
/*
- * Setup phony rdata element for use within XLogCheckBuffer only.
- * We reuse and reset rdata for any actual WAL record insert.
+ * Set up a phony rdata element for use within XLogCheckBuffer only. We
+ * reuse and reset rdata for any actual WAL record insert.
*/
rdata[0].buffer = buffer;
rdata[0].buffer_std = true;
@@ -7704,8 +7711,8 @@ XLogSaveBufferForHint(Buffer buffer)
*/
if (XLogCheckBuffer(rdata, false, &lsn, &bkpb))
{
- char copied_buffer[BLCKSZ];
- char *origdata = (char *) BufferGetBlock(buffer);
+ char copied_buffer[BLCKSZ];
+ char *origdata = (char *) BufferGetBlock(buffer);
/*
* Copy buffer so we don't have to worry about concurrent hint bit or
@@ -7714,8 +7721,8 @@ XLogSaveBufferForHint(Buffer buffer)
*/
memcpy(copied_buffer, origdata, bkpb.hole_offset);
memcpy(copied_buffer + bkpb.hole_offset,
- origdata + bkpb.hole_offset + bkpb.hole_length,
- BLCKSZ - bkpb.hole_offset - bkpb.hole_length);
+ origdata + bkpb.hole_offset + bkpb.hole_length,
+ BLCKSZ - bkpb.hole_offset - bkpb.hole_length);
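/*
 * A minimal standalone sketch of the copy just above: squeeze the unused
 * "hole" between pd_lower and pd_upper out of a page image so the backup
 * block is contiguous.  hole_offset/hole_length mirror the BkpBlock
 * fields; BLCKSZ and the function name are local stand-ins.
 */
#include <stdint.h>
#include <string.h>

#define BLCKSZ 8192

static void
copy_page_without_hole(char *dst, const char *src,
                       uint32_t hole_offset, uint32_t hole_length)
{
    memcpy(dst, src, hole_offset);          /* bytes before the hole */
    memcpy(dst + hole_offset,               /* bytes after it, shifted down */
           src + hole_offset + hole_length,
           BLCKSZ - hole_offset - hole_length);
}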
/*
* Header for backup block.
@@ -7861,25 +7868,24 @@ checkTimeLineSwitch(XLogRecPtr lsn, TimeLineID newTLI, TimeLineID prevTLI)
ereport(PANIC,
(errmsg("unexpected prev timeline ID %u (current timeline ID %u) in checkpoint record",
prevTLI, ThisTimeLineID)));
+
/*
- * The new timeline better be in the list of timelines we expect
- * to see, according to the timeline history. It should also not
- * decrease.
+ * The new timeline had better be in the list of timelines we expect to
+ * see, according to the timeline history. It should also not decrease.
*/
if (newTLI < ThisTimeLineID || !tliInHistory(newTLI, expectedTLEs))
ereport(PANIC,
- (errmsg("unexpected timeline ID %u (after %u) in checkpoint record",
- newTLI, ThisTimeLineID)));
+ (errmsg("unexpected timeline ID %u (after %u) in checkpoint record",
+ newTLI, ThisTimeLineID)));
/*
- * If we have not yet reached min recovery point, and we're about
- * to switch to a timeline greater than the timeline of the min
- * recovery point: trouble. After switching to the new timeline,
- * we could not possibly visit the min recovery point on the
- * correct timeline anymore. This can happen if there is a newer
- * timeline in the archive that branched before the timeline the
- * min recovery point is on, and you attempt to do PITR to the
- * new timeline.
+ * If we have not yet reached min recovery point, and we're about to
+ * switch to a timeline greater than the timeline of the min recovery
+ * point: trouble. After switching to the new timeline, we could not
+ * possibly visit the min recovery point on the correct timeline anymore.
+ * This can happen if there is a newer timeline in the archive that
+ * branched before the timeline the min recovery point is on, and you
+ * attempt to do PITR to the new timeline.
*/
if (!XLogRecPtrIsInvalid(minRecoveryPoint) &&
lsn < minRecoveryPoint &&
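/*
 * A condensed sketch of the check being assembled above: a switch to a
 * timeline newer than the one minRecoveryPoint lives on is only safe once
 * replay has passed minRecoveryPoint.  Plain integers stand in for
 * XLogRecPtr and TimeLineID; zero plays the role of InvalidXLogRecPtr.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
timeline_switch_is_safe(uint64_t lsn, uint64_t min_recovery_point,
                        uint32_t new_tli, uint32_t min_recovery_tli)
{
    if (min_recovery_point == 0)        /* no constraint yet */
        return true;
    if (lsn >= min_recovery_point)      /* already past the safe point */
        return true;
    return new_tli <= min_recovery_tli; /* must not jump past its timeline */
}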
@@ -8101,21 +8107,21 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
}
else if (info == XLOG_HINT)
{
- char *data;
- BkpBlock bkpb;
+ char *data;
+ BkpBlock bkpb;
/*
- * Hint bit records contain a backup block stored "inline" in the normal
- * data since the locking when writing hint records isn't sufficient to
- * use the normal backup block mechanism, which assumes exclusive lock
- * on the buffer supplied.
+ * Hint bit records contain a backup block stored "inline" in the
+ * normal data since the locking when writing hint records isn't
+ * sufficient to use the normal backup block mechanism, which assumes
+ * exclusive lock on the buffer supplied.
*
- * Since the only change in these backup block are hint bits, there are
- * no recovery conflicts generated.
+ * Since the only change in these backup blocks is hint bits, there
+ * are no recovery conflicts generated.
*
- * This also means there is no corresponding API call for this,
- * so an smgr implementation has no need to implement anything.
- * Which means nothing is needed in md.c etc
+ * This also means there is no corresponding API call for this, so an
+ * smgr implementation has no need to implement anything, which means
+ * nothing is needed in md.c etc.
*/
data = XLogRecGetData(record);
memcpy(&bkpb, data, sizeof(BkpBlock));
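/*
 * A standalone sketch of what redo then does with that inline image:
 * re-expand the hole-compressed data into a full page, zero-filling the
 * hole.  BLCKSZ and the names are local stand-ins for illustration.
 */
#include <stdint.h>
#include <string.h>

#define BLCKSZ 8192

static void
restore_page_with_hole(char *page, const char *img,
                       uint32_t hole_offset, uint32_t hole_length)
{
    memcpy(page, img, hole_offset);               /* prefix of the page */
    memset(page + hole_offset, 0, hole_length);   /* zero out the hole */
    memcpy(page + hole_offset + hole_length,      /* suffix, shifted back up */
           img + hole_offset,
           BLCKSZ - hole_offset - hole_length);
}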
@@ -8318,7 +8324,7 @@ assign_xlog_sync_method(int new_sync_method, void *extra)
ereport(PANIC,
(errcode_for_file_access(),
errmsg("could not fsync log segment %s: %m",
- XLogFileNameP(ThisTimeLineID, openLogSegNo))));
+ XLogFileNameP(ThisTimeLineID, openLogSegNo))));
if (get_sync_bit(sync_method) != get_sync_bit(new_sync_method))
XLogFileClose();
}
@@ -8349,8 +8355,8 @@ issue_xlog_fsync(int fd, XLogSegNo segno)
if (pg_fsync_writethrough(fd) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not fsync write-through log file %s: %m",
- XLogFileNameP(ThisTimeLineID, segno))));
+ errmsg("could not fsync write-through log file %s: %m",
+ XLogFileNameP(ThisTimeLineID, segno))));
break;
#endif
#ifdef HAVE_FDATASYNC
@@ -8379,6 +8385,7 @@ char *
XLogFileNameP(TimeLineID tli, XLogSegNo segno)
{
char *result = palloc(MAXFNAMELEN);
+
XLogFileName(result, tli, segno);
return result;
}
@@ -8630,9 +8637,9 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
"%Y-%m-%d %H:%M:%S %Z",
pg_localtime(&stamp_time, log_timezone));
appendStringInfo(&labelfbuf, "START WAL LOCATION: %X/%X (file %s)\n",
- (uint32) (startpoint >> 32), (uint32) startpoint, xlogfilename);
+ (uint32) (startpoint >> 32), (uint32) startpoint, xlogfilename);
appendStringInfo(&labelfbuf, "CHECKPOINT LOCATION: %X/%X\n",
- (uint32) (checkpointloc >> 32), (uint32) checkpointloc);
+ (uint32) (checkpointloc >> 32), (uint32) checkpointloc);
appendStringInfo(&labelfbuf, "BACKUP METHOD: %s\n",
exclusive ? "pg_start_backup" : "streamed");
appendStringInfo(&labelfbuf, "BACKUP FROM: %s\n",
@@ -8936,10 +8943,10 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL generated with full_page_writes=off was replayed "
"during online backup"),
- errhint("This means that the backup being taken on the standby "
- "is corrupt and should not be used. "
+ errhint("This means that the backup being taken on the standby "
+ "is corrupt and should not be used. "
"Enable full_page_writes and run CHECKPOINT on the master, "
- "and then try an online backup again.")));
+ "and then try an online backup again.")));
LWLockAcquire(ControlFileLock, LW_SHARED);
@@ -8990,7 +8997,7 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
errmsg("could not create file \"%s\": %m",
histfilepath)));
fprintf(fp, "START WAL LOCATION: %X/%X (file %s)\n",
- (uint32) (startpoint >> 32), (uint32) startpoint, startxlogfilename);
+ (uint32) (startpoint >> 32), (uint32) startpoint, startxlogfilename);
fprintf(fp, "STOP WAL LOCATION: %X/%X (file %s)\n",
(uint32) (stoppoint >> 32), (uint32) stoppoint, stopxlogfilename);
/* transfer remaining lines from label to history file */
@@ -9366,10 +9373,10 @@ XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen,
XLogRecPtr targetRecPtr, char *readBuf, TimeLineID *readTLI)
{
XLogPageReadPrivate *private =
- (XLogPageReadPrivate *) xlogreader->private_data;
+ (XLogPageReadPrivate *) xlogreader->private_data;
int emode = private->emode;
uint32 targetPageOff;
- XLogSegNo targetSegNo PG_USED_FOR_ASSERTS_ONLY;
+ XLogSegNo targetSegNo PG_USED_FOR_ASSERTS_ONLY;
XLByteToSeg(targetPagePtr, targetSegNo);
targetPageOff = targetPagePtr % XLogSegSize;
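/*
 * A minimal sketch of the arithmetic behind XLByteToSeg and targetPageOff:
 * a 64-bit WAL pointer decomposes into a segment number and a byte offset
 * within the segment.  The 16 MB segment size is the default build value.
 */
#include <inttypes.h>
#include <stdio.h>

#define XLOG_SEG_SIZE (16 * 1024 * 1024)
#define XLOG_BLCKSZ   8192

int
main(void)
{
    uint64_t pageptr = UINT64_C(0x20A1B2000);   /* page-aligned LSN */
    uint64_t segno   = pageptr / XLOG_SEG_SIZE;
    uint32_t pageoff = (uint32_t) (pageptr % XLOG_SEG_SIZE);

    printf("segment %" PRIu64 ", page offset %" PRIu32 "\n", segno, pageoff);
    return 0;
}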
@@ -9448,24 +9455,24 @@ retry:
readOff = targetPageOff;
if (lseek(readFile, (off_t) readOff, SEEK_SET) < 0)
{
- char fname[MAXFNAMELEN];
+ char fname[MAXFNAMELEN];
XLogFileName(fname, curFileTLI, readSegNo);
ereport(emode_for_corrupt_record(emode, targetPagePtr + reqLen),
(errcode_for_file_access(),
- errmsg("could not seek in log segment %s to offset %u: %m",
+ errmsg("could not seek in log segment %s to offset %u: %m",
fname, readOff)));
goto next_record_is_invalid;
}
if (read(readFile, readBuf, XLOG_BLCKSZ) != XLOG_BLCKSZ)
{
- char fname[MAXFNAMELEN];
+ char fname[MAXFNAMELEN];
XLogFileName(fname, curFileTLI, readSegNo);
ereport(emode_for_corrupt_record(emode, targetPagePtr + reqLen),
(errcode_for_file_access(),
- errmsg("could not read from log segment %s, offset %u: %m",
+ errmsg("could not read from log segment %s, offset %u: %m",
fname, readOff)));
goto next_record_is_invalid;
}
@@ -9524,12 +9531,12 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
bool fetching_ckpt, XLogRecPtr tliRecPtr)
{
static pg_time_t last_fail_time = 0;
- pg_time_t now;
+ pg_time_t now;
/*-------
* Standby mode is implemented by a state machine:
*
- * 1. Read from archive (XLOG_FROM_ARCHIVE)
+ * 1. Read from archive (XLOG_FROM_ARCHIVE)
* 2. Read from pg_xlog (XLOG_FROM_PG_XLOG)
* 3. Check trigger file
* 4. Read from primary server via walreceiver (XLOG_FROM_STREAM)
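/*
 * A condensed standalone sketch of the failure transitions in that state
 * machine.  The enum mirrors the XLOG_FROM_* sources; next_source() is a
 * simplification for illustration, not the backend's control flow (which
 * also checks the trigger file and gives up when not in standby mode).
 */
typedef enum { FROM_ARCHIVE, FROM_PG_XLOG, FROM_STREAM } WalSource;

static WalSource
next_source_after_failure(WalSource cur)
{
    switch (cur)
    {
        case FROM_ARCHIVE:
            return FROM_PG_XLOG;    /* archive failed: try local pg_xlog */
        case FROM_PG_XLOG:
            return FROM_STREAM;     /* both failed: try streaming */
        case FROM_STREAM:
        default:
            return FROM_ARCHIVE;    /* stream dropped: loop back to archive */
    }
}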
@@ -9554,7 +9561,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
for (;;)
{
- int oldSource = currentSource;
+ int oldSource = currentSource;
/*
* First check if we failed to read from the current source, and
@@ -9571,11 +9578,12 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
break;
case XLOG_FROM_PG_XLOG:
+
/*
- * Check to see if the trigger file exists. Note that we do
- * this only after failure, so when you create the trigger
- * file, we still finish replaying as much as we can from
- * archive and pg_xlog before failover.
+ * Check to see if the trigger file exists. Note that we
+ * do this only after failure, so when you create the
+ * trigger file, we still finish replaying as much as we
+ * can from archive and pg_xlog before failover.
*/
if (StandbyMode && CheckForStandbyTrigger())
{
@@ -9584,15 +9592,15 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
}
/*
- * Not in standby mode, and we've now tried the archive and
- * pg_xlog.
+ * Not in standby mode, and we've now tried the archive
+ * and pg_xlog.
*/
if (!StandbyMode)
return false;
/*
- * If primary_conninfo is set, launch walreceiver to try to
- * stream the missing WAL.
+ * If primary_conninfo is set, launch walreceiver to try
+ * to stream the missing WAL.
*
* If fetching_ckpt is TRUE, RecPtr points to the initial
* checkpoint location. In that case, we use RedoStartLSN
@@ -9602,8 +9610,8 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
*/
if (PrimaryConnInfo)
{
- XLogRecPtr ptr;
- TimeLineID tli;
+ XLogRecPtr ptr;
+ TimeLineID tli;
if (fetching_ckpt)
{
@@ -9624,28 +9632,32 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
RequestXLogStreaming(tli, ptr, PrimaryConnInfo);
receivedUpto = 0;
}
+
/*
- * Move to XLOG_FROM_STREAM state in either case. We'll get
- * immediate failure if we didn't launch walreceiver, and
- * move on to the next state.
+ * Move to XLOG_FROM_STREAM state in either case. We'll
+ * get immediate failure if we didn't launch walreceiver,
+ * and move on to the next state.
*/
currentSource = XLOG_FROM_STREAM;
break;
case XLOG_FROM_STREAM:
+
/*
- * Failure while streaming. Most likely, we got here because
- * streaming replication was terminated, or promotion was
- * triggered. But we also get here if we find an invalid
- * record in the WAL streamed from master, in which case
- * something is seriously wrong. There's little chance that
- * the problem will just go away, but PANIC is not good for
- * availability either, especially in hot standby mode. So,
- * we treat that the same as disconnection, and retry from
- * archive/pg_xlog again. The WAL in the archive should be
- * identical to what was streamed, so it's unlikely that it
- * helps, but one can hope...
+ * Failure while streaming. Most likely, we got here
+ * because streaming replication was terminated, or
+ * promotion was triggered. But we also get here if we
+ * find an invalid record in the WAL streamed from master,
+ * in which case something is seriously wrong. There's
+ * little chance that the problem will just go away, but
+ * PANIC is not good for availability either, especially
+ * in hot standby mode. So, we treat that the same as
+ * disconnection, and retry from archive/pg_xlog again.
+ * The WAL in the archive should be identical to what was
+ * streamed, so it's unlikely that it helps, but one can
+ * hope...
*/
+
/*
* Before we leave XLOG_FROM_STREAM state, make sure that
* walreceiver is not active, so that it won't overwrite
@@ -9668,11 +9680,12 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
}
/*
- * XLOG_FROM_STREAM is the last state in our state machine,
- * so we've exhausted all the options for obtaining the
- * requested WAL. We're going to loop back and retry from
- * the archive, but if it hasn't been long since last
- * attempt, sleep 5 seconds to avoid busy-waiting.
+ * XLOG_FROM_STREAM is the last state in our state
+ * machine, so we've exhausted all the options for
+ * obtaining the requested WAL. We're going to loop back
+ * and retry from the archive, but if it hasn't been long
+ * since last attempt, sleep 5 seconds to avoid
+ * busy-waiting.
*/
now = (pg_time_t) time(NULL);
if ((now - last_fail_time) < 5)
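/*
 * A sketch of that retry throttle: if the previous failure was less than
 * five seconds ago, wait out the remainder before retrying the archive.
 * The backend waits on its recovery latch; plain sleep() stands in here.
 */
#include <time.h>
#include <unistd.h>

static time_t last_fail_time = 0;

static void
throttle_retry(void)
{
    time_t now = time(NULL);

    if (now - last_fail_time < 5)
        sleep((unsigned) (5 - (now - last_fail_time)));
    last_fail_time = now;
}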
@@ -9691,9 +9704,9 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
else if (currentSource == XLOG_FROM_PG_XLOG)
{
/*
- * We just successfully read a file in pg_xlog. We prefer files
- * in the archive over ones in pg_xlog, so try the next file
- * again from the archive first.
+ * We just successfully read a file in pg_xlog. We prefer files in
+ * the archive over ones in pg_xlog, so try the next file again
+ * from the archive first.
*/
if (InArchiveRecovery)
currentSource = XLOG_FROM_ARCHIVE;
@@ -9739,107 +9752,110 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
break;
case XLOG_FROM_STREAM:
- {
- bool havedata;
-
- /*
- * Check if WAL receiver is still active.
- */
- if (!WalRcvStreaming())
- {
- lastSourceFailed = true;
- break;
- }
-
- /*
- * Walreceiver is active, so see if new data has arrived.
- *
- * We only advance XLogReceiptTime when we obtain fresh WAL
- * from walreceiver and observe that we had already processed
- * everything before the most recent "chunk" that it flushed to
- * disk. In steady state where we are keeping up with the
- * incoming data, XLogReceiptTime will be updated on each cycle.
- * When we are behind, XLogReceiptTime will not advance, so the
- * grace time allotted to conflicting queries will decrease.
- */
- if (RecPtr < receivedUpto)
- havedata = true;
- else
{
- XLogRecPtr latestChunkStart;
+ bool havedata;
- receivedUpto = GetWalRcvWriteRecPtr(&latestChunkStart, &receiveTLI);
- if (RecPtr < receivedUpto && receiveTLI == curFileTLI)
+ /*
+ * Check if WAL receiver is still active.
+ */
+ if (!WalRcvStreaming())
{
+ lastSourceFailed = true;
+ break;
+ }
+
+ /*
+ * Walreceiver is active, so see if new data has arrived.
+ *
+ * We only advance XLogReceiptTime when we obtain fresh
+ * WAL from walreceiver and observe that we had already
+ * processed everything before the most recent "chunk"
+ * that it flushed to disk. In steady state where we are
+ * keeping up with the incoming data, XLogReceiptTime will
+ * be updated on each cycle. When we are behind,
+ * XLogReceiptTime will not advance, so the grace time
+ * allotted to conflicting queries will decrease.
+ */
+ if (RecPtr < receivedUpto)
havedata = true;
- if (latestChunkStart <= RecPtr)
+ else
+ {
+ XLogRecPtr latestChunkStart;
+
+ receivedUpto = GetWalRcvWriteRecPtr(&latestChunkStart, &receiveTLI);
+ if (RecPtr < receivedUpto && receiveTLI == curFileTLI)
{
- XLogReceiptTime = GetCurrentTimestamp();
- SetCurrentChunkStartTime(XLogReceiptTime);
+ havedata = true;
+ if (latestChunkStart <= RecPtr)
+ {
+ XLogReceiptTime = GetCurrentTimestamp();
+ SetCurrentChunkStartTime(XLogReceiptTime);
+ }
}
+ else
+ havedata = false;
}
- else
- havedata = false;
- }
- if (havedata)
- {
- /*
- * Great, streamed far enough. Open the file if it's not
- * open already. Also read the timeline history file if
- * we haven't initialized timeline history yet; it should
- * be streamed over and present in pg_xlog by now. Use
- * XLOG_FROM_STREAM so that source info is set correctly
- * and XLogReceiptTime isn't changed.
- */
- if (readFile < 0)
+ if (havedata)
{
- if (!expectedTLEs)
- expectedTLEs = readTimeLineHistory(receiveTLI);
- readFile = XLogFileRead(readSegNo, PANIC,
- receiveTLI,
- XLOG_FROM_STREAM, false);
- Assert(readFile >= 0);
+ /*
+ * Great, streamed far enough. Open the file if it's
+ * not open already. Also read the timeline history
+ * file if we haven't initialized timeline history
+ * yet; it should be streamed over and present in
+ * pg_xlog by now. Use XLOG_FROM_STREAM so that
+ * source info is set correctly and XLogReceiptTime
+ * isn't changed.
+ */
+ if (readFile < 0)
+ {
+ if (!expectedTLEs)
+ expectedTLEs = readTimeLineHistory(receiveTLI);
+ readFile = XLogFileRead(readSegNo, PANIC,
+ receiveTLI,
+ XLOG_FROM_STREAM, false);
+ Assert(readFile >= 0);
+ }
+ else
+ {
+ /* just make sure source info is correct... */
+ readSource = XLOG_FROM_STREAM;
+ XLogReceiptSource = XLOG_FROM_STREAM;
+ return true;
+ }
+ break;
}
- else
+
+ /*
+ * Data not here yet. Check for trigger, then wait for
+ * walreceiver to wake us up when new WAL arrives.
+ */
+ if (CheckForStandbyTrigger())
{
- /* just make sure source info is correct... */
- readSource = XLOG_FROM_STREAM;
- XLogReceiptSource = XLOG_FROM_STREAM;
- return true;
+ /*
+ * Note that we don't "return false" immediately here.
+ * After being triggered, we still want to replay all
+ * the WAL that was already streamed. It's in pg_xlog
+ * now, so we just treat this as a failure, and the
+ * state machine will move on to replay the streamed
+ * WAL from pg_xlog, and then recheck the trigger and
+ * exit replay.
+ */
+ lastSourceFailed = true;
+ break;
}
- break;
- }
- /*
- * Data not here yet. Check for trigger, then wait for
- * walreceiver to wake us up when new WAL arrives.
- */
- if (CheckForStandbyTrigger())
- {
/*
- * Note that we don't "return false" immediately here.
- * After being triggered, we still want to replay all the
- * WAL that was already streamed. It's in pg_xlog now, so
- * we just treat this as a failure, and the state machine
- * will move on to replay the streamed WAL from pg_xlog,
- * and then recheck the trigger and exit replay.
+ * Wait for more WAL to arrive. Time out after 5 seconds,
+ * like when polling the archive, to react to a trigger
+ * file promptly.
*/
- lastSourceFailed = true;
+ WaitLatch(&XLogCtl->recoveryWakeupLatch,
+ WL_LATCH_SET | WL_TIMEOUT,
+ 5000L);
+ ResetLatch(&XLogCtl->recoveryWakeupLatch);
break;
}
- /*
- * Wait for more WAL to arrive. Time out after 5 seconds, like
- * when polling the archive, to react to a trigger file
- * promptly.
- */
- WaitLatch(&XLogCtl->recoveryWakeupLatch,
- WL_LATCH_SET | WL_TIMEOUT,
- 5000L);
- ResetLatch(&XLogCtl->recoveryWakeupLatch);
- break;
- }
-
default:
elog(ERROR, "unexpected WAL source %d", currentSource);
}
@@ -9903,11 +9919,10 @@ CheckForStandbyTrigger(void)
if (IsPromoteTriggered())
{
/*
- * In 9.1 and 9.2 the postmaster unlinked the promote file
- * inside the signal handler. We now leave the file in place
- * and let the Startup process do the unlink. This allows
- * Startup to know whether we're doing fast or normal
- * promotion. Fast promotion takes precedence.
+ * In 9.1 and 9.2 the postmaster unlinked the promote file inside the
+ * signal handler. We now leave the file in place and let the Startup
+ * process do the unlink. This allows Startup to know whether we're
+ * doing fast or normal promotion. Fast promotion takes precedence.
*/
if (stat(FAST_PROMOTE_SIGNAL_FILE, &stat_buf) == 0)
{
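/*
 * A standalone sketch of the two-file check described above.  The names
 * "fast_promote" and "promote" follow the backend's signal-file
 * convention; error handling is omitted for brevity.
 */
#include <stdbool.h>
#include <sys/stat.h>
#include <unistd.h>

static bool
check_promote_files(bool *fast)
{
    struct stat st;

    if (stat("fast_promote", &st) == 0)     /* fast promotion wins */
    {
        *fast = true;
        unlink("fast_promote");
        return true;
    }
    if (stat("promote", &st) == 0)
    {
        *fast = false;
        unlink("promote");
        return true;
    }
    return false;
}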
diff --git a/src/backend/access/transam/xlogarchive.c b/src/backend/access/transam/xlogarchive.c
index 0c178c55c87..342975c7b64 100644
--- a/src/backend/access/transam/xlogarchive.c
+++ b/src/backend/access/transam/xlogarchive.c
@@ -87,9 +87,9 @@ RestoreArchivedFile(char *path, const char *xlogfname,
* of log segments that weren't yet transferred to the archive.
*
* Notice that we don't actually overwrite any files when we copy back
- * from archive because the restore_command may inadvertently
- * restore inappropriate xlogs, or they may be corrupt, so we may wish to
- * fallback to the segments remaining in current XLOGDIR later. The
+ * from archive because the restore_command may inadvertently restore
+ * inappropriate xlogs, or they may be corrupt, so we may wish to fall
+ * back to the segments remaining in the current XLOGDIR later. The
* copy-from-archive filename is always the same, ensuring that we don't
* run out of disk space on long recoveries.
*/
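/*
 * A sketch of that fixed staging name: every restored segment is first
 * written to one well-known file and only then examined and renamed, so a
 * long recovery cannot pile up restored copies.  The directory and the
 * "RECOVERYXLOG" name follow the backend's convention for WAL segments.
 */
#include <stdio.h>

static void
build_restore_target(char *path, size_t len)
{
    snprintf(path, len, "%s/%s", "pg_xlog", "RECOVERYXLOG");
}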
@@ -433,19 +433,20 @@ KeepFileRestoredFromArchive(char *path, char *xlogfname)
if (stat(xlogfpath, &statbuf) == 0)
{
- char oldpath[MAXPGPATH];
+ char oldpath[MAXPGPATH];
+
#ifdef WIN32
static unsigned int deletedcounter = 1;
+
/*
- * On Windows, if another process (e.g a walsender process) holds
- * the file open in FILE_SHARE_DELETE mode, unlink will succeed,
- * but the file will still show up in directory listing until the
- * last handle is closed, and we cannot rename the new file in its
- * place until that. To avoid that problem, rename the old file to
- * a temporary name first. Use a counter to create a unique
- * filename, because the same file might be restored from the
- * archive multiple times, and a walsender could still be holding
- * onto an old deleted version of it.
+ * On Windows, if another process (e.g. a walsender process) holds the
+ * file open in FILE_SHARE_DELETE mode, unlink will succeed, but the
+ * file will still show up in directory listings until the last handle
+ * is closed, and we cannot rename the new file into its place until
+ * then. To avoid that problem, rename the old file to a temporary
+ * name first. Use a counter to create a unique filename, because the
+ * same file might be restored from the archive multiple times, and a
+ * walsender could still be holding onto an old deleted version of it.
*/
snprintf(oldpath, MAXPGPATH, "%s.deleted%u",
xlogfpath, deletedcounter++);
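/*
 * A standalone sketch of the rename-aside dance: park the old segment
 * under a unique ".deletedN" name, unlink it, then install the restored
 * file.  Error handling is omitted; paths and names are illustrative.
 */
#include <stdio.h>
#include <unistd.h>

static unsigned deletedcounter = 1;

static void
replace_wal_file(const char *newpath, const char *finalpath)
{
    char oldpath[1024];

    snprintf(oldpath, sizeof(oldpath), "%s.deleted%u",
             finalpath, deletedcounter++);
    rename(finalpath, oldpath);     /* move the old file out of the way */
    unlink(oldpath);                /* directory entry may linger on Windows */
    rename(newpath, finalpath);     /* swap the restored segment in */
}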
@@ -474,17 +475,17 @@ KeepFileRestoredFromArchive(char *path, char *xlogfname)
path, xlogfpath)));
/*
- * Create .done file forcibly to prevent the restored segment from
- * being archived again later.
+ * Create .done file forcibly to prevent the restored segment from being
+ * archived again later.
*/
XLogArchiveForceDone(xlogfname);
/*
- * If the existing file was replaced, since walsenders might have it
- * open, request them to reload a currently-open segment. This is only
- * required for WAL segments, walsenders don't hold other files open, but
- * there's no harm in doing this too often, and we don't know what kind
- * of a file we're dealing with here.
+ * If the existing file was replaced, since walsenders might have it open,
+ * request them to reload a currently-open segment. This is only required
+ * for WAL segments, as walsenders don't hold other files open, but there's
+ * no harm in doing this too often, and we don't know what kind of a file
+ * we're dealing with here.
*/
if (reload)
WalSndRqstFileReload();
diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c
index b6bb6773d6b..b7950f77a65 100644
--- a/src/backend/access/transam/xlogfuncs.c
+++ b/src/backend/access/transam/xlogfuncs.c
@@ -545,8 +545,8 @@ pg_xlog_location_diff(PG_FUNCTION_ARGS)
* XXX: this won't handle values higher than 2^63 correctly.
*/
result = DatumGetNumeric(DirectFunctionCall2(numeric_sub,
- DirectFunctionCall1(int8_numeric, Int64GetDatum((int64) bytes1)),
- DirectFunctionCall1(int8_numeric, Int64GetDatum((int64) bytes2))));
+ DirectFunctionCall1(int8_numeric, Int64GetDatum((int64) bytes1)),
+ DirectFunctionCall1(int8_numeric, Int64GetDatum((int64) bytes2))));
PG_RETURN_NUMERIC(result);
}
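/*
 * A minimal sketch of the subtraction being wrapped in numerics above,
 * done on the "%X/%X" text form of an LSN.  Plain int64 arithmetic shows
 * exactly the 2^63 limitation the XXX comment concedes; names are local.
 */
#include <inttypes.h>
#include <stdio.h>

static uint64_t
parse_lsn(const char *s)
{
    uint32_t hi, lo;

    sscanf(s, "%" SCNx32 "/%" SCNx32, &hi, &lo);
    return ((uint64_t) hi << 32) | lo;
}

int
main(void)
{
    int64_t diff = (int64_t) (parse_lsn("16/B374D848") -
                              parse_lsn("16/B3748000"));

    printf("%" PRId64 " bytes\n", diff);
    return 0;
}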
@@ -584,7 +584,7 @@ pg_backup_start_time(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not read file \"%s\": %m",
- BACKUP_LABEL_FILE)));
+ BACKUP_LABEL_FILE)));
PG_RETURN_NULL();
}
@@ -602,13 +602,13 @@ pg_backup_start_time(PG_FUNCTION_ARGS)
if (ferror(lfp))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read file \"%s\": %m", BACKUP_LABEL_FILE)));
+ errmsg("could not read file \"%s\": %m", BACKUP_LABEL_FILE)));
/* Close the backup label file. */
if (FreeFile(lfp))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not close file \"%s\": %m", BACKUP_LABEL_FILE)));
+ errmsg("could not close file \"%s\": %m", BACKUP_LABEL_FILE)));
if (strlen(backup_start_time) == 0)
ereport(ERROR,
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index a5e2b50fe6b..fc6ff806440 100644
--- a/src/backend/access/transam/xlogreader.c
+++ b/src/backend/access/transam/xlogreader.c
@@ -221,9 +221,9 @@ XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg)
targetRecOff = RecPtr % XLOG_BLCKSZ;
/*
- * Read the page containing the record into state->readBuf. Request
- * enough byte to cover the whole record header, or at least the part of
- * it that fits on the same page.
+ * Read the page containing the record into state->readBuf. Request enough
+ * bytes to cover the whole record header, or at least the part of it that
+ * fits on the same page.
*/
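/*
 * A sketch of the request-size computation this comment describes: ask
 * for the whole record header, capped at the page boundary.  The header
 * size constant is an illustrative stand-in; see SizeOfXLogRecord for
 * the real value.
 */
#include <stdint.h>

#define XLOG_BLCKSZ      8192
#define REC_HEADER_SIZE  32      /* illustrative stand-in */

static uint32_t
header_read_len(uint64_t rec_ptr)
{
    uint32_t rec_off = (uint32_t) (rec_ptr % XLOG_BLCKSZ);
    uint32_t want = rec_off + REC_HEADER_SIZE;

    return want < XLOG_BLCKSZ ? want : XLOG_BLCKSZ;
}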
readOff = ReadPageInternal(state,
targetPagePtr,