Diffstat (limited to 'src/backend/access')
-rw-r--r--  src/backend/access/transam/twophase.c |  2
-rw-r--r--  src/backend/access/transam/xlog.c     | 36
2 files changed, 30 insertions(+), 8 deletions(-)
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 69af75c6b64..6e84cd0a216 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -327,7 +327,7 @@ MarkAsPreparing(TransactionId xid, const char *gid,
     proc->databaseId = databaseid;
     proc->roleId = owner;
     proc->lwWaiting = false;
-    proc->lwExclusive = false;
+    proc->lwWaitMode = 0;
     proc->lwWaitLink = NULL;
     proc->waitLock = NULL;
     proc->waitProcLock = NULL;
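
The twophase.c hunk above tracks a rename in the PGPROC lightweight-lock wait fields: the boolean lwExclusive becomes an integer lwWaitMode, so the dummy PGPROC that MarkAsPreparing() sets up for a prepared transaction is now initialized with a wait mode of 0 rather than false. A minimal sketch of what that initialization covers, assuming a trimmed-down stand-in struct (only lwWaiting, lwWaitMode, and lwWaitLink correspond to real fields; SketchProc and the init helper are invented for illustration):

/*
 * Illustrative stand-in only: the real struct is PGPROC in
 * src/include/storage/proc.h.  SketchProc and the helper below are invented
 * names; only lwWaiting, lwWaitMode, and lwWaitLink mirror real fields.
 */
#include <stdbool.h>
#include <stddef.h>

typedef struct SketchProc
{
    bool        lwWaiting;      /* true if waiting for a lightweight lock */
    int         lwWaitMode;     /* lock mode being waited for; replaces the old
                                 * bool lwExclusive, presumably so that more
                                 * than two wait modes (e.g. the "wait until
                                 * free" behavior used in xlog.c below) can be
                                 * represented */
    struct SketchProc *lwWaitLink;  /* next waiter in the lock's wait queue */
} SketchProc;

static void
sketch_init_lwlock_wait_fields(SketchProc *proc)
{
    /*
     * Mirrors the initialization in MarkAsPreparing(): the mode value only
     * matters while lwWaiting is true, so 0 is just a neutral starting value.
     */
    proc->lwWaiting = false;
    proc->lwWaitMode = 0;
    proc->lwWaitLink = NULL;
}

int
main(void)
{
    SketchProc  dummy;

    sketch_init_lwlock_wait_fields(&dummy);
    return dummy.lwWaiting ? 1 : 0;
}
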
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 4b273a8318f..cce87a3cd30 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -2118,23 +2118,43 @@ XLogFlush(XLogRecPtr record)
     /* initialize to given target; may increase below */
     WriteRqstPtr = record;
-    /* read LogwrtResult and update local state */
+    /*
+     * Now wait until we get the write lock, or someone else does the
+     * flush for us.
+     */
+    for (;;)
     {
         /* use volatile pointer to prevent code rearrangement */
         volatile XLogCtlData *xlogctl = XLogCtl;
+        /* read LogwrtResult and update local state */
         SpinLockAcquire(&xlogctl->info_lck);
         if (XLByteLT(WriteRqstPtr, xlogctl->LogwrtRqst.Write))
             WriteRqstPtr = xlogctl->LogwrtRqst.Write;
         LogwrtResult = xlogctl->LogwrtResult;
         SpinLockRelease(&xlogctl->info_lck);
-    }
-    /* done already? */
-    if (!XLByteLE(record, LogwrtResult.Flush))
-    {
-        /* now wait for the write lock */
-        LWLockAcquire(WALWriteLock, LW_EXCLUSIVE);
+        /* done already? */
+        if (XLByteLE(record, LogwrtResult.Flush))
+            break;
+
+        /*
+         * Try to get the write lock. If we can't get it immediately, wait
+         * until it's released, and recheck if we still need to do the flush
+         * or if the backend that held the lock did it for us already. This
+         * helps to maintain a good rate of group committing when the system
+         * is bottlenecked by the speed of fsyncing.
+         */
+        if (!LWLockWaitUntilFree(WALWriteLock, LW_EXCLUSIVE))
+        {
+            /*
+             * The lock is now free, but we didn't acquire it yet. Before we
+             * do, loop back to check if someone else flushed the record for
+             * us already.
+             */
+            continue;
+        }
+        /* Got the lock */
         LogwrtResult = XLogCtl->Write.LogwrtResult;
         if (!XLByteLE(record, LogwrtResult.Flush))
         {
@@ -2163,6 +2183,8 @@ XLogFlush(XLogRecPtr record)
             XLogWrite(WriteRqst, false, false);
         }
         LWLockRelease(WALWriteLock);
+        /* done */
+        break;
     }

     END_CRIT_SECTION();
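
Taken together, the xlog.c hunks change XLogFlush() from unconditionally queueing on WALWriteLock to looping: each pass rereads the shared flush pointer, stops early if another backend has already flushed past the requested record, and only performs the write/fsync itself if it actually obtains the lock. When LWLockWaitUntilFree() returns false, the backend merely waited for the lock to be released, so it loops back rather than flushing, because the previous holder has very likely flushed its record too. Below is a minimal, self-contained sketch of that acquire-or-wait-then-recheck idiom, using POSIX threads as a stand-in for LWLocks and the info_lck spinlock; ModelLock, model_acquire_or_wait(), flush_record(), and flushed_upto are invented for illustration and are not PostgreSQL APIs.

/*
 * Sketch only: models the retry loop in the new XLogFlush(), not the real
 * LWLock implementation.  POSIX primitives stand in for WALWriteLock and
 * the info_lck spinlock; "flushing" just advances a shared pointer.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t RecPtr;            /* stand-in for XLogRecPtr */

/* A tiny lock with "acquire, or just wait until it is free" semantics. */
typedef struct ModelLock
{
    pthread_mutex_t mtx;
    pthread_cond_t  released;
    bool            held;
} ModelLock;

static ModelLock wal_write_lock = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
};
static pthread_mutex_t info_lck = PTHREAD_MUTEX_INITIALIZER;
static RecPtr flushed_upto = 0;     /* models LogwrtResult.Flush */

/* Take the lock if free (return true); else wait for its release (false). */
static bool
model_acquire_or_wait(ModelLock *lk)
{
    bool        acquired;

    pthread_mutex_lock(&lk->mtx);
    if (!lk->held)
    {
        lk->held = true;
        acquired = true;
    }
    else
    {
        while (lk->held)
            pthread_cond_wait(&lk->released, &lk->mtx);
        acquired = false;
    }
    pthread_mutex_unlock(&lk->mtx);
    return acquired;
}

static void
model_release(ModelLock *lk)
{
    pthread_mutex_lock(&lk->mtx);
    lk->held = false;
    pthread_cond_broadcast(&lk->released);  /* wake waiting "backends" */
    pthread_mutex_unlock(&lk->mtx);
}

/* Make sure everything up to "record" is flushed, piggybacking on others. */
static void
flush_record(RecPtr record)
{
    for (;;)
    {
        RecPtr      flushed;

        /* read the shared flush pointer (models taking info_lck) */
        pthread_mutex_lock(&info_lck);
        flushed = flushed_upto;
        pthread_mutex_unlock(&info_lck);

        /* done already?  another backend may have flushed our record */
        if (record <= flushed)
            break;

        /*
         * If the lock was busy we only waited for it to be freed; loop back
         * and recheck, since the holder probably flushed our record for us.
         */
        if (!model_acquire_or_wait(&wal_write_lock))
            continue;

        /* Got the lock: recheck, then "flush" on behalf of the whole group. */
        pthread_mutex_lock(&info_lck);
        if (record > flushed_upto)
            flushed_upto = record;  /* stand-in for XLogWrite() + fsync */
        pthread_mutex_unlock(&info_lck);

        model_release(&wal_write_lock);
        break;
    }
}

int
main(void)
{
    flush_record(42);
    printf("flushed up to %llu\n", (unsigned long long) flushed_upto);
    return 0;
}

The point of having the lock primitive return false instead of silently retrying inside it is that the caller's recheck happens outside the lock, so a whole group of waiting backends can be satisfied by a single fsync, which is the group-commit effect the comment in the hunk above describes.
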