about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r-- src/backend/access/transam/xlog.c 11
-rw-r--r-- src/backend/storage/lmgr/lwlock.c 11
2 files changed, 19 insertions, 3 deletions
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index f7d4750fc0b..60c0b7ec3af 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -1495,6 +1495,17 @@ WaitXLogInsertionsToFinish(XLogRecPtr upto)
* calling LWLockUpdateVar. But if it has to sleep, it will
* advertise the insertion point with LWLockUpdateVar before
* sleeping.
+ *
+ * In this loop we are only waiting for insertions that started
+ * before WaitXLogInsertionsToFinish was called. The lack of
+ * memory barriers in the loop means that we might see locks as
+ * "unused" that have since become used. This is fine because
+ * they can only be used for later insertions that we would not
+ * want to wait on anyway. Not taking a lock to acquire the
+ * current insertingAt value means that we might see older
+ * insertingAt values. This is also fine, because if we read a
+ * value too old, we will add ourselves to the wait queue, which
+ * contains atomic operations.
*/
if (LWLockWaitForVar(&WALInsertLocks[i].l.lock,
&WALInsertLocks[i].l.insertingAt,
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index ffa865eb28a..315a78cda92 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -1556,9 +1556,10 @@ LWLockConflictsWithVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval,
/*
* Test first to see if the slot is free right now.
*
- * XXX: the caller uses a spinlock before this, so we don't need a memory
- * barrier here as far as the current usage is concerned. But that might
- * not be safe in general.
+ * XXX: the unique caller of this routine, WaitXLogInsertionsToFinish()
+ * via LWLockWaitForVar(), uses an implied barrier with a spinlock before
+ * this, so we don't need a memory barrier here as far as the current
+ * usage is concerned. But that might not be safe in general.
*/
mustwait = (pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE) != 0;
@@ -1601,6 +1602,10 @@ LWLockConflictsWithVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval,
*
* Note: this function ignores shared lock holders; if the lock is held
* in shared mode, returns 'true'.
+ *
+ * Be aware that LWLockConflictsWithVar() does not include a memory barrier,
+ * hence the caller of this function may want to rely on an explicit barrier or
+ * an implied barrier via spinlock or LWLock to avoid memory ordering issues.
*/
bool
LWLockWaitForVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval,