about summary refs log tree commit diff
path: root/src/backend/storage
diff options
context:
space:
mode:
Diffstat (limited to 'src/backend/storage')
-rw-r--r--  src/backend/storage/ipc/sinval.c  6
-rw-r--r--  src/backend/storage/lmgr/lmgr.c  9
2 files changed, 11 insertions, 4 deletions
diff --git a/src/backend/storage/ipc/sinval.c b/src/backend/storage/ipc/sinval.c
index 9ab16b16ed1..8499615f14f 100644
--- a/src/backend/storage/ipc/sinval.c
+++ b/src/backend/storage/ipc/sinval.c
@@ -22,6 +22,9 @@
#include "utils/inval.h"
+uint64 SharedInvalidMessageCounter;
+
+
/*
* Because backends sitting idle will not be reading sinval events, we
* need a way to give an idle backend a swift kick in the rear and make
@@ -90,6 +93,7 @@ ReceiveSharedInvalidMessages(
{
SharedInvalidationMessage *msg = &messages[nextmsg++];
+ SharedInvalidMessageCounter++;
invalFunction(msg);
}
@@ -106,6 +110,7 @@ ReceiveSharedInvalidMessages(
{
/* got a reset message */
elog(DEBUG4, "cache state reset");
+ SharedInvalidMessageCounter++;
resetFunction();
break; /* nothing more to do */
}
@@ -118,6 +123,7 @@ ReceiveSharedInvalidMessages(
{
SharedInvalidationMessage *msg = &messages[nextmsg++];
+ SharedInvalidMessageCounter++;
invalFunction(msg);
}
diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c
index 859b3852dbd..3ac098b2a9c 100644
--- a/src/backend/storage/lmgr/lmgr.c
+++ b/src/backend/storage/lmgr/lmgr.c
@@ -81,10 +81,11 @@ LockRelationOid(Oid relid, LOCKMODE lockmode)
/*
* Now that we have the lock, check for invalidation messages, so that we
* will update or flush any stale relcache entry before we try to use it.
- * We can skip this in the not-uncommon case that we already had the same
- * type of lock being requested, since then no one else could have
- * modified the relcache entry in an undesirable way. (In the case where
- * our own xact modifies the rel, the relcache update happens via
+ * RangeVarGetRelid() specifically relies on us for this. We can skip
+ * this in the not-uncommon case that we already had the same type of lock
+ * being requested, since then no one else could have modified the
+ * relcache entry in an undesirable way. (In the case where our own xact
+ * modifies the rel, the relcache update happens via
* CommandCounterIncrement, not here.)
*/
if (res != LOCKACQUIRE_ALREADY_HELD)