aboutsummaryrefslogtreecommitdiff
path: root/src/backend/storage/lmgr
diff options
context:
space:
mode:
authorRobert Haas <rhaas@postgresql.org>2022-04-12 14:45:23 -0400
committerRobert Haas <rhaas@postgresql.org>2022-04-12 14:45:23 -0400
commit7fc0e7de9fb8306e84d1c15211aba4308f694455 (patch)
treece8d0213123959bce52699e8e8d837d46758a2f6 /src/backend/storage/lmgr
parent2c9381840fe2d6d1c3179350493fe5fd3dcf90b5 (diff)
downloadpostgresql-7fc0e7de9fb8306e84d1c15211aba4308f694455.tar.gz
postgresql-7fc0e7de9fb8306e84d1c15211aba4308f694455.zip
Revert the addition of GetMaxBackends() and related stuff.
This reverts commits 0147fc7, 4567596, aa64f23, and 5ecd018. There is no longer agreement that introducing this function was the right way to address the problem. The consensus now seems to favor trying to make a correct value for MaxBackends available to modules executing their _PG_init() functions. Nathan Bossart Discussion: http://postgr.es/m/20220323045229.i23skfscdbvrsuxa@jrouhaud
Diffstat (limited to 'src/backend/storage/lmgr')
-rw-r--r--src/backend/storage/lmgr/deadlock.c31
-rw-r--r--src/backend/storage/lmgr/lock.c23
-rw-r--r--src/backend/storage/lmgr/predicate.c10
-rw-r--r--src/backend/storage/lmgr/proc.c17
4 files changed, 38 insertions, 43 deletions
diff --git a/src/backend/storage/lmgr/deadlock.c b/src/backend/storage/lmgr/deadlock.c
index b5d539ba5d9..cd9c0418eca 100644
--- a/src/backend/storage/lmgr/deadlock.c
+++ b/src/backend/storage/lmgr/deadlock.c
@@ -143,7 +143,6 @@ void
InitDeadLockChecking(void)
{
MemoryContext oldcxt;
- int max_backends = GetMaxBackends();
/* Make sure allocations are permanent */
oldcxt = MemoryContextSwitchTo(TopMemoryContext);
@@ -152,16 +151,16 @@ InitDeadLockChecking(void)
* FindLockCycle needs at most MaxBackends entries in visitedProcs[] and
* deadlockDetails[].
*/
- visitedProcs = (PGPROC **) palloc(max_backends * sizeof(PGPROC *));
- deadlockDetails = (DEADLOCK_INFO *) palloc(max_backends * sizeof(DEADLOCK_INFO));
+ visitedProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
+ deadlockDetails = (DEADLOCK_INFO *) palloc(MaxBackends * sizeof(DEADLOCK_INFO));
/*
* TopoSort needs to consider at most MaxBackends wait-queue entries, and
* it needn't run concurrently with FindLockCycle.
*/
topoProcs = visitedProcs; /* re-use this space */
- beforeConstraints = (int *) palloc(max_backends * sizeof(int));
- afterConstraints = (int *) palloc(max_backends * sizeof(int));
+ beforeConstraints = (int *) palloc(MaxBackends * sizeof(int));
+ afterConstraints = (int *) palloc(MaxBackends * sizeof(int));
/*
* We need to consider rearranging at most MaxBackends/2 wait queues
@@ -170,8 +169,8 @@ InitDeadLockChecking(void)
* MaxBackends total waiters.
*/
waitOrders = (WAIT_ORDER *)
- palloc((max_backends / 2) * sizeof(WAIT_ORDER));
- waitOrderProcs = (PGPROC **) palloc(max_backends * sizeof(PGPROC *));
+ palloc((MaxBackends / 2) * sizeof(WAIT_ORDER));
+ waitOrderProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
/*
* Allow at most MaxBackends distinct constraints in a configuration. (Is
@@ -181,7 +180,7 @@ InitDeadLockChecking(void)
* limits the maximum recursion depth of DeadLockCheckRecurse. Making it
* really big might potentially allow a stack-overflow problem.
*/
- maxCurConstraints = max_backends;
+ maxCurConstraints = MaxBackends;
curConstraints = (EDGE *) palloc(maxCurConstraints * sizeof(EDGE));
/*
@@ -192,7 +191,7 @@ InitDeadLockChecking(void)
* last MaxBackends entries in possibleConstraints[] are reserved as
* output workspace for FindLockCycle.
*/
- maxPossibleConstraints = max_backends * 4;
+ maxPossibleConstraints = MaxBackends * 4;
possibleConstraints =
(EDGE *) palloc(maxPossibleConstraints * sizeof(EDGE));
@@ -328,7 +327,7 @@ DeadLockCheckRecurse(PGPROC *proc)
if (nCurConstraints >= maxCurConstraints)
return true; /* out of room for active constraints? */
oldPossibleConstraints = nPossibleConstraints;
- if (nPossibleConstraints + nEdges + GetMaxBackends() <= maxPossibleConstraints)
+ if (nPossibleConstraints + nEdges + MaxBackends <= maxPossibleConstraints)
{
/* We can save the edge list in possibleConstraints[] */
nPossibleConstraints += nEdges;
@@ -389,7 +388,7 @@ TestConfiguration(PGPROC *startProc)
/*
* Make sure we have room for FindLockCycle's output.
*/
- if (nPossibleConstraints + GetMaxBackends() > maxPossibleConstraints)
+ if (nPossibleConstraints + MaxBackends > maxPossibleConstraints)
return -1;
/*
@@ -487,7 +486,7 @@ FindLockCycleRecurse(PGPROC *checkProc,
* record total length of cycle --- outer levels will now fill
* deadlockDetails[]
*/
- Assert(depth <= GetMaxBackends());
+ Assert(depth <= MaxBackends);
nDeadlockDetails = depth;
return true;
@@ -501,7 +500,7 @@ FindLockCycleRecurse(PGPROC *checkProc,
}
}
/* Mark proc as seen */
- Assert(nVisitedProcs < GetMaxBackends());
+ Assert(nVisitedProcs < MaxBackends);
visitedProcs[nVisitedProcs++] = checkProc;
/*
@@ -699,7 +698,7 @@ FindLockCycleRecurseMember(PGPROC *checkProc,
/*
* Add this edge to the list of soft edges in the cycle
*/
- Assert(*nSoftEdges < GetMaxBackends());
+ Assert(*nSoftEdges < MaxBackends);
softEdges[*nSoftEdges].waiter = checkProcLeader;
softEdges[*nSoftEdges].blocker = leader;
softEdges[*nSoftEdges].lock = lock;
@@ -772,7 +771,7 @@ FindLockCycleRecurseMember(PGPROC *checkProc,
/*
* Add this edge to the list of soft edges in the cycle
*/
- Assert(*nSoftEdges < GetMaxBackends());
+ Assert(*nSoftEdges < MaxBackends);
softEdges[*nSoftEdges].waiter = checkProcLeader;
softEdges[*nSoftEdges].blocker = leader;
softEdges[*nSoftEdges].lock = lock;
@@ -835,7 +834,7 @@ ExpandConstraints(EDGE *constraints,
waitOrders[nWaitOrders].procs = waitOrderProcs + nWaitOrderProcs;
waitOrders[nWaitOrders].nProcs = lock->waitProcs.size;
nWaitOrderProcs += lock->waitProcs.size;
- Assert(nWaitOrderProcs <= GetMaxBackends());
+ Assert(nWaitOrderProcs <= MaxBackends);
/*
* Do the topo sort. TopoSort need not examine constraints after this
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index ee2e15c17e9..5f5803f6814 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -55,7 +55,7 @@
int max_locks_per_xact; /* set by guc.c */
#define NLOCKENTS() \
- mul_size(max_locks_per_xact, add_size(GetMaxBackends(), max_prepared_xacts))
+ mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
/*
@@ -2924,7 +2924,6 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
LWLock *partitionLock;
int count = 0;
int fast_count = 0;
- int max_backends = GetMaxBackends();
if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
elog(ERROR, "unrecognized lock method: %d", lockmethodid);
@@ -2943,12 +2942,12 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
vxids = (VirtualTransactionId *)
MemoryContextAlloc(TopMemoryContext,
sizeof(VirtualTransactionId) *
- (max_backends + max_prepared_xacts + 1));
+ (MaxBackends + max_prepared_xacts + 1));
}
else
vxids = (VirtualTransactionId *)
palloc0(sizeof(VirtualTransactionId) *
- (max_backends + max_prepared_xacts + 1));
+ (MaxBackends + max_prepared_xacts + 1));
/* Compute hash code and partition lock, and look up conflicting modes. */
hashcode = LockTagHashCode(locktag);
@@ -3105,7 +3104,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
LWLockRelease(partitionLock);
- if (count > max_backends + max_prepared_xacts) /* should never happen */
+ if (count > MaxBackends + max_prepared_xacts) /* should never happen */
elog(PANIC, "too many conflicting locks found");
vxids[count].backendId = InvalidBackendId;
@@ -3652,12 +3651,11 @@ GetLockStatusData(void)
int els;
int el;
int i;
- int max_backends = GetMaxBackends();
data = (LockData *) palloc(sizeof(LockData));
/* Guess how much space we'll need. */
- els = max_backends;
+ els = MaxBackends;
el = 0;
data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
@@ -3691,7 +3689,7 @@ GetLockStatusData(void)
if (el >= els)
{
- els += max_backends;
+ els += MaxBackends;
data->locks = (LockInstanceData *)
repalloc(data->locks, sizeof(LockInstanceData) * els);
}
@@ -3723,7 +3721,7 @@ GetLockStatusData(void)
if (el >= els)
{
- els += max_backends;
+ els += MaxBackends;
data->locks = (LockInstanceData *)
repalloc(data->locks, sizeof(LockInstanceData) * els);
}
@@ -3852,7 +3850,7 @@ GetBlockerStatusData(int blocked_pid)
* for the procs[] array; the other two could need enlargement, though.)
*/
data->nprocs = data->nlocks = data->npids = 0;
- data->maxprocs = data->maxlocks = data->maxpids = GetMaxBackends();
+ data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
@@ -3927,7 +3925,6 @@ GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
PGPROC *proc;
int queue_size;
int i;
- int max_backends = GetMaxBackends();
/* Nothing to do if this proc is not blocked */
if (theLock == NULL)
@@ -3956,7 +3953,7 @@ GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
if (data->nlocks >= data->maxlocks)
{
- data->maxlocks += max_backends;
+ data->maxlocks += MaxBackends;
data->locks = (LockInstanceData *)
repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
}
@@ -3985,7 +3982,7 @@ GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
if (queue_size > data->maxpids - data->npids)
{
- data->maxpids = Max(data->maxpids + max_backends,
+ data->maxpids = Max(data->maxpids + MaxBackends,
data->npids + queue_size);
data->waiter_pids = (int *) repalloc(data->waiter_pids,
sizeof(int) * data->maxpids);
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index e337aad5b24..25e7e4e37bf 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -257,7 +257,7 @@
(&MainLWLockArray[PREDICATELOCK_MANAGER_LWLOCK_OFFSET + (i)].lock)
#define NPREDICATELOCKTARGETENTS() \
- mul_size(max_predicate_locks_per_xact, add_size(GetMaxBackends(), max_prepared_xacts))
+ mul_size(max_predicate_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
#define SxactIsOnFinishedList(sxact) (!SHMQueueIsDetached(&((sxact)->finishedLink)))
@@ -1222,7 +1222,7 @@ InitPredicateLocks(void)
* Compute size for serializable transaction hashtable. Note these
* calculations must agree with PredicateLockShmemSize!
*/
- max_table_size = (GetMaxBackends() + max_prepared_xacts);
+ max_table_size = (MaxBackends + max_prepared_xacts);
/*
* Allocate a list to hold information on transactions participating in
@@ -1375,7 +1375,7 @@ PredicateLockShmemSize(void)
size = add_size(size, size / 10);
/* transaction list */
- max_table_size = GetMaxBackends() + max_prepared_xacts;
+ max_table_size = MaxBackends + max_prepared_xacts;
max_table_size *= 10;
size = add_size(size, PredXactListDataSize);
size = add_size(size, mul_size((Size) max_table_size,
@@ -1907,7 +1907,7 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
{
++(PredXact->WritableSxactCount);
Assert(PredXact->WritableSxactCount <=
- (GetMaxBackends() + max_prepared_xacts));
+ (MaxBackends + max_prepared_xacts));
}
MySerializableXact = sxact;
@@ -5111,7 +5111,7 @@ predicatelock_twophase_recover(TransactionId xid, uint16 info,
{
++(PredXact->WritableSxactCount);
Assert(PredXact->WritableSxactCount <=
- (GetMaxBackends() + max_prepared_xacts));
+ (MaxBackends + max_prepared_xacts));
}
/*
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 93d082c45ee..37aaab13381 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -103,7 +103,7 @@ ProcGlobalShmemSize(void)
{
Size size = 0;
Size TotalProcs =
- add_size(GetMaxBackends(), add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
+ add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
/* ProcGlobal */
size = add_size(size, sizeof(PROC_HDR));
@@ -127,7 +127,7 @@ ProcGlobalSemas(void)
* We need a sema per backend (including autovacuum), plus one for each
* auxiliary process.
*/
- return GetMaxBackends() + NUM_AUXILIARY_PROCS;
+ return MaxBackends + NUM_AUXILIARY_PROCS;
}
/*
@@ -162,8 +162,7 @@ InitProcGlobal(void)
int i,
j;
bool found;
- int max_backends = GetMaxBackends();
- uint32 TotalProcs = max_backends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
+ uint32 TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
/* Create the ProcGlobal shared structure */
ProcGlobal = (PROC_HDR *)
@@ -196,7 +195,7 @@ InitProcGlobal(void)
MemSet(procs, 0, TotalProcs * sizeof(PGPROC));
ProcGlobal->allProcs = procs;
/* XXX allProcCount isn't really all of them; it excludes prepared xacts */
- ProcGlobal->allProcCount = max_backends + NUM_AUXILIARY_PROCS;
+ ProcGlobal->allProcCount = MaxBackends + NUM_AUXILIARY_PROCS;
/*
* Allocate arrays mirroring PGPROC fields in a dense manner. See
@@ -222,7 +221,7 @@ InitProcGlobal(void)
* dummy PGPROCs don't need these though - they're never associated
* with a real process
*/
- if (i < max_backends + NUM_AUXILIARY_PROCS)
+ if (i < MaxBackends + NUM_AUXILIARY_PROCS)
{
procs[i].sem = PGSemaphoreCreate();
InitSharedLatch(&(procs[i].procLatch));
@@ -259,7 +258,7 @@ InitProcGlobal(void)
ProcGlobal->bgworkerFreeProcs = &procs[i];
procs[i].procgloballist = &ProcGlobal->bgworkerFreeProcs;
}
- else if (i < max_backends)
+ else if (i < MaxBackends)
{
/* PGPROC for walsender, add to walsenderFreeProcs list */
procs[i].links.next = (SHM_QUEUE *) ProcGlobal->walsenderFreeProcs;
@@ -287,8 +286,8 @@ InitProcGlobal(void)
* Save pointers to the blocks of PGPROC structures reserved for auxiliary
* processes and prepared transactions.
*/
- AuxiliaryProcs = &procs[max_backends];
- PreparedXactProcs = &procs[max_backends + NUM_AUXILIARY_PROCS];
+ AuxiliaryProcs = &procs[MaxBackends];
+ PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];
/* Create ProcStructLock spinlock, too */
ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));