Revert the addition of GetMaxBackends() and related stuff.
This reverts commits 0147fc7, 4567596, aa64f23, and 5ecd018. There is no longer agreement that introducing this function was the right way to address the problem. The consensus now seems to favor trying to make a correct value for MaxBackends available to modules executing their _PG_init() functions.

Nathan Bossart

Discussion: http://postgr.es/m/20220323045229.i23skfscdbvrsuxa@jrouhaud
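For context, the _PG_init() situation the message refers to looks roughly like the sketch below: an extension loaded via shared_preload_libraries sizes a shared-memory request from MaxBackends. This is a hypothetical illustration rather than anything from the commit; the MyBackendSlot struct is made up, and the pattern assumes MaxBackends and RequestAddinShmemSpace are usable by the time _PG_init() runs, which is exactly the guarantee under discussion.

/*
 * Hypothetical extension fragment (illustration only, not part of this
 * commit): size a per-backend shared-memory request in _PG_init().  This
 * only yields the right amount of space if MaxBackends already holds its
 * final value when shared_preload_libraries are loaded.
 */
#include "postgres.h"

#include "fmgr.h"
#include "miscadmin.h"			/* MaxBackends */
#include "storage/ipc.h"		/* RequestAddinShmemSpace */
#include "storage/shmem.h"		/* mul_size */

PG_MODULE_MAGIC;

void		_PG_init(void);

typedef struct MyBackendSlot
{
	int			pid;			/* made-up per-backend bookkeeping */
	uint64		counter;
} MyBackendSlot;

void
_PG_init(void)
{
	/* One slot per possible backend. */
	RequestAddinShmemSpace(mul_size(MaxBackends, sizeof(MyBackendSlot)));
}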
@@ -166,7 +166,7 @@ dsm_postmaster_startup(PGShmemHeader *shim)
 	/* Determine size for new control segment. */
 	maxitems = PG_DYNSHMEM_FIXED_SLOTS
-		+ PG_DYNSHMEM_SLOTS_PER_BACKEND * GetMaxBackends();
+		+ PG_DYNSHMEM_SLOTS_PER_BACKEND * MaxBackends;
 	elog(DEBUG2, "dynamic shared memory system will support %u segments",
 		 maxitems);
 	segsize = dsm_control_bytes_needed(maxitems);

@@ -97,7 +97,7 @@ typedef struct ProcArrayStruct
 	/* oldest catalog xmin of any replication slot */
 	TransactionId replication_slot_catalog_xmin;
 
-	/* indexes into allProcs[], has ProcArrayMaxProcs entries */
+	/* indexes into allProcs[], has PROCARRAY_MAXPROCS entries */
 	int			pgprocnos[FLEXIBLE_ARRAY_MEMBER];
 } ProcArrayStruct;
@@ -355,17 +355,6 @@ static void MaintainLatestCompletedXidRecovery(TransactionId latestXid);
 static inline FullTransactionId FullXidRelativeTo(FullTransactionId rel,
 												  TransactionId xid);
 static void GlobalVisUpdateApply(ComputeXidHorizonsResult *horizons);
-static inline int GetProcArrayMaxProcs(void);
-
-
-/*
- * Retrieve the number of slots in the ProcArray structure.
- */
-static inline int
-GetProcArrayMaxProcs(void)
-{
-	return GetMaxBackends() + max_prepared_xacts;
-}
 
 /*
  * Report shared-memory space needed by CreateSharedProcArray.
@@ -376,8 +365,10 @@ ProcArrayShmemSize(void)
 	Size		size;
 
 	/* Size of the ProcArray structure itself */
+#define PROCARRAY_MAXPROCS	(MaxBackends + max_prepared_xacts)
+
 	size = offsetof(ProcArrayStruct, pgprocnos);
-	size = add_size(size, mul_size(sizeof(int), GetProcArrayMaxProcs()));
+	size = add_size(size, mul_size(sizeof(int), PROCARRAY_MAXPROCS));
 
 	/*
 	 * During Hot Standby processing we have a data structure called
@@ -393,7 +384,7 @@ ProcArrayShmemSize(void)
 	 * shared memory is being set up.
 	 */
 #define TOTAL_MAX_CACHED_SUBXIDS \
-	((PGPROC_MAX_CACHED_SUBXIDS + 1) * GetProcArrayMaxProcs())
+	((PGPROC_MAX_CACHED_SUBXIDS + 1) * PROCARRAY_MAXPROCS)
 
 	if (EnableHotStandby)
 	{
@@ -420,7 +411,7 @@ CreateSharedProcArray(void)
 		ShmemInitStruct("Proc Array",
 						add_size(offsetof(ProcArrayStruct, pgprocnos),
 								 mul_size(sizeof(int),
-										  GetProcArrayMaxProcs())),
+										  PROCARRAY_MAXPROCS)),
 						&found);
 
 	if (!found)
@@ -429,7 +420,7 @@ CreateSharedProcArray(void)
 		 * We're the first - initialize.
 		 */
 		procArray->numProcs = 0;
-		procArray->maxProcs = GetProcArrayMaxProcs();
+		procArray->maxProcs = PROCARRAY_MAXPROCS;
 		procArray->maxKnownAssignedXids = TOTAL_MAX_CACHED_SUBXIDS;
 		procArray->numKnownAssignedXids = 0;
 		procArray->tailKnownAssignedXids = 0;
@@ -4645,7 +4636,7 @@ KnownAssignedXidsCompress(bool force)
 		 */
 		int			nelements = head - tail;
 
-		if (nelements < 4 * GetProcArrayMaxProcs() ||
+		if (nelements < 4 * PROCARRAY_MAXPROCS ||
 			nelements < 2 * pArray->numKnownAssignedXids)
 			return;
 	}

@@ -81,6 +81,13 @@ typedef struct
 	ProcSignalSlot psh_slot[FLEXIBLE_ARRAY_MEMBER];
 } ProcSignalHeader;
 
+/*
+ * We reserve a slot for each possible BackendId, plus one for each
+ * possible auxiliary process type.  (This scheme assumes there is not
+ * more than one of any auxiliary process type at a time.)
+ */
+#define NumProcSignalSlots	(MaxBackends + NUM_AUXPROCTYPES)
+
 /* Check whether the relevant type bit is set in the flags. */
 #define BARRIER_SHOULD_CHECK(flags, type) \
 	(((flags) & (((uint32) 1) << (uint32) (type))) != 0)
@@ -95,20 +102,6 @@ static ProcSignalSlot *MyProcSignalSlot = NULL;
 static bool CheckProcSignal(ProcSignalReason reason);
 static void CleanupProcSignalState(int status, Datum arg);
 static void ResetProcSignalBarrierBits(uint32 flags);
-static inline int GetNumProcSignalSlots(void);
-
-/*
- * GetNumProcSignalSlots
- *
- * We reserve a slot for each possible BackendId, plus one for each possible
- * auxiliary process type.  (This scheme assume there is not more than one of
- * any auxiliary process type at a time.)
- */
-static inline int
-GetNumProcSignalSlots(void)
-{
-	return GetMaxBackends() + NUM_AUXPROCTYPES;
-}
 
 /*
  * ProcSignalShmemSize
@@ -119,7 +112,7 @@ ProcSignalShmemSize(void)
 {
 	Size		size;
 
-	size = mul_size(GetNumProcSignalSlots(), sizeof(ProcSignalSlot));
+	size = mul_size(NumProcSignalSlots, sizeof(ProcSignalSlot));
 	size = add_size(size, offsetof(ProcSignalHeader, psh_slot));
 	return size;
 }
@@ -133,7 +126,6 @@ ProcSignalShmemInit(void)
 {
 	Size		size = ProcSignalShmemSize();
 	bool		found;
-	int			numProcSignalSlots = GetNumProcSignalSlots();
 
 	ProcSignal = (ProcSignalHeader *)
 		ShmemInitStruct("ProcSignal", size, &found);
@@ -145,7 +137,7 @@ ProcSignalShmemInit(void)
 
 		pg_atomic_init_u64(&ProcSignal->psh_barrierGeneration, 0);
 
-		for (i = 0; i < numProcSignalSlots; ++i)
+		for (i = 0; i < NumProcSignalSlots; ++i)
 		{
 			ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
@@ -171,7 +163,7 @@ ProcSignalInit(int pss_idx)
 	ProcSignalSlot *slot;
 	uint64		barrier_generation;
 
-	Assert(pss_idx >= 1 && pss_idx <= GetNumProcSignalSlots());
+	Assert(pss_idx >= 1 && pss_idx <= NumProcSignalSlots);
 
 	slot = &ProcSignal->psh_slot[pss_idx - 1];
@@ -300,7 +292,7 @@ SendProcSignal(pid_t pid, ProcSignalReason reason, BackendId backendId)
 		 */
 		int			i;
 
-		for (i = GetNumProcSignalSlots() - 1; i >= 0; i--)
+		for (i = NumProcSignalSlots - 1; i >= 0; i--)
 		{
 			slot = &ProcSignal->psh_slot[i];
@@ -341,7 +333,6 @@ EmitProcSignalBarrier(ProcSignalBarrierType type)
 {
 	uint32		flagbit = 1 << (uint32) type;
 	uint64		generation;
-	int			numProcSignalSlots = GetNumProcSignalSlots();
 
 	/*
 	 * Set all the flags.
@@ -351,7 +342,7 @@ EmitProcSignalBarrier(ProcSignalBarrierType type)
 	 * anything that we do afterwards. (This is also true of the later call to
 	 * pg_atomic_add_fetch_u64.)
 	 */
-	for (int i = 0; i < numProcSignalSlots; i++)
+	for (int i = 0; i < NumProcSignalSlots; i++)
 	{
 		volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
@@ -377,7 +368,7 @@ EmitProcSignalBarrier(ProcSignalBarrierType type)
 	 * backends that need to update state - but they won't actually need to
 	 * change any state.
 	 */
-	for (int i = numProcSignalSlots - 1; i >= 0; i--)
+	for (int i = NumProcSignalSlots - 1; i >= 0; i--)
 	{
 		volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
 		pid_t		pid = slot->pss_pid;
@@ -402,7 +393,7 @@ WaitForProcSignalBarrier(uint64 generation)
 {
 	Assert(generation <= pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration));
 
-	for (int i = GetNumProcSignalSlots() - 1; i >= 0; i--)
+	for (int i = NumProcSignalSlots - 1; i >= 0; i--)
 	{
 		ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
 		uint64		oldval;

@@ -213,7 +213,7 @@ SInvalShmemSize(void)
 	 * free slot. This is because the autovacuum launcher and worker processes,
 	 * which are included in MaxBackends, are not started in Hot Standby mode.
 	 */
-	size = add_size(size, mul_size(sizeof(ProcState), GetMaxBackends()));
+	size = add_size(size, mul_size(sizeof(ProcState), MaxBackends));
 
 	return size;
 }
@@ -239,7 +239,7 @@ CreateSharedInvalidationState(void)
 	shmInvalBuffer->maxMsgNum = 0;
 	shmInvalBuffer->nextThreshold = CLEANUP_MIN;
 	shmInvalBuffer->lastBackend = 0;
-	shmInvalBuffer->maxBackends = GetMaxBackends();
+	shmInvalBuffer->maxBackends = MaxBackends;
 	SpinLockInit(&shmInvalBuffer->msgnumLock);
 
 	/* The buffer[] array is initially all unused, so we need not fill it */

@@ -143,7 +143,6 @@ void
 InitDeadLockChecking(void)
 {
 	MemoryContext oldcxt;
-	int			max_backends = GetMaxBackends();
 
 	/* Make sure allocations are permanent */
 	oldcxt = MemoryContextSwitchTo(TopMemoryContext);
@@ -152,16 +151,16 @@ InitDeadLockChecking(void)
 	 * FindLockCycle needs at most MaxBackends entries in visitedProcs[] and
 	 * deadlockDetails[].
 	 */
-	visitedProcs = (PGPROC **) palloc(max_backends * sizeof(PGPROC *));
-	deadlockDetails = (DEADLOCK_INFO *) palloc(max_backends * sizeof(DEADLOCK_INFO));
+	visitedProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
+	deadlockDetails = (DEADLOCK_INFO *) palloc(MaxBackends * sizeof(DEADLOCK_INFO));
 
 	/*
 	 * TopoSort needs to consider at most MaxBackends wait-queue entries, and
 	 * it needn't run concurrently with FindLockCycle.
 	 */
 	topoProcs = visitedProcs;	/* re-use this space */
-	beforeConstraints = (int *) palloc(max_backends * sizeof(int));
-	afterConstraints = (int *) palloc(max_backends * sizeof(int));
+	beforeConstraints = (int *) palloc(MaxBackends * sizeof(int));
+	afterConstraints = (int *) palloc(MaxBackends * sizeof(int));
 
 	/*
 	 * We need to consider rearranging at most MaxBackends/2 wait queues
@@ -170,8 +169,8 @@ InitDeadLockChecking(void)
 	 * MaxBackends total waiters.
 	 */
 	waitOrders = (WAIT_ORDER *)
-		palloc((max_backends / 2) * sizeof(WAIT_ORDER));
-	waitOrderProcs = (PGPROC **) palloc(max_backends * sizeof(PGPROC *));
+		palloc((MaxBackends / 2) * sizeof(WAIT_ORDER));
+	waitOrderProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
 
 	/*
 	 * Allow at most MaxBackends distinct constraints in a configuration. (Is
@@ -181,7 +180,7 @@ InitDeadLockChecking(void)
 	 * limits the maximum recursion depth of DeadLockCheckRecurse. Making it
 	 * really big might potentially allow a stack-overflow problem.
 	 */
-	maxCurConstraints = max_backends;
+	maxCurConstraints = MaxBackends;
 	curConstraints = (EDGE *) palloc(maxCurConstraints * sizeof(EDGE));
 
 	/*
@@ -192,7 +191,7 @@ InitDeadLockChecking(void)
 	 * last MaxBackends entries in possibleConstraints[] are reserved as
 	 * output workspace for FindLockCycle.
 	 */
-	maxPossibleConstraints = max_backends * 4;
+	maxPossibleConstraints = MaxBackends * 4;
 	possibleConstraints =
 		(EDGE *) palloc(maxPossibleConstraints * sizeof(EDGE));
 
@@ -328,7 +327,7 @@ DeadLockCheckRecurse(PGPROC *proc)
 	if (nCurConstraints >= maxCurConstraints)
 		return true;			/* out of room for active constraints? */
 	oldPossibleConstraints = nPossibleConstraints;
-	if (nPossibleConstraints + nEdges + GetMaxBackends() <= maxPossibleConstraints)
+	if (nPossibleConstraints + nEdges + MaxBackends <= maxPossibleConstraints)
 	{
 		/* We can save the edge list in possibleConstraints[] */
 		nPossibleConstraints += nEdges;
@@ -389,7 +388,7 @@ TestConfiguration(PGPROC *startProc)
 	/*
 	 * Make sure we have room for FindLockCycle's output.
 	 */
-	if (nPossibleConstraints + GetMaxBackends() > maxPossibleConstraints)
+	if (nPossibleConstraints + MaxBackends > maxPossibleConstraints)
 		return -1;
 
 	/*
@@ -487,7 +486,7 @@ FindLockCycleRecurse(PGPROC *checkProc,
 			 * record total length of cycle --- outer levels will now fill
 			 * deadlockDetails[]
 			 */
-			Assert(depth <= GetMaxBackends());
+			Assert(depth <= MaxBackends);
 			nDeadlockDetails = depth;
 
 			return true;
@@ -501,7 +500,7 @@ FindLockCycleRecurse(PGPROC *checkProc,
 		}
 	}
 	/* Mark proc as seen */
-	Assert(nVisitedProcs < GetMaxBackends());
+	Assert(nVisitedProcs < MaxBackends);
 	visitedProcs[nVisitedProcs++] = checkProc;
 
 	/*
@@ -699,7 +698,7 @@ FindLockCycleRecurseMember(PGPROC *checkProc,
 				/*
 				 * Add this edge to the list of soft edges in the cycle
 				 */
-				Assert(*nSoftEdges < GetMaxBackends());
+				Assert(*nSoftEdges < MaxBackends);
 				softEdges[*nSoftEdges].waiter = checkProcLeader;
 				softEdges[*nSoftEdges].blocker = leader;
 				softEdges[*nSoftEdges].lock = lock;
@@ -772,7 +771,7 @@ FindLockCycleRecurseMember(PGPROC *checkProc,
 				/*
 				 * Add this edge to the list of soft edges in the cycle
 				 */
-				Assert(*nSoftEdges < GetMaxBackends());
+				Assert(*nSoftEdges < MaxBackends);
 				softEdges[*nSoftEdges].waiter = checkProcLeader;
 				softEdges[*nSoftEdges].blocker = leader;
 				softEdges[*nSoftEdges].lock = lock;
@@ -835,7 +834,7 @@ ExpandConstraints(EDGE *constraints,
 		waitOrders[nWaitOrders].procs = waitOrderProcs + nWaitOrderProcs;
 		waitOrders[nWaitOrders].nProcs = lock->waitProcs.size;
 		nWaitOrderProcs += lock->waitProcs.size;
-		Assert(nWaitOrderProcs <= GetMaxBackends());
+		Assert(nWaitOrderProcs <= MaxBackends);
 
 		/*
 		 * Do the topo sort.  TopoSort need not examine constraints after this

@@ -55,7 +55,7 @@
 int			max_locks_per_xact; /* set by guc.c */
 
 #define NLOCKENTS() \
-	mul_size(max_locks_per_xact, add_size(GetMaxBackends(), max_prepared_xacts))
+	mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
 
 
 /*
@@ -2924,7 +2924,6 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 	LWLock	   *partitionLock;
 	int			count = 0;
 	int			fast_count = 0;
-	int			max_backends = GetMaxBackends();
 
 	if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
 		elog(ERROR, "unrecognized lock method: %d", lockmethodid);
@@ -2943,12 +2942,12 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 		vxids = (VirtualTransactionId *)
 			MemoryContextAlloc(TopMemoryContext,
 							   sizeof(VirtualTransactionId) *
-							   (max_backends + max_prepared_xacts + 1));
+							   (MaxBackends + max_prepared_xacts + 1));
 	}
 	else
 		vxids = (VirtualTransactionId *)
 			palloc0(sizeof(VirtualTransactionId) *
-					(max_backends + max_prepared_xacts + 1));
+					(MaxBackends + max_prepared_xacts + 1));
 
 	/* Compute hash code and partition lock, and look up conflicting modes. */
 	hashcode = LockTagHashCode(locktag);
@@ -3105,7 +3104,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 
 	LWLockRelease(partitionLock);
 
-	if (count > max_backends + max_prepared_xacts)	/* should never happen */
+	if (count > MaxBackends + max_prepared_xacts)	/* should never happen */
 		elog(PANIC, "too many conflicting locks found");
 
 	vxids[count].backendId = InvalidBackendId;
@@ -3652,12 +3651,11 @@ GetLockStatusData(void)
 	int			els;
 	int			el;
 	int			i;
-	int			max_backends = GetMaxBackends();
 
 	data = (LockData *) palloc(sizeof(LockData));
 
 	/* Guess how much space we'll need. */
-	els = max_backends;
+	els = MaxBackends;
 	el = 0;
 	data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
@@ -3691,7 +3689,7 @@ GetLockStatusData(void)
 
 		if (el >= els)
 		{
-			els += max_backends;
+			els += MaxBackends;
 			data->locks = (LockInstanceData *)
 				repalloc(data->locks, sizeof(LockInstanceData) * els);
 		}
@@ -3723,7 +3721,7 @@ GetLockStatusData(void)
 
 		if (el >= els)
 		{
-			els += max_backends;
+			els += MaxBackends;
 			data->locks = (LockInstanceData *)
 				repalloc(data->locks, sizeof(LockInstanceData) * els);
 		}
@@ -3852,7 +3850,7 @@ GetBlockerStatusData(int blocked_pid)
 	 * for the procs[] array; the other two could need enlargement, though.)
 	 */
 	data->nprocs = data->nlocks = data->npids = 0;
-	data->maxprocs = data->maxlocks = data->maxpids = GetMaxBackends();
+	data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
 	data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
 	data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
 	data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
@@ -3927,7 +3925,6 @@ GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
 	PGPROC	   *proc;
 	int			queue_size;
 	int			i;
-	int			max_backends = GetMaxBackends();
 
 	/* Nothing to do if this proc is not blocked */
 	if (theLock == NULL)
@@ -3956,7 +3953,7 @@ GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
 
 	if (data->nlocks >= data->maxlocks)
 	{
-		data->maxlocks += max_backends;
+		data->maxlocks += MaxBackends;
 		data->locks = (LockInstanceData *)
 			repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
 	}
@@ -3985,7 +3982,7 @@ GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
 
 	if (queue_size > data->maxpids - data->npids)
 	{
-		data->maxpids = Max(data->maxpids + max_backends,
+		data->maxpids = Max(data->maxpids + MaxBackends,
 							data->npids + queue_size);
 		data->waiter_pids = (int *) repalloc(data->waiter_pids,
 											 sizeof(int) * data->maxpids);

@@ -257,7 +257,7 @@
 	(&MainLWLockArray[PREDICATELOCK_MANAGER_LWLOCK_OFFSET + (i)].lock)
 
 #define NPREDICATELOCKTARGETENTS() \
-	mul_size(max_predicate_locks_per_xact, add_size(GetMaxBackends(), max_prepared_xacts))
+	mul_size(max_predicate_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
 
 #define SxactIsOnFinishedList(sxact) (!SHMQueueIsDetached(&((sxact)->finishedLink)))
 
@@ -1222,7 +1222,7 @@ InitPredicateLocks(void)
 	 * Compute size for serializable transaction hashtable. Note these
 	 * calculations must agree with PredicateLockShmemSize!
 	 */
-	max_table_size = (GetMaxBackends() + max_prepared_xacts);
+	max_table_size = (MaxBackends + max_prepared_xacts);
 
 	/*
 	 * Allocate a list to hold information on transactions participating in
@@ -1375,7 +1375,7 @@ PredicateLockShmemSize(void)
 	size = add_size(size, size / 10);
 
 	/* transaction list */
-	max_table_size = GetMaxBackends() + max_prepared_xacts;
+	max_table_size = MaxBackends + max_prepared_xacts;
 	max_table_size *= 10;
 	size = add_size(size, PredXactListDataSize);
 	size = add_size(size, mul_size((Size) max_table_size,
@@ -1907,7 +1907,7 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
 	{
 		++(PredXact->WritableSxactCount);
 		Assert(PredXact->WritableSxactCount <=
-			   (GetMaxBackends() + max_prepared_xacts));
+			   (MaxBackends + max_prepared_xacts));
 	}
 
 	MySerializableXact = sxact;
@@ -5111,7 +5111,7 @@ predicatelock_twophase_recover(TransactionId xid, uint16 info,
 	{
 		++(PredXact->WritableSxactCount);
 		Assert(PredXact->WritableSxactCount <=
-			   (GetMaxBackends() + max_prepared_xacts));
+			   (MaxBackends + max_prepared_xacts));
 	}
 
 	/*

@@ -103,7 +103,7 @@ ProcGlobalShmemSize(void)
 {
 	Size		size = 0;
 	Size		TotalProcs =
-	add_size(GetMaxBackends(), add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
+	add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
 
 	/* ProcGlobal */
 	size = add_size(size, sizeof(PROC_HDR));
@@ -127,7 +127,7 @@ ProcGlobalSemas(void)
 	 * We need a sema per backend (including autovacuum), plus one for each
 	 * auxiliary process.
 	 */
-	return GetMaxBackends() + NUM_AUXILIARY_PROCS;
+	return MaxBackends + NUM_AUXILIARY_PROCS;
 }
 
 /*
@@ -162,8 +162,7 @@ InitProcGlobal(void)
 	int			i,
 				j;
 	bool		found;
-	int			max_backends = GetMaxBackends();
-	uint32		TotalProcs = max_backends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
+	uint32		TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
 
 	/* Create the ProcGlobal shared structure */
 	ProcGlobal = (PROC_HDR *)
@@ -196,7 +195,7 @@ InitProcGlobal(void)
 	MemSet(procs, 0, TotalProcs * sizeof(PGPROC));
 	ProcGlobal->allProcs = procs;
 	/* XXX allProcCount isn't really all of them; it excludes prepared xacts */
-	ProcGlobal->allProcCount = max_backends + NUM_AUXILIARY_PROCS;
+	ProcGlobal->allProcCount = MaxBackends + NUM_AUXILIARY_PROCS;
 
 	/*
 	 * Allocate arrays mirroring PGPROC fields in a dense manner. See
@@ -222,7 +221,7 @@ InitProcGlobal(void)
 		 * dummy PGPROCs don't need these though - they're never associated
 		 * with a real process
 		 */
-		if (i < max_backends + NUM_AUXILIARY_PROCS)
+		if (i < MaxBackends + NUM_AUXILIARY_PROCS)
 		{
 			procs[i].sem = PGSemaphoreCreate();
 			InitSharedLatch(&(procs[i].procLatch));
@@ -259,7 +258,7 @@ InitProcGlobal(void)
 			ProcGlobal->bgworkerFreeProcs = &procs[i];
 			procs[i].procgloballist = &ProcGlobal->bgworkerFreeProcs;
 		}
-		else if (i < max_backends)
+		else if (i < MaxBackends)
 		{
 			/* PGPROC for walsender, add to walsenderFreeProcs list */
 			procs[i].links.next = (SHM_QUEUE *) ProcGlobal->walsenderFreeProcs;
@@ -287,8 +286,8 @@ InitProcGlobal(void)
 	 * Save pointers to the blocks of PGPROC structures reserved for auxiliary
 	 * processes and prepared transactions.
 	 */
-	AuxiliaryProcs = &procs[max_backends];
-	PreparedXactProcs = &procs[max_backends + NUM_AUXILIARY_PROCS];
+	AuxiliaryProcs = &procs[MaxBackends];
+	PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];
 
 	/* Create ProcStructLock spinlock, too */
 	ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));