Remove MaxBackends variable in favor of GetMaxBackends() function.
Previously, it was really easy to write code that accessed MaxBackends before we'd actually initialized it, especially when coding up an extension. To make this less error-prone, introduce a new function GetMaxBackends() which should be used to obtain the correct value; it will ERROR if called too early. Demote the global variable to a file-level static so that nobody can peek at it directly.

Nathan Bossart. Idea by Andres Freund. Review by Greg Sabino Mullane, by Michael Paquier (who had doubts about the approach), and by me.

Discussion: http://postgr.es/m/20210802224204.bckcikl45uezv5e4@alap3.anarazel.de
commit aa64f23b02
parent 2da896182c
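For extension authors, the practical consequence is that the backend count should be fetched where it is actually needed, at a point after InitializeMaxBackends() has run, rather than cached from the old global at library-load time. The following is a minimal, hypothetical sketch (the function name and loop body are illustrative and not part of this patch); GetMaxBackends(), its declaration in miscadmin.h, and the "MaxBackends not yet initialized" ERROR are what the commit itself provides.

/*
 * Hypothetical extension snippet (illustrative only): fetch the backend
 * count where it is needed, after InitializeMaxBackends() has run.
 */
#include "postgres.h"
#include "miscadmin.h"              /* declares GetMaxBackends() */

void
my_extension_scan_backends(void)    /* hypothetical helper, not in this patch */
{
    int         max_backends = GetMaxBackends();

    for (int i = 1; i <= max_backends; i++)
    {
        /* ... inspect per-backend state for slot i ... */
    }
}

/*
 * By contrast, calling GetMaxBackends() from a shared_preload_libraries
 * library's _PG_init() now fails with
 *     ERROR:  MaxBackends not yet initialized
 * because _PG_init() runs before InitializeMaxBackends().
 */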
@@ -2072,7 +2072,7 @@ BTreeShmemSize(void)
Size size;

size = offsetof(BTVacInfo, vacuums);
- size = add_size(size, mul_size(MaxBackends, sizeof(BTOneVacInfo)));
+ size = add_size(size, mul_size(GetMaxBackends(), sizeof(BTOneVacInfo)));
return size;
}

@@ -2101,7 +2101,7 @@ BTreeShmemInit(void)
btvacinfo->cycle_ctr = (BTCycleId) time(NULL);

btvacinfo->num_vacuums = 0;
- btvacinfo->max_vacuums = MaxBackends;
+ btvacinfo->max_vacuums = GetMaxBackends();
}
else
Assert(found);
@@ -282,12 +282,11 @@ typedef struct MultiXactStateData
} MultiXactStateData;

/*
- * Last element of OldestMemberMXactId and OldestVisibleMXactId arrays.
- * Valid elements are (1..MaxOldestSlot); element 0 is never used.
+ * Pointers to the state data in shared memory
+ *
+ * The index of the last element of the OldestMemberMXactId and
+ * OldestVisibleMXacId arrays can be obtained with GetMaxOldestSlot().
*/
- #define MaxOldestSlot (MaxBackends + max_prepared_xacts)
-
- /* Pointers to the state data in shared memory */
static MultiXactStateData *MultiXactState;
static MultiXactId *OldestMemberMXactId;
static MultiXactId *OldestVisibleMXactId;

@@ -342,6 +341,7 @@ static void MultiXactIdSetOldestVisible(void);
static void RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset,
int nmembers, MultiXactMember *members);
static MultiXactId GetNewMultiXactId(int nmembers, MultiXactOffset *offset);
+ static inline int GetMaxOldestSlot(void);

/* MultiXact cache management */
static int mxactMemberComparator(const void *arg1, const void *arg2);

@@ -662,6 +662,17 @@ MultiXactIdSetOldestMember(void)
}
}

+ /*
+ * Retrieve the index of the last element of the OldestMemberMXactId and
+ * OldestVisibleMXactId arrays. Valid elements are (1..MaxOldestSlot); element
+ * 0 is never used.
+ */
+ static inline int
+ GetMaxOldestSlot(void)
+ {
+ return GetMaxBackends() + max_prepared_xacts;
+ }
+
/*
* MultiXactIdSetOldestVisible
* Save the oldest MultiXactId this transaction considers possibly live.

@@ -684,6 +695,7 @@ MultiXactIdSetOldestVisible(void)
if (!MultiXactIdIsValid(OldestVisibleMXactId[MyBackendId]))
{
MultiXactId oldestMXact;
+ int maxOldestSlot = GetMaxOldestSlot();
int i;

LWLockAcquire(MultiXactGenLock, LW_EXCLUSIVE);

@@ -697,7 +709,7 @@ MultiXactIdSetOldestVisible(void)
if (oldestMXact < FirstMultiXactId)
oldestMXact = FirstMultiXactId;

- for (i = 1; i <= MaxOldestSlot; i++)
+ for (i = 1; i <= maxOldestSlot; i++)
{
MultiXactId thisoldest = OldestMemberMXactId[i];

@@ -1831,7 +1843,7 @@ MultiXactShmemSize(void)
/* We need 2*MaxOldestSlot + 1 perBackendXactIds[] entries */
#define SHARED_MULTIXACT_STATE_SIZE \
add_size(offsetof(MultiXactStateData, perBackendXactIds) + sizeof(MultiXactId), \
- mul_size(sizeof(MultiXactId) * 2, MaxOldestSlot))
+ mul_size(sizeof(MultiXactId) * 2, GetMaxOldestSlot()))

size = SHARED_MULTIXACT_STATE_SIZE;
size = add_size(size, SimpleLruShmemSize(NUM_MULTIXACTOFFSET_BUFFERS, 0));

@@ -1882,7 +1894,7 @@ MultiXactShmemInit(void)
* since we only use indexes 1..MaxOldestSlot in each array.
*/
OldestMemberMXactId = MultiXactState->perBackendXactIds;
- OldestVisibleMXactId = OldestMemberMXactId + MaxOldestSlot;
+ OldestVisibleMXactId = OldestMemberMXactId + GetMaxOldestSlot();
}

/*

@@ -2507,6 +2519,7 @@ GetOldestMultiXactId(void)
{
MultiXactId oldestMXact;
MultiXactId nextMXact;
+ int maxOldestSlot = GetMaxOldestSlot();
int i;

/*

@@ -2525,7 +2538,7 @@ GetOldestMultiXactId(void)
nextMXact = FirstMultiXactId;

oldestMXact = nextMXact;
- for (i = 1; i <= MaxOldestSlot; i++)
+ for (i = 1; i <= maxOldestSlot; i++)
{
MultiXactId thisoldest;

@@ -260,6 +260,7 @@ TwoPhaseShmemInit(void)
{
GlobalTransaction gxacts;
int i;
+ int max_backends = GetMaxBackends();

Assert(!found);
TwoPhaseState->freeGXacts = NULL;

@@ -293,7 +294,7 @@ TwoPhaseShmemInit(void)
* prepared transaction. Currently multixact.c uses that
* technique.
*/
- gxacts[i].dummyBackendId = MaxBackends + 1 + i;
+ gxacts[i].dummyBackendId = max_backends + 1 + i;
}
}
else
@@ -518,7 +518,7 @@ AsyncShmemSize(void)
Size size;

/* This had better match AsyncShmemInit */
- size = mul_size(MaxBackends + 1, sizeof(QueueBackendStatus));
+ size = mul_size(GetMaxBackends() + 1, sizeof(QueueBackendStatus));
size = add_size(size, offsetof(AsyncQueueControl, backend));

size = add_size(size, SimpleLruShmemSize(NUM_NOTIFY_BUFFERS, 0));

@@ -534,6 +534,7 @@ AsyncShmemInit(void)
{
bool found;
Size size;
+ int max_backends = GetMaxBackends();

/*
* Create or attach to the AsyncQueueControl structure.

@@ -541,7 +542,7 @@ AsyncShmemInit(void)
* The used entries in the backend[] array run from 1 to MaxBackends; the
* zero'th entry is unused but must be allocated.
*/
- size = mul_size(MaxBackends + 1, sizeof(QueueBackendStatus));
+ size = mul_size(max_backends + 1, sizeof(QueueBackendStatus));
size = add_size(size, offsetof(AsyncQueueControl, backend));

asyncQueueControl = (AsyncQueueControl *)

@@ -556,7 +557,7 @@ AsyncShmemInit(void)
QUEUE_FIRST_LISTENER = InvalidBackendId;
asyncQueueControl->lastQueueFillWarn = 0;
/* zero'th entry won't be used, but let's initialize it anyway */
- for (int i = 0; i <= MaxBackends; i++)
+ for (int i = 0; i <= max_backends; i++)
{
QUEUE_BACKEND_PID(i) = InvalidPid;
QUEUE_BACKEND_DBOID(i) = InvalidOid;

@@ -1641,8 +1642,8 @@ SignalBackends(void)
* XXX in principle these pallocs could fail, which would be bad. Maybe
* preallocate the arrays? They're not that large, though.
*/
- pids = (int32 *) palloc(MaxBackends * sizeof(int32));
- ids = (BackendId *) palloc(MaxBackends * sizeof(BackendId));
+ pids = (int32 *) palloc(GetMaxBackends() * sizeof(int32));
+ ids = (BackendId *) palloc(GetMaxBackends() * sizeof(BackendId));
count = 0;

LWLockAcquire(NotifyQueueLock, LW_EXCLUSIVE);
@@ -349,6 +349,7 @@ StreamServerPort(int family, const char *hostName, unsigned short portNumber,
struct addrinfo hint;
int listen_index = 0;
int added = 0;
+ int max_backends = GetMaxBackends();

#ifdef HAVE_UNIX_SOCKETS
char unixSocketPath[MAXPGPATH];

@@ -571,7 +572,7 @@ StreamServerPort(int family, const char *hostName, unsigned short portNumber,
* intended to provide a clamp on the request on platforms where an
* overly large request provokes a kernel error (are there any?).
*/
- maxconn = MaxBackends * 2;
+ maxconn = max_backends * 2;
if (maxconn > PG_SOMAXCONN)
maxconn = PG_SOMAXCONN;

@@ -116,7 +116,7 @@ AuxiliaryProcessMain(AuxProcType auxtype)
* This will need rethinking if we ever want more than one of a particular
* auxiliary process type.
*/
- ProcSignalInit(MaxBackends + MyAuxProcType + 1);
+ ProcSignalInit(GetMaxBackends() + MyAuxProcType + 1);

/*
* Auxiliary processes don't run transactions, but they may need a
@@ -6260,7 +6260,7 @@ save_backend_variables(BackendParameters *param, Port *port,
param->query_id_enabled = query_id_enabled;
param->max_safe_fds = max_safe_fds;

- param->MaxBackends = MaxBackends;
+ param->MaxBackends = GetMaxBackends();

#ifdef WIN32
param->PostmasterHandle = PostmasterHandle;

@@ -6494,7 +6494,7 @@ restore_backend_variables(BackendParameters *param, Port *port)
query_id_enabled = param->query_id_enabled;
max_safe_fds = param->max_safe_fds;

- MaxBackends = param->MaxBackends;
+ SetMaxBackends(param->MaxBackends);

#ifdef WIN32
PostmasterHandle = param->PostmasterHandle;
@@ -166,7 +166,7 @@ dsm_postmaster_startup(PGShmemHeader *shim)

/* Determine size for new control segment. */
maxitems = PG_DYNSHMEM_FIXED_SLOTS
- + PG_DYNSHMEM_SLOTS_PER_BACKEND * MaxBackends;
+ + PG_DYNSHMEM_SLOTS_PER_BACKEND * GetMaxBackends();
elog(DEBUG2, "dynamic shared memory system will support %u segments",
maxitems);
segsize = dsm_control_bytes_needed(maxitems);
@@ -97,7 +97,7 @@ typedef struct ProcArrayStruct
/* oldest catalog xmin of any replication slot */
TransactionId replication_slot_catalog_xmin;

- /* indexes into allProcs[], has PROCARRAY_MAXPROCS entries */
+ /* indexes into allProcs[], has ProcArrayMaxProcs entries */
int pgprocnos[FLEXIBLE_ARRAY_MEMBER];
} ProcArrayStruct;

@@ -355,6 +355,17 @@ static void MaintainLatestCompletedXidRecovery(TransactionId latestXid);
static inline FullTransactionId FullXidRelativeTo(FullTransactionId rel,
TransactionId xid);
static void GlobalVisUpdateApply(ComputeXidHorizonsResult *horizons);
+ static inline int GetProcArrayMaxProcs(void);
+

+ /*
+ * Retrieve the number of slots in the ProcArray structure.
+ */
+ static inline int
+ GetProcArrayMaxProcs(void)
+ {
+ return GetMaxBackends() + max_prepared_xacts;
+ }
+
/*
* Report shared-memory space needed by CreateSharedProcArray.

@@ -365,10 +376,8 @@ ProcArrayShmemSize(void)
Size size;

/* Size of the ProcArray structure itself */
- #define PROCARRAY_MAXPROCS (MaxBackends + max_prepared_xacts)
-
size = offsetof(ProcArrayStruct, pgprocnos);
- size = add_size(size, mul_size(sizeof(int), PROCARRAY_MAXPROCS));
+ size = add_size(size, mul_size(sizeof(int), GetProcArrayMaxProcs()));

/*
* During Hot Standby processing we have a data structure called

@@ -384,7 +393,7 @@ ProcArrayShmemSize(void)
* shared memory is being set up.
*/
#define TOTAL_MAX_CACHED_SUBXIDS \
- ((PGPROC_MAX_CACHED_SUBXIDS + 1) * PROCARRAY_MAXPROCS)
+ ((PGPROC_MAX_CACHED_SUBXIDS + 1) * GetProcArrayMaxProcs())

if (EnableHotStandby)
{

@@ -411,7 +420,7 @@ CreateSharedProcArray(void)
ShmemInitStruct("Proc Array",
add_size(offsetof(ProcArrayStruct, pgprocnos),
mul_size(sizeof(int),
- PROCARRAY_MAXPROCS)),
+ GetProcArrayMaxProcs())),
&found);

if (!found)

@@ -420,7 +429,7 @@ CreateSharedProcArray(void)
* We're the first - initialize.
*/
procArray->numProcs = 0;
- procArray->maxProcs = PROCARRAY_MAXPROCS;
+ procArray->maxProcs = GetProcArrayMaxProcs();
procArray->maxKnownAssignedXids = TOTAL_MAX_CACHED_SUBXIDS;
procArray->numKnownAssignedXids = 0;
procArray->tailKnownAssignedXids = 0;

@@ -4623,7 +4632,7 @@ KnownAssignedXidsCompress(bool force)
*/
int nelements = head - tail;

- if (nelements < 4 * PROCARRAY_MAXPROCS ||
+ if (nelements < 4 * GetProcArrayMaxProcs() ||
nelements < 2 * pArray->numKnownAssignedXids)
return;
}
@@ -80,13 +80,6 @@ typedef struct
ProcSignalSlot psh_slot[FLEXIBLE_ARRAY_MEMBER];
} ProcSignalHeader;

- /*
- * We reserve a slot for each possible BackendId, plus one for each
- * possible auxiliary process type. (This scheme assumes there is not
- * more than one of any auxiliary process type at a time.)
- */
- #define NumProcSignalSlots (MaxBackends + NUM_AUXPROCTYPES)
-
/* Check whether the relevant type bit is set in the flags. */
#define BARRIER_SHOULD_CHECK(flags, type) \
(((flags) & (((uint32) 1) << (uint32) (type))) != 0)

@@ -102,6 +95,20 @@ static bool CheckProcSignal(ProcSignalReason reason);
static void CleanupProcSignalState(int status, Datum arg);
static void ResetProcSignalBarrierBits(uint32 flags);
static bool ProcessBarrierPlaceholder(void);
+ static inline int GetNumProcSignalSlots(void);

+ /*
+ * GetNumProcSignalSlots
+ *
+ * We reserve a slot for each possible BackendId, plus one for each possible
+ * auxiliary process type. (This scheme assume there is not more than one of
+ * any auxiliary process type at a time.)
+ */
+ static inline int
+ GetNumProcSignalSlots(void)
+ {
+ return GetMaxBackends() + NUM_AUXPROCTYPES;
+ }
+
/*
* ProcSignalShmemSize

@@ -112,7 +119,7 @@ ProcSignalShmemSize(void)
{
Size size;

- size = mul_size(NumProcSignalSlots, sizeof(ProcSignalSlot));
+ size = mul_size(GetNumProcSignalSlots(), sizeof(ProcSignalSlot));
size = add_size(size, offsetof(ProcSignalHeader, psh_slot));
return size;
}

@@ -126,6 +133,7 @@ ProcSignalShmemInit(void)
{
Size size = ProcSignalShmemSize();
bool found;
+ int numProcSignalSlots = GetNumProcSignalSlots();

ProcSignal = (ProcSignalHeader *)
ShmemInitStruct("ProcSignal", size, &found);

@@ -137,7 +145,7 @@ ProcSignalShmemInit(void)

pg_atomic_init_u64(&ProcSignal->psh_barrierGeneration, 0);

- for (i = 0; i < NumProcSignalSlots; ++i)
+ for (i = 0; i < numProcSignalSlots; ++i)
{
ProcSignalSlot *slot = &ProcSignal->psh_slot[i];

@@ -163,7 +171,7 @@ ProcSignalInit(int pss_idx)
ProcSignalSlot *slot;
uint64 barrier_generation;

- Assert(pss_idx >= 1 && pss_idx <= NumProcSignalSlots);
+ Assert(pss_idx >= 1 && pss_idx <= GetNumProcSignalSlots());

slot = &ProcSignal->psh_slot[pss_idx - 1];

@@ -292,7 +300,7 @@ SendProcSignal(pid_t pid, ProcSignalReason reason, BackendId backendId)
*/
int i;

- for (i = NumProcSignalSlots - 1; i >= 0; i--)
+ for (i = GetNumProcSignalSlots() - 1; i >= 0; i--)
{
slot = &ProcSignal->psh_slot[i];

@@ -333,6 +341,7 @@ EmitProcSignalBarrier(ProcSignalBarrierType type)
{
uint32 flagbit = 1 << (uint32) type;
uint64 generation;
+ int numProcSignalSlots = GetNumProcSignalSlots();

/*
* Set all the flags.

@@ -342,7 +351,7 @@ EmitProcSignalBarrier(ProcSignalBarrierType type)
* anything that we do afterwards. (This is also true of the later call to
* pg_atomic_add_fetch_u64.)
*/
- for (int i = 0; i < NumProcSignalSlots; i++)
+ for (int i = 0; i < numProcSignalSlots; i++)
{
volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];

@@ -368,7 +377,7 @@ EmitProcSignalBarrier(ProcSignalBarrierType type)
* backends that need to update state - but they won't actually need to
* change any state.
*/
- for (int i = NumProcSignalSlots - 1; i >= 0; i--)
+ for (int i = numProcSignalSlots - 1; i >= 0; i--)
{
volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
pid_t pid = slot->pss_pid;

@@ -393,7 +402,7 @@ WaitForProcSignalBarrier(uint64 generation)
{
Assert(generation <= pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration));

- for (int i = NumProcSignalSlots - 1; i >= 0; i--)
+ for (int i = GetNumProcSignalSlots() - 1; i >= 0; i--)
{
ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
uint64 oldval;
@@ -205,7 +205,7 @@ SInvalShmemSize(void)
Size size;

size = offsetof(SISeg, procState);
- size = add_size(size, mul_size(sizeof(ProcState), MaxBackends));
+ size = add_size(size, mul_size(sizeof(ProcState), GetMaxBackends()));

return size;
}

@@ -231,7 +231,7 @@ CreateSharedInvalidationState(void)
shmInvalBuffer->maxMsgNum = 0;
shmInvalBuffer->nextThreshold = CLEANUP_MIN;
shmInvalBuffer->lastBackend = 0;
- shmInvalBuffer->maxBackends = MaxBackends;
+ shmInvalBuffer->maxBackends = GetMaxBackends();
SpinLockInit(&shmInvalBuffer->msgnumLock);

/* The buffer[] array is initially all unused, so we need not fill it */
@@ -143,6 +143,7 @@ void
InitDeadLockChecking(void)
{
MemoryContext oldcxt;
+ int max_backends = GetMaxBackends();

/* Make sure allocations are permanent */
oldcxt = MemoryContextSwitchTo(TopMemoryContext);

@@ -151,16 +152,16 @@ InitDeadLockChecking(void)
* FindLockCycle needs at most MaxBackends entries in visitedProcs[] and
* deadlockDetails[].
*/
- visitedProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
- deadlockDetails = (DEADLOCK_INFO *) palloc(MaxBackends * sizeof(DEADLOCK_INFO));
+ visitedProcs = (PGPROC **) palloc(max_backends * sizeof(PGPROC *));
+ deadlockDetails = (DEADLOCK_INFO *) palloc(max_backends * sizeof(DEADLOCK_INFO));

/*
* TopoSort needs to consider at most MaxBackends wait-queue entries, and
* it needn't run concurrently with FindLockCycle.
*/
topoProcs = visitedProcs; /* re-use this space */
- beforeConstraints = (int *) palloc(MaxBackends * sizeof(int));
- afterConstraints = (int *) palloc(MaxBackends * sizeof(int));
+ beforeConstraints = (int *) palloc(max_backends * sizeof(int));
+ afterConstraints = (int *) palloc(max_backends * sizeof(int));

/*
* We need to consider rearranging at most MaxBackends/2 wait queues

@@ -169,8 +170,8 @@ InitDeadLockChecking(void)
* MaxBackends total waiters.
*/
waitOrders = (WAIT_ORDER *)
- palloc((MaxBackends / 2) * sizeof(WAIT_ORDER));
- waitOrderProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
+ palloc((max_backends / 2) * sizeof(WAIT_ORDER));
+ waitOrderProcs = (PGPROC **) palloc(max_backends * sizeof(PGPROC *));

/*
* Allow at most MaxBackends distinct constraints in a configuration. (Is

@@ -180,7 +181,7 @@ InitDeadLockChecking(void)
* limits the maximum recursion depth of DeadLockCheckRecurse. Making it
* really big might potentially allow a stack-overflow problem.
*/
- maxCurConstraints = MaxBackends;
+ maxCurConstraints = max_backends;
curConstraints = (EDGE *) palloc(maxCurConstraints * sizeof(EDGE));

/*

@@ -191,7 +192,7 @@ InitDeadLockChecking(void)
* last MaxBackends entries in possibleConstraints[] are reserved as
* output workspace for FindLockCycle.
*/
- maxPossibleConstraints = MaxBackends * 4;
+ maxPossibleConstraints = max_backends * 4;
possibleConstraints =
(EDGE *) palloc(maxPossibleConstraints * sizeof(EDGE));

@@ -327,7 +328,7 @@ DeadLockCheckRecurse(PGPROC *proc)
if (nCurConstraints >= maxCurConstraints)
return true; /* out of room for active constraints? */
oldPossibleConstraints = nPossibleConstraints;
- if (nPossibleConstraints + nEdges + MaxBackends <= maxPossibleConstraints)
+ if (nPossibleConstraints + nEdges + GetMaxBackends() <= maxPossibleConstraints)
{
/* We can save the edge list in possibleConstraints[] */
nPossibleConstraints += nEdges;

@@ -388,7 +389,7 @@ TestConfiguration(PGPROC *startProc)
/*
* Make sure we have room for FindLockCycle's output.
*/
- if (nPossibleConstraints + MaxBackends > maxPossibleConstraints)
+ if (nPossibleConstraints + GetMaxBackends() > maxPossibleConstraints)
return -1;

/*

@@ -486,7 +487,7 @@ FindLockCycleRecurse(PGPROC *checkProc,
* record total length of cycle --- outer levels will now fill
* deadlockDetails[]
*/
- Assert(depth <= MaxBackends);
+ Assert(depth <= GetMaxBackends());
nDeadlockDetails = depth;

return true;

@@ -500,7 +501,7 @@ FindLockCycleRecurse(PGPROC *checkProc,
}
}
/* Mark proc as seen */
- Assert(nVisitedProcs < MaxBackends);
+ Assert(nVisitedProcs < GetMaxBackends());
visitedProcs[nVisitedProcs++] = checkProc;

/*

@@ -698,7 +699,7 @@ FindLockCycleRecurseMember(PGPROC *checkProc,
/*
* Add this edge to the list of soft edges in the cycle
*/
- Assert(*nSoftEdges < MaxBackends);
+ Assert(*nSoftEdges < GetMaxBackends());
softEdges[*nSoftEdges].waiter = checkProcLeader;
softEdges[*nSoftEdges].blocker = leader;
softEdges[*nSoftEdges].lock = lock;

@@ -771,7 +772,7 @@ FindLockCycleRecurseMember(PGPROC *checkProc,
/*
* Add this edge to the list of soft edges in the cycle
*/
- Assert(*nSoftEdges < MaxBackends);
+ Assert(*nSoftEdges < GetMaxBackends());
softEdges[*nSoftEdges].waiter = checkProcLeader;
softEdges[*nSoftEdges].blocker = leader;
softEdges[*nSoftEdges].lock = lock;

@@ -834,7 +835,7 @@ ExpandConstraints(EDGE *constraints,
waitOrders[nWaitOrders].procs = waitOrderProcs + nWaitOrderProcs;
waitOrders[nWaitOrders].nProcs = lock->waitProcs.size;
nWaitOrderProcs += lock->waitProcs.size;
- Assert(nWaitOrderProcs <= MaxBackends);
+ Assert(nWaitOrderProcs <= GetMaxBackends());

/*
* Do the topo sort. TopoSort need not examine constraints after this
@@ -55,7 +55,7 @@
int max_locks_per_xact; /* set by guc.c */

#define NLOCKENTS() \
- mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
+ mul_size(max_locks_per_xact, add_size(GetMaxBackends(), max_prepared_xacts))


/*

@@ -2942,12 +2942,12 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
vxids = (VirtualTransactionId *)
MemoryContextAlloc(TopMemoryContext,
sizeof(VirtualTransactionId) *
- (MaxBackends + max_prepared_xacts + 1));
+ (GetMaxBackends() + max_prepared_xacts + 1));
}
else
vxids = (VirtualTransactionId *)
palloc0(sizeof(VirtualTransactionId) *
- (MaxBackends + max_prepared_xacts + 1));
+ (GetMaxBackends() + max_prepared_xacts + 1));

/* Compute hash code and partition lock, and look up conflicting modes. */
hashcode = LockTagHashCode(locktag);

@@ -3104,7 +3104,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)

LWLockRelease(partitionLock);

- if (count > MaxBackends + max_prepared_xacts) /* should never happen */
+ if (count > GetMaxBackends() + max_prepared_xacts) /* should never happen */
elog(PANIC, "too many conflicting locks found");

vxids[count].backendId = InvalidBackendId;

@@ -3651,11 +3651,12 @@ GetLockStatusData(void)
int els;
int el;
int i;
+ int max_backends = GetMaxBackends();

data = (LockData *) palloc(sizeof(LockData));

/* Guess how much space we'll need. */
- els = MaxBackends;
+ els = max_backends;
el = 0;
data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);

@@ -3689,7 +3690,7 @@ GetLockStatusData(void)

if (el >= els)
{
- els += MaxBackends;
+ els += max_backends;
data->locks = (LockInstanceData *)
repalloc(data->locks, sizeof(LockInstanceData) * els);
}

@@ -3721,7 +3722,7 @@ GetLockStatusData(void)

if (el >= els)
{
- els += MaxBackends;
+ els += max_backends;
data->locks = (LockInstanceData *)
repalloc(data->locks, sizeof(LockInstanceData) * els);
}

@@ -3850,7 +3851,7 @@ GetBlockerStatusData(int blocked_pid)
* for the procs[] array; the other two could need enlargement, though.)
*/
data->nprocs = data->nlocks = data->npids = 0;
- data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
+ data->maxprocs = data->maxlocks = data->maxpids = GetMaxBackends();
data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);

@@ -3925,6 +3926,7 @@ GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
PGPROC *proc;
int queue_size;
int i;
+ int max_backends = GetMaxBackends();

/* Nothing to do if this proc is not blocked */
if (theLock == NULL)

@@ -3953,7 +3955,7 @@ GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)

if (data->nlocks >= data->maxlocks)
{
- data->maxlocks += MaxBackends;
+ data->maxlocks += max_backends;
data->locks = (LockInstanceData *)
repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
}

@@ -3982,7 +3984,7 @@ GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)

if (queue_size > data->maxpids - data->npids)
{
- data->maxpids = Max(data->maxpids + MaxBackends,
+ data->maxpids = Max(data->maxpids + max_backends,
data->npids + queue_size);
data->waiter_pids = (int *) repalloc(data->waiter_pids,
sizeof(int) * data->maxpids);
@@ -257,7 +257,7 @@
(&MainLWLockArray[PREDICATELOCK_MANAGER_LWLOCK_OFFSET + (i)].lock)

#define NPREDICATELOCKTARGETENTS() \
- mul_size(max_predicate_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
+ mul_size(max_predicate_locks_per_xact, add_size(GetMaxBackends(), max_prepared_xacts))

#define SxactIsOnFinishedList(sxact) (!SHMQueueIsDetached(&((sxact)->finishedLink)))

@@ -1222,7 +1222,7 @@ InitPredicateLocks(void)
* Compute size for serializable transaction hashtable. Note these
* calculations must agree with PredicateLockShmemSize!
*/
- max_table_size = (MaxBackends + max_prepared_xacts);
+ max_table_size = (GetMaxBackends() + max_prepared_xacts);

/*
* Allocate a list to hold information on transactions participating in

@@ -1375,7 +1375,7 @@ PredicateLockShmemSize(void)
size = add_size(size, size / 10);

/* transaction list */
- max_table_size = MaxBackends + max_prepared_xacts;
+ max_table_size = GetMaxBackends() + max_prepared_xacts;
max_table_size *= 10;
size = add_size(size, PredXactListDataSize);
size = add_size(size, mul_size((Size) max_table_size,

@@ -1907,7 +1907,7 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
{
++(PredXact->WritableSxactCount);
Assert(PredXact->WritableSxactCount <=
- (MaxBackends + max_prepared_xacts));
+ (GetMaxBackends() + max_prepared_xacts));
}

MySerializableXact = sxact;

@@ -5111,7 +5111,7 @@ predicatelock_twophase_recover(TransactionId xid, uint16 info,
{
++(PredXact->WritableSxactCount);
Assert(PredXact->WritableSxactCount <=
- (MaxBackends + max_prepared_xacts));
+ (GetMaxBackends() + max_prepared_xacts));
}

/*
@@ -103,7 +103,7 @@ ProcGlobalShmemSize(void)
{
Size size = 0;
Size TotalProcs =
- add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
+ add_size(GetMaxBackends(), add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));

/* ProcGlobal */
size = add_size(size, sizeof(PROC_HDR));

@@ -127,7 +127,7 @@ ProcGlobalSemas(void)
* We need a sema per backend (including autovacuum), plus one for each
* auxiliary process.
*/
- return MaxBackends + NUM_AUXILIARY_PROCS;
+ return GetMaxBackends() + NUM_AUXILIARY_PROCS;
}

/*

@@ -162,7 +162,8 @@ InitProcGlobal(void)
int i,
j;
bool found;
- uint32 TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
+ int max_backends = GetMaxBackends();
+ uint32 TotalProcs = max_backends + NUM_AUXILIARY_PROCS + max_prepared_xacts;

/* Create the ProcGlobal shared structure */
ProcGlobal = (PROC_HDR *)

@@ -195,7 +196,7 @@ InitProcGlobal(void)
MemSet(procs, 0, TotalProcs * sizeof(PGPROC));
ProcGlobal->allProcs = procs;
/* XXX allProcCount isn't really all of them; it excludes prepared xacts */
- ProcGlobal->allProcCount = MaxBackends + NUM_AUXILIARY_PROCS;
+ ProcGlobal->allProcCount = max_backends + NUM_AUXILIARY_PROCS;

/*
* Allocate arrays mirroring PGPROC fields in a dense manner. See

@@ -221,7 +222,7 @@ InitProcGlobal(void)
* dummy PGPROCs don't need these though - they're never associated
* with a real process
*/
- if (i < MaxBackends + NUM_AUXILIARY_PROCS)
+ if (i < max_backends + NUM_AUXILIARY_PROCS)
{
procs[i].sem = PGSemaphoreCreate();
InitSharedLatch(&(procs[i].procLatch));

@@ -258,7 +259,7 @@ InitProcGlobal(void)
ProcGlobal->bgworkerFreeProcs = &procs[i];
procs[i].procgloballist = &ProcGlobal->bgworkerFreeProcs;
}
- else if (i < MaxBackends)
+ else if (i < max_backends)
{
/* PGPROC for walsender, add to walsenderFreeProcs list */
procs[i].links.next = (SHM_QUEUE *) ProcGlobal->walsenderFreeProcs;

@@ -286,8 +287,8 @@ InitProcGlobal(void)
* Save pointers to the blocks of PGPROC structures reserved for auxiliary
* processes and prepared transactions.
*/
- AuxiliaryProcs = &procs[MaxBackends];
- PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];
+ AuxiliaryProcs = &procs[max_backends];
+ PreparedXactProcs = &procs[max_backends + NUM_AUXILIARY_PROCS];

/* Create ProcStructLock spinlock, too */
ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
@@ -26,18 +26,6 @@
#include "utils/memutils.h"


- /* ----------
- * Total number of backends including auxiliary
- *
- * We reserve a slot for each possible BackendId, plus one for each
- * possible auxiliary process type. (This scheme assumes there is not
- * more than one of any auxiliary process type at a time.) MaxBackends
- * includes autovacuum workers and background workers as well.
- * ----------
- */
- #define NumBackendStatSlots (MaxBackends + NUM_AUXPROCTYPES)
-
-
/* ----------
* GUC parameters
* ----------

@@ -75,8 +63,23 @@ static MemoryContext backendStatusSnapContext;
static void pgstat_beshutdown_hook(int code, Datum arg);
static void pgstat_read_current_status(void);
static void pgstat_setup_backend_status_context(void);
+ static inline int GetNumBackendStatSlots(void);


+ /*
+ * Retrieve the total number of backends including auxiliary
+ *
+ * We reserve a slot for each possible BackendId, plus one for each possible
+ * auxiliary process type. (This scheme assumes there is not more than one of
+ * any auxiliary process type at a time.) MaxBackends includes autovacuum
+ * workers and background workers as well.
+ */
+ static inline int
+ GetNumBackendStatSlots(void)
+ {
+ return GetMaxBackends() + NUM_AUXPROCTYPES;
+ }
+
/*
* Report shared-memory space needed by CreateSharedBackendStatus.
*/

@@ -84,27 +87,28 @@ Size
BackendStatusShmemSize(void)
{
Size size;
+ int numBackendStatSlots = GetNumBackendStatSlots();

/* BackendStatusArray: */
- size = mul_size(sizeof(PgBackendStatus), NumBackendStatSlots);
+ size = mul_size(sizeof(PgBackendStatus), numBackendStatSlots);
/* BackendAppnameBuffer: */
size = add_size(size,
- mul_size(NAMEDATALEN, NumBackendStatSlots));
+ mul_size(NAMEDATALEN, numBackendStatSlots));
/* BackendClientHostnameBuffer: */
size = add_size(size,
- mul_size(NAMEDATALEN, NumBackendStatSlots));
+ mul_size(NAMEDATALEN, numBackendStatSlots));
/* BackendActivityBuffer: */
size = add_size(size,
- mul_size(pgstat_track_activity_query_size, NumBackendStatSlots));
+ mul_size(pgstat_track_activity_query_size, numBackendStatSlots));
#ifdef USE_SSL
/* BackendSslStatusBuffer: */
size = add_size(size,
- mul_size(sizeof(PgBackendSSLStatus), NumBackendStatSlots));
+ mul_size(sizeof(PgBackendSSLStatus), numBackendStatSlots));
#endif
#ifdef ENABLE_GSS
/* BackendGssStatusBuffer: */
size = add_size(size,
- mul_size(sizeof(PgBackendGSSStatus), NumBackendStatSlots));
+ mul_size(sizeof(PgBackendGSSStatus), numBackendStatSlots));
#endif
return size;
}

@@ -120,9 +124,10 @@ CreateSharedBackendStatus(void)
bool found;
int i;
char *buffer;
+ int numBackendStatSlots = GetNumBackendStatSlots();

/* Create or attach to the shared array */
- size = mul_size(sizeof(PgBackendStatus), NumBackendStatSlots);
+ size = mul_size(sizeof(PgBackendStatus), numBackendStatSlots);
BackendStatusArray = (PgBackendStatus *)
ShmemInitStruct("Backend Status Array", size, &found);

@@ -135,7 +140,7 @@ CreateSharedBackendStatus(void)
}

/* Create or attach to the shared appname buffer */
- size = mul_size(NAMEDATALEN, NumBackendStatSlots);
+ size = mul_size(NAMEDATALEN, numBackendStatSlots);
BackendAppnameBuffer = (char *)
ShmemInitStruct("Backend Application Name Buffer", size, &found);

@@ -145,7 +150,7 @@ CreateSharedBackendStatus(void)

/* Initialize st_appname pointers. */
buffer = BackendAppnameBuffer;
- for (i = 0; i < NumBackendStatSlots; i++)
+ for (i = 0; i < numBackendStatSlots; i++)
{
BackendStatusArray[i].st_appname = buffer;
buffer += NAMEDATALEN;

@@ -153,7 +158,7 @@ CreateSharedBackendStatus(void)
}

/* Create or attach to the shared client hostname buffer */
- size = mul_size(NAMEDATALEN, NumBackendStatSlots);
+ size = mul_size(NAMEDATALEN, numBackendStatSlots);
BackendClientHostnameBuffer = (char *)
ShmemInitStruct("Backend Client Host Name Buffer", size, &found);

@@ -163,7 +168,7 @@ CreateSharedBackendStatus(void)

/* Initialize st_clienthostname pointers. */
buffer = BackendClientHostnameBuffer;
- for (i = 0; i < NumBackendStatSlots; i++)
+ for (i = 0; i < numBackendStatSlots; i++)
{
BackendStatusArray[i].st_clienthostname = buffer;
buffer += NAMEDATALEN;

@@ -172,7 +177,7 @@ CreateSharedBackendStatus(void)

/* Create or attach to the shared activity buffer */
BackendActivityBufferSize = mul_size(pgstat_track_activity_query_size,
- NumBackendStatSlots);
+ numBackendStatSlots);
BackendActivityBuffer = (char *)
ShmemInitStruct("Backend Activity Buffer",
BackendActivityBufferSize,

@@ -184,7 +189,7 @@ CreateSharedBackendStatus(void)

/* Initialize st_activity pointers. */
buffer = BackendActivityBuffer;
- for (i = 0; i < NumBackendStatSlots; i++)
+ for (i = 0; i < numBackendStatSlots; i++)
{
BackendStatusArray[i].st_activity_raw = buffer;
buffer += pgstat_track_activity_query_size;

@@ -193,7 +198,7 @@ CreateSharedBackendStatus(void)

#ifdef USE_SSL
/* Create or attach to the shared SSL status buffer */
- size = mul_size(sizeof(PgBackendSSLStatus), NumBackendStatSlots);
+ size = mul_size(sizeof(PgBackendSSLStatus), numBackendStatSlots);
BackendSslStatusBuffer = (PgBackendSSLStatus *)
ShmemInitStruct("Backend SSL Status Buffer", size, &found);

@@ -205,7 +210,7 @@ CreateSharedBackendStatus(void)

/* Initialize st_sslstatus pointers. */
ptr = BackendSslStatusBuffer;
- for (i = 0; i < NumBackendStatSlots; i++)
+ for (i = 0; i < numBackendStatSlots; i++)
{
BackendStatusArray[i].st_sslstatus = ptr;
ptr++;

@@ -215,7 +220,7 @@ CreateSharedBackendStatus(void)

#ifdef ENABLE_GSS
/* Create or attach to the shared GSSAPI status buffer */
- size = mul_size(sizeof(PgBackendGSSStatus), NumBackendStatSlots);
+ size = mul_size(sizeof(PgBackendGSSStatus), numBackendStatSlots);
BackendGssStatusBuffer = (PgBackendGSSStatus *)
ShmemInitStruct("Backend GSS Status Buffer", size, &found);

@@ -227,7 +232,7 @@ CreateSharedBackendStatus(void)

/* Initialize st_gssstatus pointers. */
ptr = BackendGssStatusBuffer;
- for (i = 0; i < NumBackendStatSlots; i++)
+ for (i = 0; i < numBackendStatSlots; i++)
{
BackendStatusArray[i].st_gssstatus = ptr;
ptr++;

@@ -251,7 +256,7 @@ pgstat_beinit(void)
/* Initialize MyBEEntry */
if (MyBackendId != InvalidBackendId)
{
- Assert(MyBackendId >= 1 && MyBackendId <= MaxBackends);
+ Assert(MyBackendId >= 1 && MyBackendId <= GetMaxBackends());
MyBEEntry = &BackendStatusArray[MyBackendId - 1];
}
else

@@ -267,7 +272,7 @@ pgstat_beinit(void)
* MaxBackends + AuxBackendType + 1 as the index of the slot for an
* auxiliary process.
*/
- MyBEEntry = &BackendStatusArray[MaxBackends + MyAuxProcType];
+ MyBEEntry = &BackendStatusArray[GetMaxBackends() + MyAuxProcType];
}

/* Set up a process-exit hook to clean up */

@@ -739,6 +744,7 @@ pgstat_read_current_status(void)
PgBackendGSSStatus *localgssstatus;
#endif
int i;
+ int numBackendStatSlots = GetNumBackendStatSlots();

if (localBackendStatusTable)
return; /* already done */

@@ -755,32 +761,32 @@ pgstat_read_current_status(void)
*/
localtable = (LocalPgBackendStatus *)
MemoryContextAlloc(backendStatusSnapContext,
- sizeof(LocalPgBackendStatus) * NumBackendStatSlots);
+ sizeof(LocalPgBackendStatus) * numBackendStatSlots);
localappname = (char *)
MemoryContextAlloc(backendStatusSnapContext,
- NAMEDATALEN * NumBackendStatSlots);
+ NAMEDATALEN * numBackendStatSlots);
localclienthostname = (char *)
MemoryContextAlloc(backendStatusSnapContext,
- NAMEDATALEN * NumBackendStatSlots);
+ NAMEDATALEN * numBackendStatSlots);
localactivity = (char *)
MemoryContextAllocHuge(backendStatusSnapContext,
- pgstat_track_activity_query_size * NumBackendStatSlots);
+ pgstat_track_activity_query_size * numBackendStatSlots);
#ifdef USE_SSL
localsslstatus = (PgBackendSSLStatus *)
MemoryContextAlloc(backendStatusSnapContext,
- sizeof(PgBackendSSLStatus) * NumBackendStatSlots);
+ sizeof(PgBackendSSLStatus) * numBackendStatSlots);
#endif
#ifdef ENABLE_GSS
localgssstatus = (PgBackendGSSStatus *)
MemoryContextAlloc(backendStatusSnapContext,
- sizeof(PgBackendGSSStatus) * NumBackendStatSlots);
+ sizeof(PgBackendGSSStatus) * numBackendStatSlots);
#endif

localNumBackends = 0;

beentry = BackendStatusArray;
localentry = localtable;
- for (i = 1; i <= NumBackendStatSlots; i++)
+ for (i = 1; i <= numBackendStatSlots; i++)
{
/*
* Follow the protocol of retrying if st_changecount changes while we

@@ -893,9 +899,10 @@ pgstat_get_backend_current_activity(int pid, bool checkUser)
{
PgBackendStatus *beentry;
int i;
+ int max_backends = GetMaxBackends();

beentry = BackendStatusArray;
- for (i = 1; i <= MaxBackends; i++)
+ for (i = 1; i <= max_backends; i++)
{
/*
* Although we expect the target backend's entry to be stable, that

@@ -971,6 +978,7 @@ pgstat_get_crashed_backend_activity(int pid, char *buffer, int buflen)
{
volatile PgBackendStatus *beentry;
int i;
+ int max_backends = GetMaxBackends();

beentry = BackendStatusArray;

@@ -981,7 +989,7 @@ pgstat_get_crashed_backend_activity(int pid, char *buffer, int buflen)
if (beentry == NULL || BackendActivityBuffer == NULL)
return NULL;

- for (i = 1; i <= MaxBackends; i++)
+ for (i = 1; i <= max_backends; i++)
{
if (beentry->st_procpid == pid)
{
@@ -561,11 +561,11 @@ pg_safe_snapshot_blocking_pids(PG_FUNCTION_ARGS)
Datum *blocker_datums;

/* A buffer big enough for any possible blocker list without truncation */
- blockers = (int *) palloc(MaxBackends * sizeof(int));
+ blockers = (int *) palloc(GetMaxBackends() * sizeof(int));

/* Collect a snapshot of processes waited for by GetSafeSnapshot */
num_blockers =
- GetSafeSnapshotBlockingPids(blocked_pid, blockers, MaxBackends);
+ GetSafeSnapshotBlockingPids(blocked_pid, blockers, GetMaxBackends());

/* Convert int array to Datum array */
if (num_blockers > 0)
@@ -25,6 +25,7 @@
#include "access/session.h"
#include "access/sysattr.h"
#include "access/tableam.h"
+ #include "access/twophase.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
@@ -65,6 +66,9 @@
#include "utils/syscache.h"
#include "utils/timeout.h"

+ static int MaxBackends = 0;
+ static int MaxBackendsInitialized = false;
+
static HeapTuple GetDatabaseTuple(const char *dbname);
static HeapTuple GetDatabaseTupleByOid(Oid dboid);
static void PerformAuthentication(Port *port);

@@ -495,15 +499,49 @@ pg_split_opts(char **argv, int *argcp, const char *optstr)
void
InitializeMaxBackends(void)
{
- Assert(MaxBackends == 0);
-
/* the extra unit accounts for the autovacuum launcher */
- MaxBackends = MaxConnections + autovacuum_max_workers + 1 +
- max_worker_processes + max_wal_senders;
+ SetMaxBackends(MaxConnections + autovacuum_max_workers + 1 +
+ max_worker_processes + max_wal_senders);
+ }
+
+ /*
+ * Safely retrieve the value of MaxBackends.
+ *
+ * Previously, MaxBackends was externally visible, but it was often used before
+ * it was initialized (e.g., in preloaded libraries' _PG_init() functions).
+ * Unfortunately, we cannot initialize MaxBackends before processing
+ * shared_preload_libraries because the libraries sometimes alter GUCs that are
+ * used to calculate its value. Instead, we provide this function for accessing
+ * MaxBackends, and we ERROR if someone calls it before it is initialized.
+ */
+ int
+ GetMaxBackends(void)
+ {
+ if (unlikely(!MaxBackendsInitialized))
+ elog(ERROR, "MaxBackends not yet initialized");
+
+ return MaxBackends;
+ }
+
+ /*
+ * Set the value of MaxBackends.
+ *
+ * This should only be used by InitializeMaxBackends() and
+ * restore_backend_variables(). If MaxBackends is already initialized or the
+ * specified value is greater than the maximum, this will ERROR.
+ */
+ void
+ SetMaxBackends(int max_backends)
+ {
+ if (MaxBackendsInitialized)
+ elog(ERROR, "MaxBackends already initialized");

/* internal error because the values were all checked previously */
- if (MaxBackends > MAX_BACKENDS)
+ if (max_backends > MAX_BACKENDS)
elog(ERROR, "too many backends configured");
+
+ MaxBackends = max_backends;
+ MaxBackendsInitialized = true;
}

/*

@@ -609,7 +647,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,

SharedInvalBackendInit(false);

- if (MyBackendId > MaxBackends || MyBackendId <= 0)
+ if (MyBackendId > GetMaxBackends() || MyBackendId <= 0)
elog(FATAL, "bad backend ID: %d", MyBackendId);

/* Now that we have a BackendId, we can participate in ProcSignal */
@@ -172,7 +172,6 @@ extern PGDLLIMPORT char *DataDir;
extern PGDLLIMPORT int data_directory_mode;

extern PGDLLIMPORT int NBuffers;
- extern PGDLLIMPORT int MaxBackends;
extern PGDLLIMPORT int MaxConnections;
extern PGDLLIMPORT int max_worker_processes;
extern PGDLLIMPORT int max_parallel_workers;

@@ -457,6 +456,8 @@ extern AuxProcType MyAuxProcType;
/* in utils/init/postinit.c */
extern void pg_split_opts(char **argv, int *argcp, const char *optstr);
extern void InitializeMaxBackends(void);
+ extern int GetMaxBackends(void);
+ extern void SetMaxBackends(int max_backends);
extern void InitPostgres(const char *in_dbname, Oid dboid, const char *username,
Oid useroid, char *out_dbname, bool override_allow_connections);
extern void BaseInit(void);