Katherine Ward wrote:

> Changes to avoid collisions with WIN32 & MFC names...
>
> 1. Renamed:
>    a. PROC => PGPROC
>    b. GetUserName() => GetUserNameFromId()
>    c. GetCurrentTime() => GetCurrentDateTime()
>    d. IGNORE => IGNORE_DTF in include/utils/datetime.h & utils/adt/datetim
>
> 2. Added _P to some lex/yacc tokens:
>    CONST, CHAR, DELETE, FLOAT, GROUP, IN, OUT

Jan
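For background, a minimal sketch (not part of the commit) of the kind of identifier clash the PROC => PGPROC rename sidesteps. The function-pointer typedef below is a simplified stand-in for what the Win32 headers already declare under the name PROC, not the SDK's exact declaration:

/*
 * Illustrative only: the platform header has already claimed "PROC",
 * so the backend's per-process struct takes the prefixed name instead.
 */
typedef long (*PROC)(void);		/* stand-in for the Win32 typedef */

typedef struct PGPROC
{
	int			pid;			/* this backend's process id */
	/* lock-wait, semaphore and transaction fields elided */
} PGPROC;

static PGPROC *MyProc = NULL;	/* per-backend pointer, as in proc.c */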
src/backend/storage/ipc/sinval.c

@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinval.c,v 1.47 2002/05/24 18:57:56 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinval.c,v 1.48 2002/06/11 13:40:51 wieck Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -97,7 +97,7 @@ ReceiveSharedInvalidMessages(
 * The routines later in this file that use shared mode are okay with
 * this, because they aren't looking at the ProcState fields
 * associated with SI message transfer; they only use the
- * ProcState array as an easy way to find all the PROC structures.
+ * ProcState array as an easy way to find all the PGPROC structures.
 */
 LWLockAcquire(SInvalLock, LW_SHARED);
 getResult = SIGetDataEntry(shmInvalBuffer, MyBackendId, &data);
@@ -130,12 +130,12 @@ ReceiveSharedInvalidMessages(


 /****************************************************************************/
-/* Functions that need to scan the PROC structures of all running backends. */
+/* Functions that need to scan the PGPROC structures of all running backends. */
 /* It's a bit strange to keep these in sinval.c, since they don't have any */
 /* direct relationship to shared-cache invalidation. But the procState */
 /* array in the SI segment is the only place in the system where we have */
 /* an array of per-backend data, so it is the most convenient place to keep */
-/* pointers to the backends' PROC structures. We used to implement these */
+/* pointers to the backends' PGPROC structures. We used to implement these */
 /* functions with a slow, ugly search through the ShmemIndex hash table --- */
 /* now they are simple loops over the SI ProcState array. */
 /****************************************************************************/
@@ -171,7 +171,7 @@ DatabaseHasActiveBackends(Oid databaseId, bool ignoreMyself)

 if (pOffset != INVALID_OFFSET)
 {
- PROC *proc = (PROC *) MAKE_PTR(pOffset);
+ PGPROC *proc = (PGPROC *) MAKE_PTR(pOffset);

 if (proc->databaseId == databaseId)
 {
@@ -208,7 +208,7 @@ TransactionIdIsInProgress(TransactionId xid)

 if (pOffset != INVALID_OFFSET)
 {
- PROC *proc = (PROC *) MAKE_PTR(pOffset);
+ PGPROC *proc = (PGPROC *) MAKE_PTR(pOffset);

 /* Fetch xid just once - see GetNewTransactionId */
 TransactionId pxid = proc->xid;
@@ -260,7 +260,7 @@ GetOldestXmin(bool allDbs)

 if (pOffset != INVALID_OFFSET)
 {
- PROC *proc = (PROC *) MAKE_PTR(pOffset);
+ PGPROC *proc = (PGPROC *) MAKE_PTR(pOffset);

 if (allDbs || proc->databaseId == MyDatabaseId)
 {
@@ -371,7 +371,7 @@ GetSnapshotData(bool serializable)

 if (pOffset != INVALID_OFFSET)
 {
- PROC *proc = (PROC *) MAKE_PTR(pOffset);
+ PGPROC *proc = (PGPROC *) MAKE_PTR(pOffset);

 /* Fetch xid just once - see GetNewTransactionId */
 TransactionId xid = proc->xid;
@@ -460,7 +460,7 @@ CountActiveBackends(void)

 if (pOffset != INVALID_OFFSET)
 {
- PROC *proc = (PROC *) MAKE_PTR(pOffset);
+ PGPROC *proc = (PGPROC *) MAKE_PTR(pOffset);

 if (proc == MyProc)
 continue; /* do not count myself */
@@ -476,7 +476,7 @@ CountActiveBackends(void)
 }

 /*
- * GetUndoRecPtr -- returns oldest PROC->logRec.
+ * GetUndoRecPtr -- returns oldest PGPROC->logRec.
 */
 XLogRecPtr
 GetUndoRecPtr(void)
@@ -495,7 +495,7 @@ GetUndoRecPtr(void)

 if (pOffset != INVALID_OFFSET)
 {
- PROC *proc = (PROC *) MAKE_PTR(pOffset);
+ PGPROC *proc = (PGPROC *) MAKE_PTR(pOffset);

 tempr = proc->logRec;
 if (tempr.xrecoff == 0)
@@ -512,13 +512,13 @@ GetUndoRecPtr(void)
 }

 /*
- * BackendIdGetProc - given a BackendId, find its PROC structure
+ * BackendIdGetProc - given a BackendId, find its PGPROC structure
 *
 * This is a trivial lookup in the ProcState array. We assume that the caller
 * knows that the backend isn't going to go away, so we do not bother with
 * locking.
 */
-struct PROC *
+struct PGPROC *
 BackendIdGetProc(BackendId procId)
 {
 SISeg *segP = shmInvalBuffer;
@@ -530,7 +530,7 @@ BackendIdGetProc(BackendId procId)

 if (pOffset != INVALID_OFFSET)
 {
- PROC *proc = (PROC *) MAKE_PTR(pOffset);
+ PGPROC *proc = (PGPROC *) MAKE_PTR(pOffset);

 return proc;
 }
src/backend/storage/lmgr/deadlock.c

@@ -12,7 +12,7 @@
 *
 *
 * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/deadlock.c,v 1.8 2001/10/28 06:25:50 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/deadlock.c,v 1.9 2002/06/11 13:40:51 wieck Exp $
 *
 * Interface:
 *
@@ -31,8 +31,8 @@
 /* One edge in the waits-for graph */
 typedef struct
 {
- PROC *waiter; /* the waiting process */
- PROC *blocker; /* the process it is waiting for */
+ PGPROC *waiter; /* the waiting process */
+ PGPROC *blocker; /* the process it is waiting for */
 int pred; /* workspace for TopoSort */
 int link; /* workspace for TopoSort */
 } EDGE;
@@ -41,20 +41,20 @@ typedef struct
 typedef struct
 {
 LOCK *lock; /* the lock whose wait queue is described */
- PROC **procs; /* array of PROC *'s in new wait order */
+ PGPROC **procs; /* array of PGPROC *'s in new wait order */
 int nProcs;
 } WAIT_ORDER;


-static bool DeadLockCheckRecurse(PROC *proc);
-static bool TestConfiguration(PROC *startProc);
-static bool FindLockCycle(PROC *checkProc,
+static bool DeadLockCheckRecurse(PGPROC *proc);
+static bool TestConfiguration(PGPROC *startProc);
+static bool FindLockCycle(PGPROC *checkProc,
 EDGE *softEdges, int *nSoftEdges);
-static bool FindLockCycleRecurse(PROC *checkProc,
+static bool FindLockCycleRecurse(PGPROC *checkProc,
 EDGE *softEdges, int *nSoftEdges);
 static bool ExpandConstraints(EDGE *constraints, int nConstraints);
 static bool TopoSort(LOCK *lock, EDGE *constraints, int nConstraints,
- PROC **ordering);
+ PGPROC **ordering);

 #ifdef DEBUG_DEADLOCK
 static void PrintLockQueue(LOCK *lock, const char *info);
@@ -66,18 +66,18 @@ static void PrintLockQueue(LOCK *lock, const char *info);
 */

 /* Workspace for FindLockCycle */
-static PROC **visitedProcs; /* Array of visited procs */
+static PGPROC **visitedProcs; /* Array of visited procs */
 static int nVisitedProcs;

 /* Workspace for TopoSort */
-static PROC **topoProcs; /* Array of not-yet-output procs */
+static PGPROC **topoProcs; /* Array of not-yet-output procs */
 static int *beforeConstraints; /* Counts of remaining before-constraints */
 static int *afterConstraints; /* List head for after-constraints */

 /* Output area for ExpandConstraints */
 static WAIT_ORDER *waitOrders; /* Array of proposed queue rearrangements */
 static int nWaitOrders;
-static PROC **waitOrderProcs; /* Space for waitOrders queue contents */
+static PGPROC **waitOrderProcs; /* Space for waitOrders queue contents */

 /* Current list of constraints being considered */
 static EDGE *curConstraints;
@@ -111,7 +111,7 @@ InitDeadLockChecking(void)
 /*
 * FindLockCycle needs at most MaxBackends entries in visitedProcs[]
 */
- visitedProcs = (PROC **) palloc(MaxBackends * sizeof(PROC *));
+ visitedProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));

 /*
 * TopoSort needs to consider at most MaxBackends wait-queue entries,
@@ -128,7 +128,7 @@ InitDeadLockChecking(void)
 * than MaxBackends total waiters.
 */
 waitOrders = (WAIT_ORDER *) palloc((MaxBackends / 2) * sizeof(WAIT_ORDER));
- waitOrderProcs = (PROC **) palloc(MaxBackends * sizeof(PROC *));
+ waitOrderProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));

 /*
 * Allow at most MaxBackends distinct constraints in a configuration.
@@ -176,7 +176,7 @@ InitDeadLockChecking(void)
 * interlocked!
 */
 bool
-DeadLockCheck(PROC *proc)
+DeadLockCheck(PGPROC *proc)
 {
 int i,
 j;
@@ -194,7 +194,7 @@ DeadLockCheck(PROC *proc)
 for (i = 0; i < nWaitOrders; i++)
 {
 LOCK *lock = waitOrders[i].lock;
- PROC **procs = waitOrders[i].procs;
+ PGPROC **procs = waitOrders[i].procs;
 int nProcs = waitOrders[i].nProcs;
 PROC_QUEUE *waitQueue = &(lock->waitProcs);

@@ -234,7 +234,7 @@ DeadLockCheck(PROC *proc)
 * rearrangements of lock wait queues (if any).
 */
 static bool
-DeadLockCheckRecurse(PROC *proc)
+DeadLockCheckRecurse(PGPROC *proc)
 {
 int nEdges;
 int oldPossibleConstraints;
@@ -300,7 +300,7 @@ DeadLockCheckRecurse(PROC *proc)
 *--------------------
 */
 static bool
-TestConfiguration(PROC *startProc)
+TestConfiguration(PGPROC *startProc)
 {
 int softFound = 0;
 EDGE *softEdges = possibleConstraints + nPossibleConstraints;
@@ -365,7 +365,7 @@ TestConfiguration(PROC *startProc)
 * be believed in preference to the actual ordering seen in the locktable.
 */
 static bool
-FindLockCycle(PROC *checkProc,
+FindLockCycle(PGPROC *checkProc,
 EDGE *softEdges, /* output argument */
 int *nSoftEdges) /* output argument */
 {
@@ -375,11 +375,11 @@ FindLockCycle(PROC *checkProc,
 }

 static bool
-FindLockCycleRecurse(PROC *checkProc,
+FindLockCycleRecurse(PGPROC *checkProc,
 EDGE *softEdges, /* output argument */
 int *nSoftEdges) /* output argument */
 {
- PROC *proc;
+ PGPROC *proc;
 LOCK *lock;
 HOLDER *holder;
 SHM_QUEUE *lockHolders;
@@ -438,7 +438,7 @@ FindLockCycleRecurse(PROC *checkProc,

 while (holder)
 {
- proc = (PROC *) MAKE_PTR(holder->tag.proc);
+ proc = (PGPROC *) MAKE_PTR(holder->tag.proc);

 /* A proc never blocks itself */
 if (proc != checkProc)
@@ -480,7 +480,7 @@ FindLockCycleRecurse(PROC *checkProc,
 if (i < nWaitOrders)
 {
 /* Use the given hypothetical wait queue order */
- PROC **procs = waitOrders[i].procs;
+ PGPROC **procs = waitOrders[i].procs;

 queue_size = waitOrders[i].nProcs;

@@ -517,7 +517,7 @@ FindLockCycleRecurse(PROC *checkProc,
 waitQueue = &(lock->waitProcs);
 queue_size = waitQueue->size;

- proc = (PROC *) MAKE_PTR(waitQueue->links.next);
+ proc = (PGPROC *) MAKE_PTR(waitQueue->links.next);

 while (queue_size-- > 0)
 {
@@ -543,7 +543,7 @@ FindLockCycleRecurse(PROC *checkProc,
 }
 }

- proc = (PROC *) MAKE_PTR(proc->links.next);
+ proc = (PGPROC *) MAKE_PTR(proc->links.next);
 }
 }

@@ -559,7 +559,7 @@ FindLockCycleRecurse(PROC *checkProc,
 * specific new orderings for affected wait queues
 *
 * Input is a list of soft edges to be reversed. The output is a list
- * of nWaitOrders WAIT_ORDER structs in waitOrders[], with PROC array
+ * of nWaitOrders WAIT_ORDER structs in waitOrders[], with PGPROC array
 * workspace in waitOrderProcs[].
 *
 * Returns TRUE if able to build an ordering that satisfies all the
@@ -582,7 +582,7 @@ ExpandConstraints(EDGE *constraints,
 */
 for (i = nConstraints; --i >= 0;)
 {
- PROC *proc = constraints[i].waiter;
+ PGPROC *proc = constraints[i].waiter;
 LOCK *lock = proc->waitLock;

 /* Did we already make a list for this lock? */
@@ -628,7 +628,7 @@ ExpandConstraints(EDGE *constraints,
 * slowness of the algorithm won't really matter.
 *
 * The initial queue ordering is taken directly from the lock's wait queue.
- * The output is an array of PROC pointers, of length equal to the lock's
+ * The output is an array of PGPROC pointers, of length equal to the lock's
 * wait queue length (the caller is responsible for providing this space).
 * The partial order is specified by an array of EDGE structs. Each EDGE
 * is one that we need to reverse, therefore the "waiter" must appear before
@@ -642,22 +642,22 @@ static bool
 TopoSort(LOCK *lock,
 EDGE *constraints,
 int nConstraints,
- PROC **ordering) /* output argument */
+ PGPROC **ordering) /* output argument */
 {
 PROC_QUEUE *waitQueue = &(lock->waitProcs);
 int queue_size = waitQueue->size;
- PROC *proc;
+ PGPROC *proc;
 int i,
 j,
 k,
 last;

 /* First, fill topoProcs[] array with the procs in their current order */
- proc = (PROC *) MAKE_PTR(waitQueue->links.next);
+ proc = (PGPROC *) MAKE_PTR(waitQueue->links.next);
 for (i = 0; i < queue_size; i++)
 {
 topoProcs[i] = proc;
- proc = (PROC *) MAKE_PTR(proc->links.next);
+ proc = (PGPROC *) MAKE_PTR(proc->links.next);
 }

 /*
@@ -743,15 +743,15 @@ PrintLockQueue(LOCK *lock, const char *info)
 {
 PROC_QUEUE *waitQueue = &(lock->waitProcs);
 int queue_size = waitQueue->size;
- PROC *proc;
+ PGPROC *proc;
 int i;

 printf("%s lock %lx queue ", info, MAKE_OFFSET(lock));
- proc = (PROC *) MAKE_PTR(waitQueue->links.next);
+ proc = (PGPROC *) MAKE_PTR(waitQueue->links.next);
 for (i = 0; i < queue_size; i++)
 {
 printf(" %d", proc->pid);
- proc = (PROC *) MAKE_PTR(proc->links.next);
+ proc = (PGPROC *) MAKE_PTR(proc->links.next);
 }
 printf("\n");
 fflush(stdout);
src/backend/storage/lmgr/lock.c

@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.106 2002/03/06 06:10:06 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.107 2002/06/11 13:40:51 wieck Exp $
 *
 * NOTES
 * Outside modules can create a lock table and acquire/release
@@ -49,7 +49,7 @@ int max_locks_per_xact; /* set by guc.c */

 static int WaitOnLock(LOCKMETHOD lockmethod, LOCKMODE lockmode,
 LOCK *lock, HOLDER *holder);
-static void LockCountMyLocks(SHMEM_OFFSET lockOffset, PROC *proc,
+static void LockCountMyLocks(SHMEM_OFFSET lockOffset, PGPROC *proc,
 int *myHolding);

 static char *lock_mode_names[] =
@@ -746,7 +746,7 @@ LockCheckConflicts(LOCKMETHODTABLE *lockMethodTable,
 LOCKMODE lockmode,
 LOCK *lock,
 HOLDER *holder,
- PROC *proc,
+ PGPROC *proc,
 int *myHolding) /* myHolding[] array or NULL */
 {
 LOCKMETHODCTL *lockctl = lockMethodTable->ctl;
@@ -820,7 +820,7 @@ LockCheckConflicts(LOCKMETHODTABLE *lockMethodTable,
 * be a net slowdown.
 */
 static void
-LockCountMyLocks(SHMEM_OFFSET lockOffset, PROC *proc, int *myHolding)
+LockCountMyLocks(SHMEM_OFFSET lockOffset, PGPROC *proc, int *myHolding)
 {
 SHM_QUEUE *procHolders = &(proc->procHolders);
 HOLDER *holder;
@@ -944,7 +944,7 @@ WaitOnLock(LOCKMETHOD lockmethod, LOCKMODE lockmode,
 * this routine can only happen if we are aborting the transaction.)
 */
 void
-RemoveFromWaitQueue(PROC *proc)
+RemoveFromWaitQueue(PGPROC *proc)
 {
 LOCK *waitLock = proc->waitLock;
 LOCKMODE lockmode = proc->waitLockMode;
@@ -1182,7 +1182,7 @@ LockRelease(LOCKMETHOD lockmethod, LOCKTAG *locktag,
 * specified XID are released.
 */
 bool
-LockReleaseAll(LOCKMETHOD lockmethod, PROC *proc,
+LockReleaseAll(LOCKMETHOD lockmethod, PGPROC *proc,
 bool allxids, TransactionId xid)
 {
 SHM_QUEUE *procHolders = &(proc->procHolders);
@@ -1354,7 +1354,7 @@ LockShmemSize(int maxBackends)
 long max_table_size = NLOCKENTS(maxBackends);

 size += MAXALIGN(sizeof(PROC_HDR)); /* ProcGlobal */
- size += maxBackends * MAXALIGN(sizeof(PROC)); /* each MyProc */
+ size += maxBackends * MAXALIGN(sizeof(PGPROC)); /* each MyProc */
 size += MAX_LOCK_METHODS * MAXALIGN(sizeof(LOCKMETHODCTL)); /* each
 * lockMethodTable->ctl */

@@ -1383,7 +1383,7 @@ LockShmemSize(int maxBackends)
 void
 DumpLocks(void)
 {
- PROC *proc;
+ PGPROC *proc;
 SHM_QUEUE *procHolders;
 HOLDER *holder;
 LOCK *lock;
@@ -1427,7 +1427,7 @@ DumpLocks(void)
 void
 DumpAllLocks(void)
 {
- PROC *proc;
+ PGPROC *proc;
 HOLDER *holder;
 LOCK *lock;
 int lockmethod = DEFAULT_LOCKMETHOD;
src/backend/storage/lmgr/lwlock.c

@@ -15,7 +15,7 @@
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lwlock.c,v 1.10 2002/05/05 00:03:28 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lwlock.c,v 1.11 2002/06/11 13:40:51 wieck Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -29,12 +29,12 @@

 typedef struct LWLock
 {
- slock_t mutex; /* Protects LWLock and queue of PROCs */
+ slock_t mutex; /* Protects LWLock and queue of PGPROCs */
 bool releaseOK; /* T if ok to release waiters */
 char exclusive; /* # of exclusive holders (0 or 1) */
 int shared; /* # of shared holders (0..MaxBackends) */
- PROC *head; /* head of list of waiting PROCs */
- PROC *tail; /* tail of list of waiting PROCs */
+ PGPROC *head; /* head of list of waiting PGPROCs */
+ PGPROC *tail; /* tail of list of waiting PGPROCs */
 /* tail is undefined when head is NULL */
 } LWLock;

@@ -197,7 +197,7 @@ void
 LWLockAcquire(LWLockId lockid, LWLockMode mode)
 {
 volatile LWLock *lock = LWLockArray + lockid;
- PROC *proc = MyProc;
+ PGPROC *proc = MyProc;
 bool retry = false;
 int extraWaits = 0;

@@ -266,12 +266,12 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
 /*
 * Add myself to wait queue.
 *
- * If we don't have a PROC structure, there's no way to wait. This
+ * If we don't have a PGPROC structure, there's no way to wait. This
 * should never occur, since MyProc should only be null during
 * shared memory initialization.
 */
 if (proc == NULL)
- elog(FATAL, "LWLockAcquire: can't wait without a PROC structure");
+ elog(FATAL, "LWLockAcquire: can't wait without a PGPROC structure");

 proc->lwWaiting = true;
 proc->lwExclusive = (mode == LW_EXCLUSIVE);
@@ -401,8 +401,8 @@ void
 LWLockRelease(LWLockId lockid)
 {
 volatile LWLock *lock = LWLockArray + lockid;
- PROC *head;
- PROC *proc;
+ PGPROC *head;
+ PGPROC *proc;
 int i;

 PRINT_LWDEBUG("LWLockRelease", lockid, lock);
@@ -446,7 +446,7 @@ LWLockRelease(LWLockId lockid)
 if (lock->exclusive == 0 && lock->shared == 0 && lock->releaseOK)
 {
 /*
- * Remove the to-be-awakened PROCs from the queue. If the
+ * Remove the to-be-awakened PGPROCs from the queue. If the
 * front waiter wants exclusive lock, awaken him only.
 * Otherwise awaken as many waiters as want shared access.
 */
@@ -459,7 +459,7 @@ LWLockRelease(LWLockId lockid)
 proc = proc->lwWaitLink;
 }
 }
- /* proc is now the last PROC to be released */
+ /* proc is now the last PGPROC to be released */
 lock->head = proc->lwWaitLink;
 proc->lwWaitLink = NULL;
 /* prevent additional wakeups until retryer gets to run */
src/backend/storage/lmgr/proc.c

@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.119 2002/05/05 00:03:28 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.120 2002/06/11 13:40:51 wieck Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -55,12 +55,12 @@

 int DeadlockTimeout = 1000;

-PROC *MyProc = NULL;
+PGPROC *MyProc = NULL;

 /*
- * This spinlock protects the freelist of recycled PROC structures.
+ * This spinlock protects the freelist of recycled PGPROC structures.
 * We cannot use an LWLock because the LWLock manager depends on already
- * having a PROC and a wait semaphore! But these structures are touched
+ * having a PGPROC and a wait semaphore! But these structures are touched
 * relatively infrequently (only at backend startup or shutdown) and not for
 * very long, so a spinlock is okay.
 */
@@ -68,7 +68,7 @@ static slock_t *ProcStructLock = NULL;

 static PROC_HDR *ProcGlobal = NULL;

-static PROC *DummyProc = NULL;
+static PGPROC *DummyProc = NULL;

 static bool waitingForLock = false;
 static bool waitingForSignal = false;
@@ -129,29 +129,29 @@ InitProcGlobal(int maxBackends)
 ProcGlobal->freeProcs = INVALID_OFFSET;

 /*
- * Pre-create the PROC structures and create a semaphore for each.
+ * Pre-create the PGPROC structures and create a semaphore for each.
 */
 for (i = 0; i < maxBackends; i++)
 {
- PROC *proc;
+ PGPROC *proc;

- proc = (PROC *) ShmemAlloc(sizeof(PROC));
+ proc = (PGPROC *) ShmemAlloc(sizeof(PGPROC));
 if (!proc)
 elog(FATAL, "cannot create new proc: out of memory");
- MemSet(proc, 0, sizeof(PROC));
+ MemSet(proc, 0, sizeof(PGPROC));
 PGSemaphoreCreate(&proc->sem);
 proc->links.next = ProcGlobal->freeProcs;
 ProcGlobal->freeProcs = MAKE_OFFSET(proc);
 }

 /*
- * Pre-allocate a PROC structure for dummy (checkpoint) processes,
+ * Pre-allocate a PGPROC structure for dummy (checkpoint) processes,
 * too. This does not get linked into the freeProcs list.
 */
- DummyProc = (PROC *) ShmemAlloc(sizeof(PROC));
+ DummyProc = (PGPROC *) ShmemAlloc(sizeof(PGPROC));
 if (!DummyProc)
 elog(FATAL, "cannot create new proc: out of memory");
- MemSet(DummyProc, 0, sizeof(PROC));
+ MemSet(DummyProc, 0, sizeof(PGPROC));
 DummyProc->pid = 0; /* marks DummyProc as not in use */
 PGSemaphoreCreate(&DummyProc->sem);

@@ -183,7 +183,7 @@ InitProcess(void)

 /*
 * Try to get a proc struct from the free list. If this fails,
- * we must be out of PROC structures (not to mention semaphores).
+ * we must be out of PGPROC structures (not to mention semaphores).
 */
 SpinLockAcquire(ProcStructLock);

@@ -191,14 +191,14 @@ InitProcess(void)

 if (myOffset != INVALID_OFFSET)
 {
- MyProc = (PROC *) MAKE_PTR(myOffset);
+ MyProc = (PGPROC *) MAKE_PTR(myOffset);
 procglobal->freeProcs = MyProc->links.next;
 SpinLockRelease(ProcStructLock);
 }
 else
 {
 /*
- * If we reach here, all the PROCs are in use. This is one of
+ * If we reach here, all the PGPROCs are in use. This is one of
 * the possible places to detect "too many backends", so give the
 * standard error message.
 */
@@ -236,7 +236,7 @@ InitProcess(void)
 PGSemaphoreReset(&MyProc->sem);

 /*
- * Now that we have a PROC, we could try to acquire locks, so
+ * Now that we have a PGPROC, we could try to acquire locks, so
 * initialize the deadlock checker.
 */
 InitDeadLockChecking();
@@ -246,7 +246,7 @@ InitProcess(void)
 * InitDummyProcess -- create a dummy per-process data structure
 *
 * This is called by checkpoint processes so that they will have a MyProc
- * value that's real enough to let them wait for LWLocks. The PROC and
+ * value that's real enough to let them wait for LWLocks. The PGPROC and
 * sema that are assigned are the extra ones created during InitProcGlobal.
 */
 void
@@ -402,11 +402,11 @@ ProcKill(void)

 SpinLockAcquire(ProcStructLock);

- /* Return PROC structure (and semaphore) to freelist */
+ /* Return PGPROC structure (and semaphore) to freelist */
 MyProc->links.next = procglobal->freeProcs;
 procglobal->freeProcs = MAKE_OFFSET(MyProc);

- /* PROC struct isn't mine anymore */
+ /* PGPROC struct isn't mine anymore */
 MyProc = NULL;

 SpinLockRelease(ProcStructLock);
@@ -414,7 +414,7 @@ ProcKill(void)

 /*
 * DummyProcKill() -- Cut-down version of ProcKill for dummy (checkpoint)
- * processes. The PROC and sema are not released, only marked
+ * processes. The PGPROC and sema are not released, only marked
 * as not-in-use.
 */
 static void
@@ -433,7 +433,7 @@ DummyProcKill(void)
 /* Mark DummyProc no longer in use */
 MyProc->pid = 0;

- /* PROC struct isn't mine anymore */
+ /* PGPROC struct isn't mine anymore */
 MyProc = NULL;
 }

@@ -506,7 +506,7 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
 PROC_QUEUE *waitQueue = &(lock->waitProcs);
 int myHeldLocks = MyProc->heldLocks;
 bool early_deadlock = false;
- PROC *proc;
+ PGPROC *proc;
 int i;

 /*
@@ -531,7 +531,7 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
 {
 int aheadRequests = 0;

- proc = (PROC *) MAKE_PTR(waitQueue->links.next);
+ proc = (PGPROC *) MAKE_PTR(waitQueue->links.next);
 for (i = 0; i < waitQueue->size; i++)
 {
 /* Must he wait for me? */
@@ -568,7 +568,7 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
 }
 /* Nope, so advance to next waiter */
 aheadRequests |= (1 << proc->waitLockMode);
- proc = (PROC *) MAKE_PTR(proc->links.next);
+ proc = (PGPROC *) MAKE_PTR(proc->links.next);
 }

 /*
@@ -579,7 +579,7 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
 else
 {
 /* I hold no locks, so I can't push in front of anyone. */
- proc = (PROC *) &(waitQueue->links);
+ proc = (PGPROC *) &(waitQueue->links);
 }

 /*
@@ -591,7 +591,7 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,

 lock->waitMask |= (1 << lockmode);

- /* Set up wait information in PROC object, too */
+ /* Set up wait information in PGPROC object, too */
 MyProc->waitLock = lock;
 MyProc->waitHolder = holder;
 MyProc->waitLockMode = lockmode;
@@ -685,20 +685,20 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
 * works correctly for that case. To clean up in failure case, would need
 * to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
 */
-PROC *
-ProcWakeup(PROC *proc, int errType)
+PGPROC *
+ProcWakeup(PGPROC *proc, int errType)
 {
- PROC *retProc;
+ PGPROC *retProc;

 /* assume that masterLock has been acquired */

 /* Proc should be sleeping ... */
 if (proc->links.prev == INVALID_OFFSET ||
 proc->links.next == INVALID_OFFSET)
- return (PROC *) NULL;
+ return (PGPROC *) NULL;

 /* Save next process before we zap the list link */
- retProc = (PROC *) MAKE_PTR(proc->links.next);
+ retProc = (PGPROC *) MAKE_PTR(proc->links.next);

 /* Remove process from wait queue */
 SHMQueueDelete(&(proc->links));
@@ -726,7 +726,7 @@ ProcLockWakeup(LOCKMETHODTABLE *lockMethodTable, LOCK *lock)
 LOCKMETHODCTL *lockctl = lockMethodTable->ctl;
 PROC_QUEUE *waitQueue = &(lock->waitProcs);
 int queue_size = waitQueue->size;
- PROC *proc;
+ PGPROC *proc;
 int aheadRequests = 0;

 Assert(queue_size >= 0);
@@ -734,7 +734,7 @@ ProcLockWakeup(LOCKMETHODTABLE *lockMethodTable, LOCK *lock)
 if (queue_size == 0)
 return;

- proc = (PROC *) MAKE_PTR(waitQueue->links.next);
+ proc = (PGPROC *) MAKE_PTR(waitQueue->links.next);

 while (queue_size-- > 0)
 {
@@ -769,7 +769,7 @@ ProcLockWakeup(LOCKMETHODTABLE *lockMethodTable, LOCK *lock)
 * checks.
 */
 aheadRequests |= (1 << lockmode);
- proc = (PROC *) MAKE_PTR(proc->links.next);
+ proc = (PGPROC *) MAKE_PTR(proc->links.next);
 }
 }

@@ -902,7 +902,7 @@ ProcCancelWaitForSignal(void)
 void
 ProcSendSignal(BackendId procId)
 {
- PROC *proc = BackendIdGetProc(procId);
+ PGPROC *proc = BackendIdGetProc(procId);

 if (proc != NULL)
 PGSemaphoreUnlock(&proc->sem);
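After the rename, call sites elsewhere in the backend work with PGPROC throughout. Below is a hedged usage sketch, not part of this commit, assuming the post-rename declarations in storage/proc.h and storage/sinval.h; the helper function and its message are illustrative only:

#include "postgres.h"
#include "storage/proc.h"		/* PGPROC, MyProc */
#include "storage/sinval.h"		/* BackendIdGetProc() */

/* Log the pid of another backend, found through the renamed API. */
static void
report_backend_pid(BackendId procId)
{
	PGPROC	   *proc = BackendIdGetProc(procId);	/* was: PROC * */

	if (proc != NULL)
		elog(LOG, "backend %d is running as pid %d",
			 (int) procId, proc->pid);
}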