mirror of https://github.com/postgres/postgres.git
Remove dashes in comments that don't need them, rewrap with pgindent.
src/backend/storage/ipc/ipc.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.64 2001/03/22 03:59:45 momjian Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.65 2001/03/22 06:16:16 momjian Exp $
  *
  * NOTES
  *
@@ -146,15 +146,14 @@ proc_exit(int code)
     /* do our shared memory exits first */
     shmem_exit(code);
 
-    /* ----------------
-     * call all the callbacks registered before calling exit().
+    /*
+     * call all the callbacks registered before calling exit().
      *
-     * Note that since we decrement on_proc_exit_index each time,
-     * if a callback calls elog(ERROR) or elog(FATAL) then it won't
-     * be invoked again when control comes back here (nor will the
-     * previously-completed callbacks). So, an infinite loop
-     * should not be possible.
-     * ----------------
+     * Note that since we decrement on_proc_exit_index each time, if a
+     * callback calls elog(ERROR) or elog(FATAL) then it won't be invoked
+     * again when control comes back here (nor will the
+     * previously-completed callbacks). So, an infinite loop should not
+     * be possible.
      */
     while (--on_proc_exit_index >= 0)
         (*on_proc_exit_list[on_proc_exit_index].function) (code,
@@ -177,12 +176,11 @@ shmem_exit(int code)
     if (DebugLvl > 1)
         elog(DEBUG, "shmem_exit(%d)", code);
 
-    /* ----------------
-     * call all the registered callbacks.
+    /*
+     * call all the registered callbacks.
      *
-     * As with proc_exit(), we remove each callback from the list
-     * before calling it, to avoid infinite loop in case of error.
-     * ----------------
+     * As with proc_exit(), we remove each callback from the list before
+     * calling it, to avoid infinite loop in case of error.
      */
     while (--on_shmem_exit_index >= 0)
         (*on_shmem_exit_list[on_shmem_exit_index].function) (code,
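
Note: the rewrapped comments in the two hunks above both describe the pop-before-call callback pattern used by proc_exit() and shmem_exit(). Below is a minimal, self-contained C sketch of that pattern under assumed, illustrative names (exit_list, exit_index, register_exit_callback); it is not the actual ipc.c code.

#include <stdio.h>

/* Illustrative stand-ins for the real on_proc_exit/on_shmem_exit machinery. */
typedef void (*exit_callback) (int code, long arg);

#define MAX_EXIT_CALLBACKS 20

static struct
{
    exit_callback func;
    long        arg;
} exit_list[MAX_EXIT_CALLBACKS];
static int  exit_index = 0;

static void
register_exit_callback(exit_callback func, long arg)
{
    exit_list[exit_index].func = func;
    exit_list[exit_index].arg = arg;
    exit_index++;
}

static void
run_exit_callbacks(int code)
{
    /*
     * Decrement the index *before* each call, as the comment explains: if a
     * callback errors out and control later returns here, neither it nor any
     * already-completed callback is invoked a second time, so no infinite
     * loop is possible.
     */
    while (--exit_index >= 0)
        (*exit_list[exit_index].func) (code, exit_list[exit_index].arg);
}

static void
say_goodbye(int code, long arg)
{
    printf("callback %ld, exit code %d\n", arg, code);
}

int
main(void)
{
    register_exit_callback(say_goodbye, 1);
    register_exit_callback(say_goodbye, 2);
    run_exit_callbacks(0);      /* runs callback 2, then callback 1 */
    return 0;
}
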
@@ -387,40 +385,39 @@ IpcSemaphoreLock(IpcSemaphoreId semId, int sem, bool interruptOK)
     sops.sem_flg = 0;
     sops.sem_num = sem;
 
-    /* ----------------
-     * Note: if errStatus is -1 and errno == EINTR then it means we
-     * returned from the operation prematurely because we were
-     * sent a signal. So we try and lock the semaphore again.
+    /*
+     * Note: if errStatus is -1 and errno == EINTR then it means we
+     * returned from the operation prematurely because we were sent a
+     * signal. So we try and lock the semaphore again.
      *
-     * Each time around the loop, we check for a cancel/die interrupt.
-     * We assume that if such an interrupt comes in while we are waiting,
-     * it will cause the semop() call to exit with errno == EINTR, so that
-     * we will be able to service the interrupt (if not in a critical
-     * section already).
+     * Each time around the loop, we check for a cancel/die interrupt. We
+     * assume that if such an interrupt comes in while we are waiting, it
+     * will cause the semop() call to exit with errno == EINTR, so that we
+     * will be able to service the interrupt (if not in a critical section
+     * already).
      *
-     * Once we acquire the lock, we do NOT check for an interrupt before
-     * returning. The caller needs to be able to record ownership of
-     * the lock before any interrupt can be accepted.
+     * Once we acquire the lock, we do NOT check for an interrupt before
+     * returning. The caller needs to be able to record ownership of the
+     * lock before any interrupt can be accepted.
      *
-     * There is a window of a few instructions between CHECK_FOR_INTERRUPTS
-     * and entering the semop() call. If a cancel/die interrupt occurs in
-     * that window, we would fail to notice it until after we acquire the
-     * lock (or get another interrupt to escape the semop()). We can avoid
-     * this problem by temporarily setting ImmediateInterruptOK = true
-     * before we do CHECK_FOR_INTERRUPTS; then, a die() interrupt in this
-     * interval will execute directly. However, there is a huge pitfall:
-     * there is another window of a few instructions after the semop()
-     * before we are able to reset ImmediateInterruptOK. If an interrupt
-     * occurs then, we'll lose control, which means that the lock has been
-     * acquired but our caller did not get a chance to record the fact.
-     * Therefore, we only set ImmediateInterruptOK if the caller tells us
-     * it's OK to do so, ie, the caller does not need to record acquiring
-     * the lock. (This is currently true for lockmanager locks, since the
-     * process that granted us the lock did all the necessary state updates.
-     * It's not true for SysV semaphores used to emulate spinlocks --- but
-     * our performance on such platforms is so horrible anyway that I'm
-     * not going to worry too much about it.)
-     * ----------------
+     * There is a window of a few instructions between CHECK_FOR_INTERRUPTS
+     * and entering the semop() call. If a cancel/die interrupt occurs in
+     * that window, we would fail to notice it until after we acquire the
+     * lock (or get another interrupt to escape the semop()). We can
+     * avoid this problem by temporarily setting ImmediateInterruptOK =
+     * true before we do CHECK_FOR_INTERRUPTS; then, a die() interrupt in
+     * this interval will execute directly. However, there is a huge
+     * pitfall: there is another window of a few instructions after the
+     * semop() before we are able to reset ImmediateInterruptOK. If an
+     * interrupt occurs then, we'll lose control, which means that the
+     * lock has been acquired but our caller did not get a chance to
+     * record the fact. Therefore, we only set ImmediateInterruptOK if the
+     * caller tells us it's OK to do so, ie, the caller does not need to
+     * record acquiring the lock. (This is currently true for lockmanager
+     * locks, since the process that granted us the lock did all the
+     * necessary state updates. It's not true for SysV semaphores used to
+     * emulate spinlocks --- but our performance on such platforms is so
+     * horrible anyway that I'm not going to worry too much about it.)
      */
     do
     {
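
Note: the block comment above covers both the semop() retry idiom and PostgreSQL's interrupt bookkeeping. The sketch below shows just the portable EINTR-retry core, assuming a semaphore id obtained from semget(); CHECK_FOR_INTERRUPTS and ImmediateInterruptOK are PostgreSQL-specific and appear here only as a comment, not as code.

#include <errno.h>
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/types.h>

/*
 * Block until the given SysV semaphore can be decremented.  Mirrors the
 * EINTR-handling idiom described for IpcSemaphoreLock: a signal interrupts
 * semop(), semop() returns -1 with errno == EINTR, and we simply retry.
 * The real code also checks for cancel/die interrupts on each iteration.
 */
static int
semaphore_lock(int semId, int semNum)
{
    struct sembuf sops;
    int         errStatus;

    sops.sem_op = -1;           /* decrement, i.e. acquire */
    sops.sem_flg = 0;
    sops.sem_num = semNum;

    do
    {
        errStatus = semop(semId, &sops, 1);
    } while (errStatus == -1 && errno == EINTR);

    if (errStatus == -1)
        perror("semop");
    return errStatus;
}
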
@@ -452,12 +449,11 @@ IpcSemaphoreUnlock(IpcSemaphoreId semId, int sem)
     sops.sem_num = sem;
 
 
-    /* ----------------
-     * Note: if errStatus is -1 and errno == EINTR then it means we
-     * returned from the operation prematurely because we were
-     * sent a signal. So we try and unlock the semaphore again.
-     * Not clear this can really happen, but might as well cope.
-     * ----------------
+    /*
+     * Note: if errStatus is -1 and errno == EINTR then it means we
+     * returned from the operation prematurely because we were sent a
+     * signal. So we try and unlock the semaphore again. Not clear this
+     * can really happen, but might as well cope.
      */
     do
     {
@@ -486,11 +482,10 @@ IpcSemaphoreTryLock(IpcSemaphoreId semId, int sem)
     sops.sem_flg = IPC_NOWAIT;  /* but don't block */
     sops.sem_num = sem;
 
-    /* ----------------
-     * Note: if errStatus is -1 and errno == EINTR then it means we
-     * returned from the operation prematurely because we were
-     * sent a signal. So we try and lock the semaphore again.
-     * ----------------
+    /*
+     * Note: if errStatus is -1 and errno == EINTR then it means we
+     * returned from the operation prematurely because we were sent a
+     * signal. So we try and lock the semaphore again.
      */
     do
     {
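
Note: IpcSemaphoreTryLock differs from the blocking path above only in setting IPC_NOWAIT. A hedged sketch of that variant follows; the name semaphore_try_lock is illustrative, not the actual function.

#include <errno.h>
#include <stdbool.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/types.h>

/*
 * Try to acquire a SysV semaphore without blocking.  With IPC_NOWAIT set,
 * a busy semaphore makes semop() return -1 with errno == EAGAIN instead of
 * sleeping, so the caller can fall back.  EINTR is still retried, as in
 * the IpcSemaphoreTryLock comment above.
 */
static bool
semaphore_try_lock(int semId, int semNum)
{
    struct sembuf sops;
    int         errStatus;

    sops.sem_op = -1;           /* decrement, i.e. acquire */
    sops.sem_flg = IPC_NOWAIT;  /* but don't block */
    sops.sem_num = semNum;

    do
    {
        errStatus = semop(semId, &sops, 1);
    } while (errStatus == -1 && errno == EINTR);

    if (errStatus == -1)
        return false;           /* typically EAGAIN: someone else holds it */
    return true;
}
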
src/backend/storage/lmgr/proc.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.99 2001/03/22 03:59:46 momjian Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.100 2001/03/22 06:16:17 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -265,18 +265,15 @@ InitProcess(void)
     MyProc->waitHolder = NULL;
     SHMQueueInit(&(MyProc->procHolders));
 
-    /* ----------------------
+    /*
      * Release the lock.
-     * ----------------------
      */
     SpinRelease(ProcStructLock);
 
-    /* -------------------------
-     * Install ourselves in the shmem index table. The name to
-     * use is determined by the OS-assigned process id. That
-     * allows the cleanup process to find us after any untimely
-     * exit.
-     * -------------------------
+    /*
+     * Install ourselves in the shmem index table. The name to use is
+     * determined by the OS-assigned process id. That allows the cleanup
+     * process to find us after any untimely exit.
      */
     location = MAKE_OFFSET(MyProc);
     if ((!ShmemPIDLookup(MyProcPid, &location)) ||
@@ -531,23 +528,24 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
 
 #endif
 
-    /* ----------------------
+    /*
      * Determine where to add myself in the wait queue.
      *
      * Normally I should go at the end of the queue. However, if I already
      * hold locks that conflict with the request of any previous waiter,
      * put myself in the queue just in front of the first such waiter.
      * This is not a necessary step, since deadlock detection would move
-     * me to before that waiter anyway; but it's relatively cheap to detect
-     * such a conflict immediately, and avoid delaying till deadlock timeout.
+     * me to before that waiter anyway; but it's relatively cheap to
+     * detect such a conflict immediately, and avoid delaying till
+     * deadlock timeout.
      *
-     * Special case: if I find I should go in front of some waiter, check
-     * to see if I conflict with already-held locks or the requests before
+     * Special case: if I find I should go in front of some waiter, check to
+     * see if I conflict with already-held locks or the requests before
      * that waiter. If not, then just grant myself the requested lock
      * immediately. This is the same as the test for immediate grant in
-     * LockAcquire, except we are only considering the part of the wait queue
-     * before my insertion point.
-     * ----------------------
+     * LockAcquire, except we are only considering the part of the wait
+     * queue before my insertion point.
+     *
      */
     if (myHeldLocks != 0)
     {
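
Note: the comment above explains the wait-queue insertion heuristic in ProcSleep. The following sketch restates just that decision in isolation; the types and the conflict test (Waiter, conflicts_with_my_held_locks) are simplified, hypothetical stand-ins, not the PROC and lock-conflict structures the real code walks.

#include <stddef.h>

/*
 * Simplified stand-in: the real code walks a shared-memory PROC queue and
 * tests lock-mode conflict bitmasks; here a waiter is a plain list node
 * carrying the lock mode it requested.
 */
typedef struct Waiter
{
    struct Waiter *next;
    int         requested_mode;
} Waiter;

/* Hypothetical conflict test between my held-lock mask and a request. */
static int
conflicts_with_my_held_locks(int myHeldLocks, int requested_mode)
{
    return (myHeldLocks & requested_mode) != 0;
}

/*
 * Decide where to insert myself in the wait queue, per the ProcSleep
 * comment: normally at the tail, but if I already hold locks that conflict
 * with some earlier waiter's request, go just in front of the first such
 * waiter so deadlock detection doesn't have to move me there later.
 * Returns the waiter to insert in front of, or NULL for "append at tail".
 */
static Waiter *
choose_insertion_point(Waiter *head, int myHeldLocks)
{
    Waiter     *w;

    if (myHeldLocks == 0)
        return NULL;            /* nothing I hold can block anyone */

    for (w = head; w != NULL; w = w->next)
    {
        if (conflicts_with_my_held_locks(myHeldLocks, w->requested_mode))
            return w;           /* insert just in front of this waiter */
    }
    return NULL;                /* no conflicting waiter: go to the tail */
}
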
@@ -598,9 +596,9 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
         proc = (PROC *) &(waitQueue->links);
     }
 
-    /* -------------------
-     * Insert self into queue, ahead of the given proc (or at tail of queue).
-     * -------------------
+    /*
+     * Insert self into queue, ahead of the given proc (or at tail of
+     * queue).
      */
     SHMQueueInsertBefore(&(proc->links), &(MyProc->links));
     waitQueue->size++;
@@ -617,18 +615,17 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
     /* mark that we are waiting for a lock */
     waitingForLock = true;
 
-    /* -------------------
+    /*
      * Release the locktable's spin lock.
      *
-     * NOTE: this may also cause us to exit critical-section state,
-     * possibly allowing a cancel/die interrupt to be accepted.
-     * This is OK because we have recorded the fact that we are waiting for
-     * a lock, and so LockWaitCancel will clean up if cancel/die happens.
-     * -------------------
+     * NOTE: this may also cause us to exit critical-section state, possibly
+     * allowing a cancel/die interrupt to be accepted. This is OK because
+     * we have recorded the fact that we are waiting for a lock, and so
+     * LockWaitCancel will clean up if cancel/die happens.
      */
     SpinRelease(spinlock);
 
-    /* --------------
+    /*
      * Set timer so we can wake up after awhile and check for a deadlock.
      * If a deadlock is detected, the handler releases the process's
      * semaphore and sets MyProc->errType = STATUS_ERROR, allowing us to
@@ -637,9 +634,8 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
     * By delaying the check until we've waited for a bit, we can avoid
     * running the rather expensive deadlock-check code in most cases.
     *
-    * Need to zero out struct to set the interval and the microseconds fields
-    * to 0.
-    * --------------
+    * Need to zero out struct to set the interval and the microseconds
+    * fields to 0.
     */
 #ifndef __BEOS__
     MemSet(&timeval, 0, sizeof(struct itimerval));
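
Note: the two hunks above concern arming the deadlock-check timer before sleeping on the semaphore. Below is a minimal sketch of that setup, assuming a plain SIGALRM handler and an illustrative timeout parameter; PostgreSQL's real handler is HandleDeadLock and the timeout comes from the server's deadlock timeout, not a hard-coded value.

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>

static void
deadlock_check_handler(int signo)
{
    /* In PostgreSQL this is HandleDeadLock(); here it only marks the fire. */
    (void) signo;
}

/*
 * Arm a one-shot timer, mirroring the ProcSleep comment: zero the whole
 * struct itimerval so it_interval (no repetition) and the microsecond
 * fields are 0, then set only it_value.tv_sec to the deadlock timeout.
 */
static int
arm_deadlock_timer(int timeout_seconds)
{
    struct itimerval timeval;

    signal(SIGALRM, deadlock_check_handler);

    memset(&timeval, 0, sizeof(struct itimerval));
    timeval.it_value.tv_sec = timeout_seconds;

    if (setitimer(ITIMER_REAL, &timeval, NULL) != 0)
    {
        perror("setitimer");
        return -1;
    }
    return 0;
}

Re-arming with a fully zeroed struct itimerval, as the later "Disable the timer" hunk does, cancels any pending expiration.
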
@@ -653,26 +649,24 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
         elog(FATAL, "ProcSleep: Unable to set timer for process wakeup");
 #endif
 
-    /* --------------
+    /*
      * If someone wakes us between SpinRelease and IpcSemaphoreLock,
-     * IpcSemaphoreLock will not block. The wakeup is "saved" by
-     * the semaphore implementation. Note also that if HandleDeadLock
-     * is invoked but does not detect a deadlock, IpcSemaphoreLock()
-     * will continue to wait. There used to be a loop here, but it
-     * was useless code...
+     * IpcSemaphoreLock will not block. The wakeup is "saved" by the
+     * semaphore implementation. Note also that if HandleDeadLock is
+     * invoked but does not detect a deadlock, IpcSemaphoreLock() will
+     * continue to wait. There used to be a loop here, but it was useless
+     * code...
      *
      * We pass interruptOK = true, which eliminates a window in which
      * cancel/die interrupts would be held off undesirably. This is a
      * promise that we don't mind losing control to a cancel/die interrupt
      * here. We don't, because we have no state-change work to do after
      * being granted the lock (the grantor did it all).
-     * --------------
      */
     IpcSemaphoreLock(MyProc->sem.semId, MyProc->sem.semNum, true);
 
-    /* ---------------
+    /*
      * Disable the timer, if it's still running
-     * ---------------
      */
 #ifndef __BEOS__
     MemSet(&timeval, 0, sizeof(struct itimerval));
@@ -688,12 +682,11 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
      */
     waitingForLock = false;
 
-    /* ----------------
+    /*
      * Re-acquire the locktable's spin lock.
      *
-     * We could accept a cancel/die interrupt here. That's OK because
-     * the lock is now registered as being held by this process.
-     * ----------------
+     * We could accept a cancel/die interrupt here. That's OK because the
+     * lock is now registered as being held by this process.
      */
     SpinAcquire(spinlock);
 
@@ -825,17 +818,18 @@ HandleDeadLock(SIGNAL_ARGS)
      */
     LockLockTable();
 
-    /* ---------------------
+    /*
      * Check to see if we've been awoken by anyone in the interim.
      *
      * If we have we can return and resume our transaction -- happy day.
-     * Before we are awoken the process releasing the lock grants it to
-     * us so we know that we don't have to wait anymore.
+     * Before we are awoken the process releasing the lock grants it to us
+     * so we know that we don't have to wait anymore.
      *
      * We check by looking to see if we've been unlinked from the wait queue.
-     * This is quicker than checking our semaphore's state, since no kernel
-     * call is needed, and it is safe because we hold the locktable lock.
-     * ---------------------
+     * This is quicker than checking our semaphore's state, since no
+     * kernel call is needed, and it is safe because we hold the locktable
+     * lock.
+     *
      */
     if (MyProc->links.prev == INVALID_OFFSET ||
         MyProc->links.next == INVALID_OFFSET)
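
Note: the test above relies on the wait-queue links being reset when the lock has already been granted. A small illustrative sketch of that check, using ordinary pointers in place of shared-memory offsets (so INVALID_OFFSET becomes NULL); the type and function names are hypothetical.

#include <stdbool.h>
#include <stddef.h>

/*
 * Stand-in for the shared-memory queue links: PostgreSQL stores offsets and
 * uses INVALID_OFFSET as the "not linked" marker; a pointer version uses
 * NULL the same way.
 */
typedef struct QueueLinks
{
    struct QueueLinks *prev;
    struct QueueLinks *next;
} QueueLinks;

/*
 * The HandleDeadLock comment's quick test: if our node has been unlinked
 * from the wait queue, the lock was already granted to us and there is
 * nothing to do.  No kernel call (semaphore probe) is needed, and the test
 * is safe as long as the caller holds the lock-table lock.
 */
static bool
already_granted(const QueueLinks *me)
{
    return me->prev == NULL || me->next == NULL;
}
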
@@ -858,37 +852,34 @@ HandleDeadLock(SIGNAL_ARGS)
         return;
     }
 
-    /* ------------------------
+    /*
      * Oops. We have a deadlock.
      *
      * Get this process out of wait state.
-     * ------------------------
      */
     RemoveFromWaitQueue(MyProc);
 
-    /* -------------
-     * Set MyProc->errType to STATUS_ERROR so that ProcSleep will
-     * report an error after we return from this signal handler.
-     * -------------
+    /*
+     * Set MyProc->errType to STATUS_ERROR so that ProcSleep will report
+     * an error after we return from this signal handler.
      */
     MyProc->errType = STATUS_ERROR;
 
-    /* ------------------
-     * Unlock my semaphore so that the interrupted ProcSleep() call can finish.
-     * ------------------
+    /*
+     * Unlock my semaphore so that the interrupted ProcSleep() call can
+     * finish.
      */
     IpcSemaphoreUnlock(MyProc->sem.semId, MyProc->sem.semNum);
 
-    /* ------------------
-     * We're done here. Transaction abort caused by the error that ProcSleep
-     * will raise will cause any other locks we hold to be released, thus
-     * allowing other processes to wake up; we don't need to do that here.
-     * NOTE: an exception is that releasing locks we hold doesn't consider
-     * the possibility of waiters that were blocked behind us on the lock
-     * we just failed to get, and might now be wakable because we're not
-     * in front of them anymore. However, RemoveFromWaitQueue took care of
-     * waking up any such processes.
-     * ------------------
+    /*
+     * We're done here. Transaction abort caused by the error that
+     * ProcSleep will raise will cause any other locks we hold to be
+     * released, thus allowing other processes to wake up; we don't need
+     * to do that here. NOTE: an exception is that releasing locks we hold
+     * doesn't consider the possibility of waiters that were blocked
+     * behind us on the lock we just failed to get, and might now be
+     * wakable because we're not in front of them anymore. However,
+     * RemoveFromWaitQueue took care of waking up any such processes.
      */
     UnlockLockTable();
     errno = save_errno;