diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 4ed5c3eb8f0..91ce3559a15 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.224 2005/12/28 23:22:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.225 2005/12/29 18:08:05 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -695,10 +695,10 @@ begin:;
 		/* use volatile pointer to prevent code rearrangement */
 		volatile XLogCtlData *xlogctl = XLogCtl;
 
-		SpinLockAcquire_NoHoldoff(&xlogctl->info_lck);
+		SpinLockAcquire(&xlogctl->info_lck);
 		LogwrtRqst = xlogctl->LogwrtRqst;
 		LogwrtResult = xlogctl->LogwrtResult;
-		SpinLockRelease_NoHoldoff(&xlogctl->info_lck);
+		SpinLockRelease(&xlogctl->info_lck);
 	}
 
 	/*
@@ -940,13 +940,13 @@ begin:;
 		/* use volatile pointer to prevent code rearrangement */
 		volatile XLogCtlData *xlogctl = XLogCtl;
 
-		SpinLockAcquire_NoHoldoff(&xlogctl->info_lck);
+		SpinLockAcquire(&xlogctl->info_lck);
 		/* advance global request to include new block(s) */
 		if (XLByteLT(xlogctl->LogwrtRqst.Write, WriteRqst))
 			xlogctl->LogwrtRqst.Write = WriteRqst;
 		/* update local result copy while I have the chance */
 		LogwrtResult = xlogctl->LogwrtResult;
-		SpinLockRelease_NoHoldoff(&xlogctl->info_lck);
+		SpinLockRelease(&xlogctl->info_lck);
 	}
 
 	ProcLastRecEnd = RecPtr;
@@ -1175,11 +1175,11 @@ AdvanceXLInsertBuffer(void)
 			/* use volatile pointer to prevent code rearrangement */
 			volatile XLogCtlData *xlogctl = XLogCtl;
 
-			SpinLockAcquire_NoHoldoff(&xlogctl->info_lck);
+			SpinLockAcquire(&xlogctl->info_lck);
 			if (XLByteLT(xlogctl->LogwrtRqst.Write, FinishedPageRqstPtr))
 				xlogctl->LogwrtRqst.Write = FinishedPageRqstPtr;
 			LogwrtResult = xlogctl->LogwrtResult;
-			SpinLockRelease_NoHoldoff(&xlogctl->info_lck);
+			SpinLockRelease(&xlogctl->info_lck);
 		}
 
 		update_needed = false;	/* Did the shared-request update */
@@ -1560,13 +1560,13 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
 		/* use volatile pointer to prevent code rearrangement */
 		volatile XLogCtlData *xlogctl = XLogCtl;
 
-		SpinLockAcquire_NoHoldoff(&xlogctl->info_lck);
+		SpinLockAcquire(&xlogctl->info_lck);
 		xlogctl->LogwrtResult = LogwrtResult;
 		if (XLByteLT(xlogctl->LogwrtRqst.Write, LogwrtResult.Write))
 			xlogctl->LogwrtRqst.Write = LogwrtResult.Write;
 		if (XLByteLT(xlogctl->LogwrtRqst.Flush, LogwrtResult.Flush))
 			xlogctl->LogwrtRqst.Flush = LogwrtResult.Flush;
-		SpinLockRelease_NoHoldoff(&xlogctl->info_lck);
+		SpinLockRelease(&xlogctl->info_lck);
 	}
 
 	Write->LogwrtResult = LogwrtResult;
@@ -1618,11 +1618,11 @@ XLogFlush(XLogRecPtr record)
 		/* use volatile pointer to prevent code rearrangement */
 		volatile XLogCtlData *xlogctl = XLogCtl;
 
-		SpinLockAcquire_NoHoldoff(&xlogctl->info_lck);
+		SpinLockAcquire(&xlogctl->info_lck);
 		if (XLByteLT(WriteRqstPtr, xlogctl->LogwrtRqst.Write))
 			WriteRqstPtr = xlogctl->LogwrtRqst.Write;
 		LogwrtResult = xlogctl->LogwrtResult;
-		SpinLockRelease_NoHoldoff(&xlogctl->info_lck);
+		SpinLockRelease(&xlogctl->info_lck);
 	}
 
 	/* done already? */
@@ -4984,10 +4984,10 @@ GetRedoRecPtr(void)
 	/* use volatile pointer to prevent code rearrangement */
 	volatile XLogCtlData *xlogctl = XLogCtl;
 
-	SpinLockAcquire_NoHoldoff(&xlogctl->info_lck);
+	SpinLockAcquire(&xlogctl->info_lck);
 	Assert(XLByteLE(RedoRecPtr, xlogctl->Insert.RedoRecPtr));
 	RedoRecPtr = xlogctl->Insert.RedoRecPtr;
-	SpinLockRelease_NoHoldoff(&xlogctl->info_lck);
+	SpinLockRelease(&xlogctl->info_lck);
 
 	return RedoRecPtr;
 }
@@ -5165,9 +5165,9 @@ CreateCheckPoint(bool shutdown, bool force)
 		/* use volatile pointer to prevent code rearrangement */
 		volatile XLogCtlData *xlogctl = XLogCtl;
 
-		SpinLockAcquire_NoHoldoff(&xlogctl->info_lck);
+		SpinLockAcquire(&xlogctl->info_lck);
 		RedoRecPtr = xlogctl->Insert.RedoRecPtr = checkPoint.redo;
-		SpinLockRelease_NoHoldoff(&xlogctl->info_lck);
+		SpinLockRelease(&xlogctl->info_lck);
 	}
 
 	/*
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 78998826e62..a8a37e5ca3c 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.200 2005/11/22 18:17:19 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.201 2005/12/29 18:08:05 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -442,7 +442,7 @@ BufferAlloc(Relation reln,
 		/*
 		 * Need to lock the buffer header too in order to change its tag.
 		 */
-		LockBufHdr_NoHoldoff(buf);
+		LockBufHdr(buf);
 
 		/*
 		 * Somebody could have pinned or re-dirtied the buffer while we were
@@ -453,7 +453,7 @@ BufferAlloc(Relation reln,
 		if (buf->refcount == 1 && !(buf->flags & BM_DIRTY))
 			break;
 
-		UnlockBufHdr_NoHoldoff(buf);
+		UnlockBufHdr(buf);
 		BufTableDelete(&newTag);
 		LWLockRelease(BufMappingLock);
 		UnpinBuffer(buf, true, false /* evidently recently used */ );
@@ -473,7 +473,7 @@ BufferAlloc(Relation reln,
 	buf->flags |= BM_TAG_VALID;
 	buf->usage_count = 0;
 
-	UnlockBufHdr_NoHoldoff(buf);
+	UnlockBufHdr(buf);
 
 	if (oldFlags & BM_TAG_VALID)
 		BufTableDelete(&oldTag);
@@ -529,13 +529,13 @@ retry:
 	 */
 	LWLockAcquire(BufMappingLock, LW_EXCLUSIVE);
 
-	/* Re-lock the buffer header (NoHoldoff since we have an LWLock) */
-	LockBufHdr_NoHoldoff(buf);
+	/* Re-lock the buffer header */
+	LockBufHdr(buf);
 
 	/* If it's changed while we were waiting for lock, do nothing */
 	if (!BUFFERTAGS_EQUAL(buf->tag, oldTag))
 	{
-		UnlockBufHdr_NoHoldoff(buf);
+		UnlockBufHdr(buf);
 		LWLockRelease(BufMappingLock);
 		return;
 	}
@@ -551,7 +551,7 @@ retry:
 	 */
 	if (buf->refcount != 0)
 	{
-		UnlockBufHdr_NoHoldoff(buf);
+		UnlockBufHdr(buf);
 		LWLockRelease(BufMappingLock);
 		/* safety check: should definitely not be our *own* pin */
 		if (PrivateRefCount[buf->buf_id] != 0)
@@ -569,7 +569,7 @@ retry:
 	buf->flags = 0;
 	buf->usage_count = 0;
 
-	UnlockBufHdr_NoHoldoff(buf);
+	UnlockBufHdr(buf);
 
 	/*
 	 * Remove the buffer from the lookup hashtable, if it was in there.
@@ -729,15 +729,10 @@ PinBuffer(volatile BufferDesc *buf)
 
 	if (PrivateRefCount[b] == 0)
 	{
-		/*
-		 * Use NoHoldoff here because we don't want the unlock to be a
-		 * potential place to honor a QueryCancel request. (The caller should
-		 * be holding off interrupts anyway.)
-		 */
-		LockBufHdr_NoHoldoff(buf);
+		LockBufHdr(buf);
 		buf->refcount++;
 		result = (buf->flags & BM_VALID) != 0;
-		UnlockBufHdr_NoHoldoff(buf);
+		UnlockBufHdr(buf);
 	}
 	else
 	{
@@ -766,14 +761,11 @@ PinBuffer_Locked(volatile BufferDesc *buf)
 
 	if (PrivateRefCount[b] == 0)
 		buf->refcount++;
-	/* NoHoldoff since we mustn't accept cancel interrupt here */
-	UnlockBufHdr_NoHoldoff(buf);
+	UnlockBufHdr(buf);
 	PrivateRefCount[b]++;
 	Assert(PrivateRefCount[b] > 0);
 	ResourceOwnerRememberBuffer(CurrentResourceOwner,
 								BufferDescriptorGetBuffer(buf));
-	/* Now we can accept cancel */
-	RESUME_INTERRUPTS();
 }
 
 /*
@@ -811,8 +803,7 @@ UnpinBuffer(volatile BufferDesc *buf, bool fixOwner, bool normalAccess)
 		Assert(!LWLockHeldByMe(buf->content_lock));
 		Assert(!LWLockHeldByMe(buf->io_in_progress_lock));
 
-		/* NoHoldoff ensures we don't lose control before sending signal */
-		LockBufHdr_NoHoldoff(buf);
+		LockBufHdr(buf);
 
 		/* Decrement the shared reference count */
 		Assert(buf->refcount > 0);
@@ -841,11 +832,11 @@ UnpinBuffer(volatile BufferDesc *buf, bool fixOwner, bool normalAccess)
 			int			wait_backend_pid = buf->wait_backend_pid;
 
 			buf->flags &= ~BM_PIN_COUNT_WAITER;
-			UnlockBufHdr_NoHoldoff(buf);
+			UnlockBufHdr(buf);
 			ProcSendSignal(wait_backend_pid);
 		}
 		else
-			UnlockBufHdr_NoHoldoff(buf);
+			UnlockBufHdr(buf);
 
 		/*
 		 * If VACUUM is releasing an otherwise-unused buffer, send it to the
@@ -1300,9 +1291,9 @@ FlushBuffer(volatile BufferDesc *buf, SMgrRelation reln)
 	 */
 
 	/* To check if block content changes while flushing. - vadim 01/17/97 */
-	LockBufHdr_NoHoldoff(buf);
+	LockBufHdr(buf);
 	buf->flags &= ~BM_JUST_DIRTIED;
-	UnlockBufHdr_NoHoldoff(buf);
+	UnlockBufHdr(buf);
 
 	smgrwrite(reln,
 			  buf->tag.blockNum,
@@ -1693,7 +1684,7 @@ UnlockBuffers(void)
 	{
 		HOLD_INTERRUPTS();		/* don't want to die() partway through... */
 
-		LockBufHdr_NoHoldoff(buf);
+		LockBufHdr(buf);
 
 		/*
 		 * Don't complain if flag bit not set; it could have been reset but we
@@ -1703,7 +1694,7 @@ UnlockBuffers(void)
 			buf->wait_backend_pid == MyProcPid)
 			buf->flags &= ~BM_PIN_COUNT_WAITER;
 
-		UnlockBufHdr_NoHoldoff(buf);
+		UnlockBufHdr(buf);
 
 		ProcCancelWaitForSignal();
 
@@ -1741,9 +1732,9 @@ LockBuffer(Buffer buffer, int mode)
 		 * that it's critical to set dirty bit *before* logging changes with
 		 * XLogInsert() - see comments in SyncOneBuffer().
 		 */
-		LockBufHdr_NoHoldoff(buf);
+		LockBufHdr(buf);
 		buf->flags |= (BM_DIRTY | BM_JUST_DIRTIED);
-		UnlockBufHdr_NoHoldoff(buf);
+		UnlockBufHdr(buf);
 	}
 	else
 		elog(ERROR, "unrecognized buffer lock mode: %d", mode);
@@ -1773,9 +1764,9 @@ ConditionalLockBuffer(Buffer buffer)
 		 * that it's critical to set dirty bit *before* logging changes with
 		 * XLogInsert() - see comments in SyncOneBuffer().
 		 */
-		LockBufHdr_NoHoldoff(buf);
+		LockBufHdr(buf);
 		buf->flags |= (BM_DIRTY | BM_JUST_DIRTIED);
-		UnlockBufHdr_NoHoldoff(buf);
+		UnlockBufHdr(buf);
 
 		return true;
 	}
@@ -1827,25 +1818,25 @@ LockBufferForCleanup(Buffer buffer)
 	{
 		/* Try to acquire lock */
 		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
-		LockBufHdr_NoHoldoff(bufHdr);
+		LockBufHdr(bufHdr);
 		Assert(bufHdr->refcount > 0);
 		if (bufHdr->refcount == 1)
 		{
 			/* Successfully acquired exclusive lock with pincount 1 */
-			UnlockBufHdr_NoHoldoff(bufHdr);
+			UnlockBufHdr(bufHdr);
 			return;
 		}
 		/* Failed, so mark myself as waiting for pincount 1 */
 		if (bufHdr->flags & BM_PIN_COUNT_WAITER)
 		{
-			UnlockBufHdr_NoHoldoff(bufHdr);
+			UnlockBufHdr(bufHdr);
 			LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 			elog(ERROR, "multiple backends attempting to wait for pincount 1");
 		}
 		bufHdr->wait_backend_pid = MyProcPid;
 		bufHdr->flags |= BM_PIN_COUNT_WAITER;
 		PinCountWaitBuf = bufHdr;
-		UnlockBufHdr_NoHoldoff(bufHdr);
+		UnlockBufHdr(bufHdr);
 		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 		/* Wait to be signaled by UnpinBuffer() */
 		ProcWaitForSignal();
@@ -1926,8 +1917,7 @@ StartBufferIO(volatile BufferDesc *buf, bool forInput)
 		 */
 		LWLockAcquire(buf->io_in_progress_lock, LW_EXCLUSIVE);
 
-		/* NoHoldoff is OK since we now have an LWLock */
-		LockBufHdr_NoHoldoff(buf);
+		LockBufHdr(buf);
 
 		if (!(buf->flags & BM_IO_IN_PROGRESS))
 			break;
@@ -1938,7 +1928,7 @@ StartBufferIO(volatile BufferDesc *buf, bool forInput)
 		 * an error (see AbortBufferIO). If that's the case, we must wait for
 		 * him to get unwedged.
 		 */
-		UnlockBufHdr_NoHoldoff(buf);
+		UnlockBufHdr(buf);
 		LWLockRelease(buf->io_in_progress_lock);
 		WaitIO(buf);
 	}
@@ -1948,14 +1938,14 @@ StartBufferIO(volatile BufferDesc *buf, bool forInput)
 	if (forInput ? (buf->flags & BM_VALID) : !(buf->flags & BM_DIRTY))
 	{
 		/* someone else already did the I/O */
-		UnlockBufHdr_NoHoldoff(buf);
+		UnlockBufHdr(buf);
 		LWLockRelease(buf->io_in_progress_lock);
 		return false;
 	}
 
 	buf->flags |= BM_IO_IN_PROGRESS;
 
-	UnlockBufHdr_NoHoldoff(buf);
+	UnlockBufHdr(buf);
 
 	InProgressBuf = buf;
 	IsForInput = forInput;
@@ -1986,8 +1976,7 @@ TerminateBufferIO(volatile BufferDesc *buf, bool clear_dirty,
 {
 	Assert(buf == InProgressBuf);
 
-	/* NoHoldoff is OK since we must have an LWLock */
-	LockBufHdr_NoHoldoff(buf);
+	LockBufHdr(buf);
 
 	Assert(buf->flags & BM_IO_IN_PROGRESS);
 	buf->flags &= ~(BM_IO_IN_PROGRESS | BM_IO_ERROR);
@@ -1995,7 +1984,7 @@ TerminateBufferIO(volatile BufferDesc *buf, bool clear_dirty,
 		buf->flags &= ~BM_DIRTY;
 	buf->flags |= set_flag_bits;
 
-	UnlockBufHdr_NoHoldoff(buf);
+	UnlockBufHdr(buf);
 
 	InProgressBuf = NULL;
 
@@ -2026,15 +2015,14 @@ AbortBufferIO(void)
 		 */
 		LWLockAcquire(buf->io_in_progress_lock, LW_EXCLUSIVE);
 
-		/* NoHoldoff is OK since we now have an LWLock */
-		LockBufHdr_NoHoldoff(buf);
+		LockBufHdr(buf);
 		Assert(buf->flags & BM_IO_IN_PROGRESS);
 		if (IsForInput)
 		{
 			Assert(!(buf->flags & BM_DIRTY));
 			/* We'd better not think buffer is valid yet */
 			Assert(!(buf->flags & BM_VALID));
-			UnlockBufHdr_NoHoldoff(buf);
+			UnlockBufHdr(buf);
 		}
 		else
 		{
@@ -2042,7 +2030,7 @@ AbortBufferIO(void)
 
 			sv_flags = buf->flags;
 			Assert(sv_flags & BM_DIRTY);
-			UnlockBufHdr_NoHoldoff(buf);
+			UnlockBufHdr(buf);
 			/* Issue notice if this is not the first failure... */
 			if (sv_flags & BM_IO_ERROR)
 			{
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index 01b578c635a..e6865563b39 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.88 2005/11/22 18:17:20 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.89 2005/12/29 18:08:05 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -58,6 +58,7 @@
 #include "postgres.h"
 
 #include "access/transam.h"
+#include "miscadmin.h"
 #include "storage/pg_shmem.h"
 #include "storage/spin.h"
 #include "utils/tqual.h"
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index e1edabde905..ae9e43de586 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -15,7 +15,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.36 2005/12/11 21:02:18 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.37 2005/12/29 18:08:05 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -24,6 +24,7 @@
 #include "access/clog.h"
 #include "access/multixact.h"
 #include "access/subtrans.h"
+#include "miscadmin.h"
 #include "storage/lwlock.h"
 #include "storage/proc.h"
 #include "storage/spin.h"
@@ -301,7 +302,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
 		bool		mustwait;
 
 		/* Acquire mutex.  Time spent holding mutex should be short! */
-		SpinLockAcquire_NoHoldoff(&lock->mutex);
+		SpinLockAcquire(&lock->mutex);
 
 		/* If retrying, allow LWLockRelease to release waiters again */
 		if (retry)
@@ -340,7 +341,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
 		 * memory initialization.
 		 */
 		if (proc == NULL)
-			elog(FATAL, "cannot wait without a PGPROC structure");
+			elog(PANIC, "cannot wait without a PGPROC structure");
 
 		proc->lwWaiting = true;
 		proc->lwExclusive = (mode == LW_EXCLUSIVE);
@@ -352,7 +353,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
 			lock->tail = proc;
 
 		/* Can release the mutex now */
-		SpinLockRelease_NoHoldoff(&lock->mutex);
+		SpinLockRelease(&lock->mutex);
 
 		/*
 		 * Wait until awakened.
@@ -384,7 +385,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
 	}
 
 	/* We are done updating shared state of the lock itself. */
-	SpinLockRelease_NoHoldoff(&lock->mutex);
+	SpinLockRelease(&lock->mutex);
 
 	/* Add lock to list of locks held by this backend */
 	held_lwlocks[num_held_lwlocks++] = lockid;
@@ -423,7 +424,7 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
 	HOLD_INTERRUPTS();
 
 	/* Acquire mutex.  Time spent holding mutex should be short! */
-	SpinLockAcquire_NoHoldoff(&lock->mutex);
+	SpinLockAcquire(&lock->mutex);
 
 	/* If I can get the lock, do so quickly. */
 	if (mode == LW_EXCLUSIVE)
@@ -448,7 +449,7 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
 	}
 
 	/* We are done updating shared state of the lock itself. */
-	SpinLockRelease_NoHoldoff(&lock->mutex);
+	SpinLockRelease(&lock->mutex);
 
 	if (mustwait)
 	{
@@ -494,7 +495,7 @@ LWLockRelease(LWLockId lockid)
 		held_lwlocks[i] = held_lwlocks[i + 1];
 
 	/* Acquire mutex.  Time spent holding mutex should be short! */
-	SpinLockAcquire_NoHoldoff(&lock->mutex);
+	SpinLockAcquire(&lock->mutex);
 
 	/* Release my hold on lock */
 	if (lock->exclusive > 0)
@@ -542,7 +543,7 @@ LWLockRelease(LWLockId lockid)
 	}
 
 	/* We are done updating shared state of the lock itself. */
-	SpinLockRelease_NoHoldoff(&lock->mutex);
+	SpinLockRelease(&lock->mutex);
 
 	/*
 	 * Awaken any waiters I removed from the queue.
diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h
index 3bab9e85b40..71fa0b08d48 100644
--- a/src/include/storage/buf_internals.h
+++ b/src/include/storage/buf_internals.h
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/storage/buf_internals.h,v 1.83 2005/11/22 18:17:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/storage/buf_internals.h,v 1.84 2005/12/29 18:08:05 tgl Exp $
  *
 *-------------------------------------------------------------------------
 */
@@ -138,24 +138,16 @@ typedef struct sbufdesc
 #define FREENEXT_NOT_IN_LIST	(-2)
 
 /*
- * Macros for acquiring/releasing a buffer header's spinlock.  The
- * NoHoldoff cases may be used when we know that we hold some LWLock
- * and therefore interrupts are already held off.  Do not apply these
- * to local buffers!
+ * Macros for acquiring/releasing a shared buffer header's spinlock.
+ * Do not apply these to local buffers!
  *
 * Note: as a general coding rule, if you are using these then you probably
- * want to be using a volatile-qualified pointer to the buffer header, to
+ * need to be using a volatile-qualified pointer to the buffer header, to
 * ensure that the compiler doesn't rearrange accesses to the header to
 * occur before or after the spinlock is acquired/released.
 */
-#define LockBufHdr(bufHdr) \
-	SpinLockAcquire(&(bufHdr)->buf_hdr_lock)
-#define UnlockBufHdr(bufHdr) \
-	SpinLockRelease(&(bufHdr)->buf_hdr_lock)
-#define LockBufHdr_NoHoldoff(bufHdr) \
-	SpinLockAcquire_NoHoldoff(&(bufHdr)->buf_hdr_lock)
-#define UnlockBufHdr_NoHoldoff(bufHdr) \
-	SpinLockRelease_NoHoldoff(&(bufHdr)->buf_hdr_lock)
+#define LockBufHdr(bufHdr)	SpinLockAcquire(&(bufHdr)->buf_hdr_lock)
+#define UnlockBufHdr(bufHdr)  SpinLockRelease(&(bufHdr)->buf_hdr_lock)
 
 
 /* in buf_init.c */
diff --git a/src/include/storage/spin.h b/src/include/storage/spin.h
index bd01d4ae0c1..cdfd43a06a1 100644
--- a/src/include/storage/spin.h
+++ b/src/include/storage/spin.h
@@ -14,17 +14,9 @@
 *		Acquire a spinlock, waiting if necessary.
 *		Time out and abort() if unable to acquire the lock in a
 *		"reasonable" amount of time --- typically ~ 1 minute.
- *		Cancel/die interrupts are held off until the lock is released.
 *
 *	void SpinLockRelease(volatile slock_t *lock)
 *		Unlock a previously acquired lock.
- *		Release the cancel/die interrupt holdoff.
- *
- *	void SpinLockAcquire_NoHoldoff(volatile slock_t *lock)
- *	void SpinLockRelease_NoHoldoff(volatile slock_t *lock)
- *		Same as above, except no interrupt holdoff processing is done.
- *		This pair of macros may be used when there is a surrounding
- *		interrupt holdoff.
 *
 *	bool SpinLockFree(slock_t *lock)
 *		Tests if the lock is free.  Returns TRUE if free, FALSE if locked.
@@ -43,14 +35,21 @@
 *	protects shared data with a spinlock MUST reference that shared
 *	data through a volatile pointer.
 *
+ *	Keep in mind the coding rule that spinlocks must not be held for more
+ *	than a few instructions.  In particular, we assume it is not possible
+ *	for a CHECK_FOR_INTERRUPTS() to occur while holding a spinlock, and so
+ *	it is not necessary to do HOLD/RESUME_INTERRUPTS() in these macros.
+ *
 *	These macros are implemented in terms of hardware-dependent macros
- *	supplied by s_lock.h.
+ *	supplied by s_lock.h.  There is not currently any extra functionality
+ *	added by this header, but there has been in the past and may someday
+ *	be again.
 *
 *
 * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
- * $PostgreSQL: pgsql/src/include/storage/spin.h,v 1.26 2005/10/13 06:17:34 neilc Exp $
+ * $PostgreSQL: pgsql/src/include/storage/spin.h,v 1.27 2005/12/29 18:08:05 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -58,26 +57,13 @@
 #define SPIN_H
 
 #include "storage/s_lock.h"
-#include "miscadmin.h"
 
 
 #define SpinLockInit(lock)	S_INIT_LOCK(lock)
 
-#define SpinLockAcquire(lock) \
-	do { \
-		HOLD_INTERRUPTS(); \
-		S_LOCK(lock); \
-	} while (0)
+#define SpinLockAcquire(lock) S_LOCK(lock)
 
-#define SpinLockAcquire_NoHoldoff(lock) S_LOCK(lock)
-
-#define SpinLockRelease(lock) \
-	do { \
-		S_UNLOCK(lock); \
-		RESUME_INTERRUPTS(); \
-	} while (0)
-
-#define SpinLockRelease_NoHoldoff(lock) S_UNLOCK(lock)
+#define SpinLockRelease(lock) S_UNLOCK(lock)
 
 #define SpinLockFree(lock)	S_LOCK_FREE(lock)
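
A minimal usage sketch of the convention this patch settles on, not part of the patch itself: callers now use plain SpinLockAcquire/SpinLockRelease, reference the protected data through a volatile-qualified pointer, and keep only a few instructions inside the critical section, since the macros no longer do HOLD_INTERRUPTS/RESUME_INTERRUPTS. The MySharedState struct, MyShared pointer, and bump_counter function below are hypothetical names invented for illustration; only the spinlock macros come from storage/spin.h.

/* Illustrative sketch only -- hypothetical shared structure, not from this patch */
#include "postgres.h"
#include "storage/spin.h"

typedef struct MySharedState
{
	slock_t		mutex;			/* protects counter below */
	int			counter;
} MySharedState;

static MySharedState *MyShared = NULL;	/* assumed to point into shared memory */

static int
bump_counter(void)
{
	/* use volatile pointer to prevent code rearrangement */
	volatile MySharedState *state = MyShared;
	int			result;

	SpinLockAcquire(&state->mutex);
	/* only a few instructions while the spinlock is held */
	state->counter++;
	result = state->counter;
	SpinLockRelease(&state->mutex);

	return result;
}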