Mirror of https://github.com/postgres/postgres.git
Standard pgindent run for 8.1.
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/ipc/ipc.c,v 1.90 2004/12/31 22:00:56 pgsql Exp $
* $PostgreSQL: pgsql/src/backend/storage/ipc/ipc.c,v 1.91 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -74,8 +74,8 @@ void
proc_exit(int code)
{
/*
* Once we set this flag, we are committed to exit. Any ereport()
* will NOT send control back to the main loop, but right back here.
* Once we set this flag, we are committed to exit. Any ereport() will
* NOT send control back to the main loop, but right back here.
*/
proc_exit_inprogress = true;

@@ -100,15 +100,14 @@ proc_exit(int code)
/*
* call all the callbacks registered before calling exit().
*
* Note that since we decrement on_proc_exit_index each time, if a
* callback calls ereport(ERROR) or ereport(FATAL) then it won't be
* invoked again when control comes back here (nor will the
* previously-completed callbacks). So, an infinite loop should not
* be possible.
* Note that since we decrement on_proc_exit_index each time, if a callback
* calls ereport(ERROR) or ereport(FATAL) then it won't be invoked again
* when control comes back here (nor will the previously-completed
* callbacks). So, an infinite loop should not be possible.
*/
while (--on_proc_exit_index >= 0)
(*on_proc_exit_list[on_proc_exit_index].function) (code,
on_proc_exit_list[on_proc_exit_index].arg);
on_proc_exit_list[on_proc_exit_index].arg);

elog(DEBUG3, "exit(%d)", code);
exit(code);
@@ -128,12 +127,12 @@ shmem_exit(int code)
/*
* call all the registered callbacks.
*
* As with proc_exit(), we remove each callback from the list before
* calling it, to avoid infinite loop in case of error.
* As with proc_exit(), we remove each callback from the list before calling
* it, to avoid infinite loop in case of error.
*/
while (--on_shmem_exit_index >= 0)
(*on_shmem_exit_list[on_shmem_exit_index].function) (code,
on_shmem_exit_list[on_shmem_exit_index].arg);
on_shmem_exit_list[on_shmem_exit_index].arg);

on_shmem_exit_index = 0;
}

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/ipc/ipci.c,v 1.78 2005/08/20 23:26:20 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/ipc/ipci.c,v 1.79 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -66,13 +66,12 @@ CreateSharedMemoryAndSemaphores(bool makePrivate, int port)

/*
* Size of the Postgres shared-memory block is estimated via
* moderately-accurate estimates for the big hogs, plus 100K for
* the stuff that's too small to bother with estimating.
* moderately-accurate estimates for the big hogs, plus 100K for the
* stuff that's too small to bother with estimating.
*
* We take some care during this phase to ensure that the total
* size request doesn't overflow size_t. If this gets through,
* we don't need to be so careful during the actual allocation
* phase.
* We take some care during this phase to ensure that the total size
* request doesn't overflow size_t. If this gets through, we don't
* need to be so careful during the actual allocation phase.
*/
size = 100000;
size = add_size(size, hash_estimate_size(SHMEM_INDEX_SIZE,
@@ -115,9 +114,9 @@ CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
else
{
/*
* We are reattaching to an existing shared memory segment.
* This should only be reached in the EXEC_BACKEND case, and
* even then only with makePrivate == false.
* We are reattaching to an existing shared memory segment. This
* should only be reached in the EXEC_BACKEND case, and even then only
* with makePrivate == false.
*/
#ifdef EXEC_BACKEND
Assert(!makePrivate);

@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/ipc/pmsignal.c,v 1.19 2005/08/20 23:26:20 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/ipc/pmsignal.c,v 1.20 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -112,9 +112,9 @@ PostmasterIsAlive(bool amDirectChild)
{
/*
* Use kill() to see if the postmaster is still alive. This can
* sometimes give a false positive result, since the postmaster's
* PID may get recycled, but it is good enough for existing uses
* by indirect children.
* sometimes give a false positive result, since the postmaster's PID
* may get recycled, but it is good enough for existing uses by
* indirect children.
*/
return (kill(PostmasterPid, 0) == 0);
}

@@ -16,14 +16,14 @@
* prepared transactions. The xid and subxids fields of these are valid,
* as is the procLocks list. They can be distinguished from regular backend
* PGPROCs at need by checking for pid == 0.
*
*
*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.6 2005/08/20 23:26:20 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.7 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,8 +44,8 @@ typedef struct ProcArrayStruct
int maxProcs; /* allocated size of procs array */

/*
* We declare procs[] as 1 entry because C wants a fixed-size array,
* but actually it is maxProcs entries long.
* We declare procs[] as 1 entry because C wants a fixed-size array, but
* actually it is maxProcs entries long.
*/
PGPROC *procs[1]; /* VARIABLE LENGTH ARRAY */
} ProcArrayStruct;
@@ -67,14 +67,12 @@ static long xc_slow_answer = 0;
#define xc_slow_answer_inc() (xc_slow_answer++)

static void DisplayXidCache(void);

#else /* !XIDCACHE_DEBUG */

#define xc_by_recent_xmin_inc() ((void) 0)
#define xc_by_main_xid_inc() ((void) 0)
#define xc_by_child_xid_inc() ((void) 0)
#define xc_slow_answer_inc() ((void) 0)

#endif /* XIDCACHE_DEBUG */


@@ -88,7 +86,7 @@ ProcArrayShmemSize(void)

size = offsetof(ProcArrayStruct, procs);
size = add_size(size, mul_size(sizeof(PGPROC *),
add_size(MaxBackends, max_prepared_xacts)));
add_size(MaxBackends, max_prepared_xacts)));

return size;
}
@@ -128,9 +126,9 @@ ProcArrayAdd(PGPROC *proc)
if (arrayP->numProcs >= arrayP->maxProcs)
{
/*
* Ooops, no room. (This really shouldn't happen, since there is
* a fixed supply of PGPROC structs too, and so we should have
* failed earlier.)
* Ooops, no room. (This really shouldn't happen, since there is a
* fixed supply of PGPROC structs too, and so we should have failed
* earlier.)
*/
LWLockRelease(ProcArrayLock);
ereport(FATAL,
@@ -213,8 +211,8 @@ TransactionIdIsInProgress(TransactionId xid)
bool locked;

/*
* Don't bother checking a transaction older than RecentXmin; it
* could not possibly still be running.
* Don't bother checking a transaction older than RecentXmin; it could not
* possibly still be running.
*/
if (TransactionIdPrecedes(xid, RecentXmin))
{
@@ -249,8 +247,8 @@ TransactionIdIsInProgress(TransactionId xid)
}

/*
* We can ignore main Xids that are younger than the target
* Xid, since the target could not possibly be their child.
* We can ignore main Xids that are younger than the target Xid, since
* the target could not possibly be their child.
*/
if (TransactionIdPrecedes(xid, pxid))
continue;
@@ -272,11 +270,11 @@ TransactionIdIsInProgress(TransactionId xid)
}

/*
* Save the main Xid for step 3. We only need to remember
* main Xids that have uncached children. (Note: there is no
* race condition here because the overflowed flag cannot be
* cleared, only set, while we hold ProcArrayLock. So we can't
* miss an Xid that we need to worry about.)
* Save the main Xid for step 3. We only need to remember main Xids
* that have uncached children. (Note: there is no race condition
* here because the overflowed flag cannot be cleared, only set, while
* we hold ProcArrayLock. So we can't miss an Xid that we need to
* worry about.)
*/
if (proc->subxids.overflowed)
xids[nxids++] = pxid;
@@ -295,11 +293,10 @@ TransactionIdIsInProgress(TransactionId xid)
/*
* Step 3: have to check pg_subtrans.
*
* At this point, we know it's either a subtransaction of one of the Xids
* in xids[], or it's not running. If it's an already-failed
* subtransaction, we want to say "not running" even though its parent
* may still be running. So first, check pg_clog to see if it's been
* aborted.
* At this point, we know it's either a subtransaction of one of the Xids in
* xids[], or it's not running. If it's an already-failed subtransaction,
* we want to say "not running" even though its parent may still be
* running. So first, check pg_clog to see if it's been aborted.
*/
xc_slow_answer_inc();

@@ -307,10 +304,9 @@ TransactionIdIsInProgress(TransactionId xid)
goto result_known;

/*
* It isn't aborted, so check whether the transaction tree it belongs
* to is still running (or, more precisely, whether it was running
* when this routine started -- note that we already released
* ProcArrayLock).
* It isn't aborted, so check whether the transaction tree it belongs to
* is still running (or, more precisely, whether it was running when this
* routine started -- note that we already released ProcArrayLock).
*/
topxid = SubTransGetTopmostTransaction(xid);
Assert(TransactionIdIsValid(topxid));
@@ -350,8 +346,8 @@ TransactionIdIsActive(TransactionId xid)
int i;

/*
* Don't bother checking a transaction older than RecentXmin; it
* could not possibly still be running.
* Don't bother checking a transaction older than RecentXmin; it could not
* possibly still be running.
*/
if (TransactionIdPrecedes(xid, RecentXmin))
return false;
@@ -413,9 +409,9 @@ GetOldestXmin(bool allDbs)
/*
* Normally we start the min() calculation with our own XID. But if
* called by checkpointer, we will not be inside a transaction, so use
* next XID as starting point for min() calculation. (Note that if
* there are no xacts running at all, that will be the subtrans
* truncation point!)
* next XID as starting point for min() calculation. (Note that if there
* are no xacts running at all, that will be the subtrans truncation
* point!)
*/
if (IsTransactionState())
result = GetTopTransactionId();
@@ -463,7 +459,7 @@ GetOldestXmin(bool allDbs)
* This ensures that the set of transactions seen as "running" by the
* current xact will not change after it takes the snapshot.
*
* Note that only top-level XIDs are included in the snapshot. We can
* Note that only top-level XIDs are included in the snapshot. We can
* still apply the xmin and xmax limits to subtransaction XIDs, but we
* need to work a bit harder to see if XIDs in [xmin..xmax) are running.
*
@@ -474,7 +470,7 @@ GetOldestXmin(bool allDbs)
* RecentXmin: the xmin computed for the most recent snapshot. XIDs
* older than this are known not running any more.
* RecentGlobalXmin: the global xmin (oldest TransactionXmin across all
* running transactions). This is the same computation done by
* running transactions). This is the same computation done by
* GetOldestXmin(TRUE).
*----------
*/
@@ -496,14 +492,14 @@ GetSnapshotData(Snapshot snapshot, bool serializable)
TransactionIdIsValid(MyProc->xmin));

/*
* Allocating space for maxProcs xids is usually overkill;
* numProcs would be sufficient. But it seems better to do the
* malloc while not holding the lock, so we can't look at numProcs.
* Allocating space for maxProcs xids is usually overkill; numProcs would
* be sufficient. But it seems better to do the malloc while not holding
* the lock, so we can't look at numProcs.
*
* This does open a possibility for avoiding repeated malloc/free: since
* maxProcs does not change at runtime, we can simply reuse the
* previous xip array if any. (This relies on the fact that all
* callers pass static SnapshotData structs.)
* maxProcs does not change at runtime, we can simply reuse the previous
* xip array if any. (This relies on the fact that all callers pass
* static SnapshotData structs.)
*/
if (snapshot->xip == NULL)
{
@@ -563,13 +559,12 @@ GetSnapshotData(Snapshot snapshot, bool serializable)
TransactionId xid = proc->xid;

/*
* Ignore my own proc (dealt with my xid above), procs not
* running a transaction, and xacts started since we read the
* next transaction ID. There's no need to store XIDs above
* what we got from ReadNewTransactionId, since we'll treat
* them as running anyway. We also assume that such xacts
* can't compute an xmin older than ours, so they needn't be
* considered in computing globalxmin.
* Ignore my own proc (dealt with my xid above), procs not running a
* transaction, and xacts started since we read the next transaction
* ID. There's no need to store XIDs above what we got from
* ReadNewTransactionId, since we'll treat them as running anyway. We
* also assume that such xacts can't compute an xmin older than ours,
* so they needn't be considered in computing globalxmin.
*/
if (proc == MyProc ||
!TransactionIdIsNormal(xid) ||
@@ -594,9 +589,9 @@ GetSnapshotData(Snapshot snapshot, bool serializable)
LWLockRelease(ProcArrayLock);

/*
* Update globalxmin to include actual process xids. This is a
* slightly different way of computing it than GetOldestXmin uses, but
* should give the same result.
* Update globalxmin to include actual process xids. This is a slightly
* different way of computing it than GetOldestXmin uses, but should give
* the same result.
*/
if (TransactionIdPrecedes(xmin, globalxmin))
globalxmin = xmin;
@@ -696,14 +691,14 @@ BackendPidGetProc(int pid)
* Returns 0 if not found or it's a prepared transaction. Note that
* it is up to the caller to be sure that the question remains
* meaningful for long enough for the answer to be used ...
*
*
* Only main transaction Ids are considered. This function is mainly
* useful for determining what backend owns a lock.
*/
int
BackendXidGetPid(TransactionId xid)
{
int result = 0;
int result = 0;
ProcArrayStruct *arrayP = procArray;
int index;

@@ -754,9 +749,8 @@ CountActiveBackends(void)

/*
* Note: for speed, we don't acquire ProcArrayLock. This is a little bit
* bogus, but since we are only testing fields for zero or nonzero,
* it should be OK. The result is only used for heuristic purposes
* anyway...
* bogus, but since we are only testing fields for zero or nonzero, it
* should be OK. The result is only used for heuristic purposes anyway...
*/
for (index = 0; index < arrayP->numProcs; index++)
{
@@ -854,17 +848,16 @@ XidCacheRemoveRunningXids(TransactionId xid, int nxids, TransactionId *xids)

/*
* We must hold ProcArrayLock exclusively in order to remove transactions
* from the PGPROC array. (See notes in GetSnapshotData.) It's
* possible this could be relaxed since we know this routine is only
* used to abort subtransactions, but pending closer analysis we'd
* best be conservative.
* from the PGPROC array. (See notes in GetSnapshotData.) It's possible
* this could be relaxed since we know this routine is only used to abort
* subtransactions, but pending closer analysis we'd best be conservative.
*/
LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);

/*
* Under normal circumstances xid and xids[] will be in increasing
* order, as will be the entries in subxids. Scan backwards to avoid
* O(N^2) behavior when removing a lot of xids.
* Under normal circumstances xid and xids[] will be in increasing order,
* as will be the entries in subxids. Scan backwards to avoid O(N^2)
* behavior when removing a lot of xids.
*/
for (i = nxids - 1; i >= 0; i--)
{
@@ -878,11 +871,13 @@ XidCacheRemoveRunningXids(TransactionId xid, int nxids, TransactionId *xids)
break;
}
}

/*
* Ordinarily we should have found it, unless the cache has overflowed.
* However it's also possible for this routine to be invoked multiple
* times for the same subtransaction, in case of an error during
* AbortSubTransaction. So instead of Assert, emit a debug warning.
* Ordinarily we should have found it, unless the cache has
* overflowed. However it's also possible for this routine to be
* invoked multiple times for the same subtransaction, in case of an
* error during AbortSubTransaction. So instead of Assert, emit a
* debug warning.
*/
if (j < 0 && !MyProc->subxids.overflowed)
elog(WARNING, "did not find subXID %u in MyProc", anxid);

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.86 2005/10/07 21:42:38 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.87 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -71,13 +71,13 @@ SHMEM_OFFSET ShmemBase; /* start address of shared memory */

static SHMEM_OFFSET ShmemEnd; /* end+1 address of shared memory */

slock_t *ShmemLock; /* spinlock for shared memory and LWLock allocation */
slock_t *ShmemLock; /* spinlock for shared memory and LWLock
* allocation */

NON_EXEC_STATIC slock_t *ShmemIndexLock; /* spinlock for ShmemIndex */

NON_EXEC_STATIC void *ShmemIndexAlloc = NULL; /* Memory actually
* allocated for
* ShmemIndex */
NON_EXEC_STATIC void *ShmemIndexAlloc = NULL; /* Memory actually allocated
* for ShmemIndex */

static HTAB *ShmemIndex = NULL; /* primary index hashtable for shmem */

@@ -205,11 +205,10 @@ InitShmemIndex(void)
bool found;

/*
* Since ShmemInitHash calls ShmemInitStruct, which expects the
* ShmemIndex hashtable to exist already, we have a bit of a
* circularity problem in initializing the ShmemIndex itself. The
* special "ShmemIndex" hash table name will tell ShmemInitStruct
* to fake it.
* Since ShmemInitHash calls ShmemInitStruct, which expects the ShmemIndex
* hashtable to exist already, we have a bit of a circularity problem in
* initializing the ShmemIndex itself. The special "ShmemIndex" hash
* table name will tell ShmemInitStruct to fake it.
*/

/* create the shared memory shmem index */
@@ -274,9 +273,9 @@ ShmemInitHash(const char *name, /* table string name for shmem index */
void *location;

/*
* Hash tables allocated in shared memory have a fixed directory; it
* can't grow or other backends wouldn't be able to find it. So, make
* sure we make it big enough to start with.
* Hash tables allocated in shared memory have a fixed directory; it can't
* grow or other backends wouldn't be able to find it. So, make sure we
* make it big enough to start with.
*
* The shared memory allocator must be specified too.
*/
@@ -286,19 +285,19 @@ ShmemInitHash(const char *name, /* table string name for shmem index */

/* look it up in the shmem index */
location = ShmemInitStruct(name,
sizeof(HASHHDR) + infoP->dsize * sizeof(HASHSEGMENT),
sizeof(HASHHDR) + infoP->dsize * sizeof(HASHSEGMENT),
&found);

/*
* shmem index is corrupted. Let someone else give the error
* message since they have more information
* shmem index is corrupted. Let someone else give the error message
* since they have more information
*/
if (location == NULL)
return NULL;

/*
* if it already exists, attach to it rather than allocate and
* initialize new space
* if it already exists, attach to it rather than allocate and initialize
* new space
*/
if (found)
hash_flags |= HASH_ATTACH;
@@ -348,11 +347,11 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
else
{
/*
* If the shmem index doesn't exist, we are bootstrapping: we
* must be trying to init the shmem index itself.
* If the shmem index doesn't exist, we are bootstrapping: we must
* be trying to init the shmem index itself.
*
* Notice that the ShmemIndexLock is held until the shmem index
* has been completely initialized.
* Notice that the ShmemIndexLock is held until the shmem index has
* been completely initialized.
*/
*foundPtr = FALSE;
ShmemIndexAlloc = ShmemAlloc(size);
@@ -375,9 +374,9 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
if (*foundPtr)
{
/*
* Structure is in the shmem index so someone else has allocated
* it already. The size better be the same as the size we are
* trying to initialize to or there is a name conflict (or worse).
* Structure is in the shmem index so someone else has allocated it
* already. The size better be the same as the size we are trying to
* initialize to or there is a name conflict (or worse).
*/
if (result->size != size)
{
@@ -402,7 +401,7 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)

ereport(WARNING,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("could not allocate shared memory segment \"%s\"", name)));
errmsg("could not allocate shared memory segment \"%s\"", name)));
*foundPtr = FALSE;
return NULL;
}

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/ipc/sinval.c,v 1.77 2005/08/20 23:26:21 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/ipc/sinval.c,v 1.78 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -109,7 +109,7 @@ SendSharedInvalidMessage(SharedInvalidationMessage *msg)
*/
void
ReceiveSharedInvalidMessages(
void (*invalFunction) (SharedInvalidationMessage *msg),
void (*invalFunction) (SharedInvalidationMessage *msg),
void (*resetFunction) (void))
{
SharedInvalidationMessage data;
@@ -119,20 +119,20 @@ ReceiveSharedInvalidMessages(
for (;;)
{
/*
* We can discard any pending catchup event, since we will not
* exit this loop until we're fully caught up.
* We can discard any pending catchup event, since we will not exit
* this loop until we're fully caught up.
*/
catchupInterruptOccurred = 0;

/*
* We can run SIGetDataEntry in parallel with other backends
* running SIGetDataEntry for themselves, since each instance will
* modify only fields of its own backend's ProcState, and no
* instance will look at fields of other backends' ProcStates. We
* express this by grabbing SInvalLock in shared mode. Note that
* this is not exactly the normal (read-only) interpretation of a
* shared lock! Look closely at the interactions before allowing
* SInvalLock to be grabbed in shared mode for any other reason!
* We can run SIGetDataEntry in parallel with other backends running
* SIGetDataEntry for themselves, since each instance will modify only
* fields of its own backend's ProcState, and no instance will look at
* fields of other backends' ProcStates. We express this by grabbing
* SInvalLock in shared mode. Note that this is not exactly the
* normal (read-only) interpretation of a shared lock! Look closely at
* the interactions before allowing SInvalLock to be grabbed in shared
* mode for any other reason!
*/
LWLockAcquire(SInvalLock, LW_SHARED);
getResult = SIGetDataEntry(shmInvalBuffer, MyBackendId, &data);
@@ -195,19 +195,18 @@ CatchupInterruptHandler(SIGNAL_ARGS)
bool save_ImmediateInterruptOK = ImmediateInterruptOK;

/*
* We may be called while ImmediateInterruptOK is true; turn it
* off while messing with the catchup state. (We would have to
* save and restore it anyway, because PGSemaphore operations
* inside ProcessCatchupEvent() might reset it.)
* We may be called while ImmediateInterruptOK is true; turn it off
* while messing with the catchup state. (We would have to save and
* restore it anyway, because PGSemaphore operations inside
* ProcessCatchupEvent() might reset it.)
*/
ImmediateInterruptOK = false;

/*
* I'm not sure whether some flavors of Unix might allow another
* SIGUSR1 occurrence to recursively interrupt this routine. To
* cope with the possibility, we do the same sort of dance that
* EnableCatchupInterrupt must do --- see that routine for
* comments.
* SIGUSR1 occurrence to recursively interrupt this routine. To cope
* with the possibility, we do the same sort of dance that
* EnableCatchupInterrupt must do --- see that routine for comments.
*/
catchupInterruptEnabled = 0; /* disable any recursive signal */
catchupInterruptOccurred = 1; /* do at least one iteration */
@@ -225,8 +224,7 @@ CatchupInterruptHandler(SIGNAL_ARGS)
}

/*
* Restore ImmediateInterruptOK, and check for interrupts if
* needed.
* Restore ImmediateInterruptOK, and check for interrupts if needed.
*/
ImmediateInterruptOK = save_ImmediateInterruptOK;
if (save_ImmediateInterruptOK)
@@ -235,8 +233,7 @@ CatchupInterruptHandler(SIGNAL_ARGS)
else
{
/*
* In this path it is NOT SAFE to do much of anything, except
* this:
* In this path it is NOT SAFE to do much of anything, except this:
*/
catchupInterruptOccurred = 1;
}
@@ -258,27 +255,25 @@ void
EnableCatchupInterrupt(void)
{
/*
* This code is tricky because we are communicating with a signal
* handler that could interrupt us at any point. If we just checked
* catchupInterruptOccurred and then set catchupInterruptEnabled, we
* could fail to respond promptly to a signal that happens in between
* those two steps. (A very small time window, perhaps, but Murphy's
* Law says you can hit it...) Instead, we first set the enable flag,
* then test the occurred flag. If we see an unserviced interrupt has
* occurred, we re-clear the enable flag before going off to do the
* service work. (That prevents re-entrant invocation of
* ProcessCatchupEvent() if another interrupt occurs.) If an interrupt
* comes in between the setting and clearing of
* catchupInterruptEnabled, then it will have done the service work
* and left catchupInterruptOccurred zero, so we have to check again
* after clearing enable. The whole thing has to be in a loop in case
* another interrupt occurs while we're servicing the first. Once we
* get out of the loop, enable is set and we know there is no
* This code is tricky because we are communicating with a signal handler
* that could interrupt us at any point. If we just checked
* catchupInterruptOccurred and then set catchupInterruptEnabled, we could
* fail to respond promptly to a signal that happens in between those two
* steps. (A very small time window, perhaps, but Murphy's Law says you
* can hit it...) Instead, we first set the enable flag, then test the
* occurred flag. If we see an unserviced interrupt has occurred, we
* re-clear the enable flag before going off to do the service work.
* (That prevents re-entrant invocation of ProcessCatchupEvent() if
* another interrupt occurs.) If an interrupt comes in between the setting
* and clearing of catchupInterruptEnabled, then it will have done the
* service work and left catchupInterruptOccurred zero, so we have to
* check again after clearing enable. The whole thing has to be in a loop
* in case another interrupt occurs while we're servicing the first. Once
* we get out of the loop, enable is set and we know there is no
* unserviced interrupt.
*
* NB: an overenthusiastic optimizing compiler could easily break this
* code. Hopefully, they all understand what "volatile" means these
* days.
* NB: an overenthusiastic optimizing compiler could easily break this code.
* Hopefully, they all understand what "volatile" means these days.
*/
for (;;)
{
@@ -330,17 +325,17 @@ ProcessCatchupEvent(void)
notify_enabled = DisableNotifyInterrupt();

/*
* What we need to do here is cause ReceiveSharedInvalidMessages() to
* run, which will do the necessary work and also reset the
* catchupInterruptOccurred flag. If we are inside a transaction we
* can just call AcceptInvalidationMessages() to do this. If we
* aren't, we start and immediately end a transaction; the call to
* What we need to do here is cause ReceiveSharedInvalidMessages() to run,
* which will do the necessary work and also reset the
* catchupInterruptOccurred flag. If we are inside a transaction we can
* just call AcceptInvalidationMessages() to do this. If we aren't, we
* start and immediately end a transaction; the call to
* AcceptInvalidationMessages() happens down inside transaction start.
*
* It is awfully tempting to just call AcceptInvalidationMessages()
* without the rest of the xact start/stop overhead, and I think that
* would actually work in the normal case; but I am not sure that
* things would clean up nicely if we got an error partway through.
* It is awfully tempting to just call AcceptInvalidationMessages() without
* the rest of the xact start/stop overhead, and I think that would
* actually work in the normal case; but I am not sure that things would
* clean up nicely if we got an error partway through.
*/
if (IsTransactionOrTransactionBlock())
{

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.60 2005/08/20 23:26:21 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.61 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -198,8 +198,8 @@ SIInsertDataEntry(SISeg *segP, SharedInvalidationMessage *data)
{
/*
* Don't panic just yet: slowest backend might have consumed some
* messages but not yet have done SIDelExpiredDataEntries() to
* advance minMsgNum. So, make sure minMsgNum is up-to-date.
* messages but not yet have done SIDelExpiredDataEntries() to advance
* minMsgNum. So, make sure minMsgNum is up-to-date.
*/
SIDelExpiredDataEntries(segP);
numMsgs = segP->maxMsgNum - segP->minMsgNum;
@@ -213,9 +213,9 @@ SIInsertDataEntry(SISeg *segP, SharedInvalidationMessage *data)

/*
* Try to prevent table overflow. When the table is 70% full send a
* WAKEN_CHILDREN request to the postmaster. The postmaster will send
* a SIGUSR1 signal to all the backends, which will cause sinval.c to
* read any pending SI entries.
* WAKEN_CHILDREN request to the postmaster. The postmaster will send a
* SIGUSR1 signal to all the backends, which will cause sinval.c to read
* any pending SI entries.
*
* This should never happen if all the backends are actively executing
* queries, but if a backend is sitting idle then it won't be starting
@@ -302,9 +302,9 @@ SIGetDataEntry(SISeg *segP, int backendId,
stateP->nextMsgNum++;

/*
* There may be other backends that haven't read the message, so we
* cannot delete it here. SIDelExpiredDataEntries() should be called
* to remove dead messages.
* There may be other backends that haven't read the message, so we cannot
* delete it here. SIDelExpiredDataEntries() should be called to remove
* dead messages.
*/
return 1; /* got a message */
}
@@ -338,8 +338,8 @@ SIDelExpiredDataEntries(SISeg *segP)
segP->minMsgNum = min;

/*
* When minMsgNum gets really large, decrement all message counters so
* as to forestall overflow of the counters.
* When minMsgNum gets really large, decrement all message counters so as
* to forestall overflow of the counters.
*/
if (min >= MSGNUMWRAPAROUND)
{