
8.4 pgindent run, with new combined Linux/FreeBSD/MinGW typedef list provided by Andrew.
Bruce Momjian
2009-06-11 14:49:15 +00:00
parent 4e86efb4e5
commit d747140279
654 changed files with 11900 additions and 11387 deletions

src/backend/storage/ipc/ipc.c

@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/ipc.c,v 1.104 2009/05/15 15:56:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/ipc.c,v 1.105 2009/06/11 14:49:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -83,7 +83,7 @@ static int on_proc_exit_index,
* -cim 2/6/90
*
* Unfortunately, we can't really guarantee that add-on code
- * obeys the rule of not calling exit() directly. So, while
+ * obeys the rule of not calling exit() directly. So, while
* this is the preferred way out of the system, we also register
* an atexit callback that will make sure cleanup happens.
* ----------------------------------------------------------------
@@ -113,10 +113,10 @@ proc_exit(int code)
*
* Note that we do this here instead of in an on_proc_exit() callback
* because we want to ensure that this code executes last - we don't
- * want to interfere with any other on_proc_exit() callback. For
- * the same reason, we do not include it in proc_exit_prepare ...
- * so if you are exiting in the "wrong way" you won't drop your profile
- * in a nice place.
+ * want to interfere with any other on_proc_exit() callback. For the
+ * same reason, we do not include it in proc_exit_prepare ... so if
+ * you are exiting in the "wrong way" you won't drop your profile in a
+ * nice place.
*/
char gprofDirName[32];
@@ -229,8 +229,7 @@ atexit_callback(void)
/* ... too bad we don't know the real exit code ... */
proc_exit_prepare(-1);
}
- #else /* assume we have on_exit instead */
+ #else /* assume we have on_exit instead */
static void
atexit_callback(int exitstatus, void *arg)
@@ -238,8 +237,7 @@ atexit_callback(int exitstatus, void *arg)
/* Clean up everything that must be cleaned up */
proc_exit_prepare(exitstatus);
}
- #endif /* HAVE_ATEXIT */
+ #endif /* HAVE_ATEXIT */
/* ----------------------------------------------------------------
* on_proc_exit
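
The ipc.c hunks above all touch the exit-cleanup machinery: proc_exit() is the preferred way out, but an atexit() callback (or, where only on_exit() exists, an on_exit() callback) is registered as a safety net for add-on code that calls exit() directly. A minimal standalone sketch of that pattern, assuming only what the diff shows; cleanup_prepare() stands in for proc_exit_prepare() and its body is an illustrative placeholder:

    #include <stdlib.h>
    #include <stdio.h>

    /* Funnel all cleanup through one prepare function, as the diff does
     * with proc_exit_prepare(); the body here is a placeholder. */
    static void
    cleanup_prepare(int code)
    {
        printf("running exit cleanup, code %d\n", code);
    }

    #ifdef HAVE_ATEXIT
    static void
    atexit_callback(void)
    {
        /* ... too bad we don't know the real exit code ... */
        cleanup_prepare(-1);
    }
    #else                           /* assume we have on_exit instead */
    static void
    atexit_callback(int exitstatus, void *arg)
    {
        (void) arg;
        cleanup_prepare(exitstatus);
    }
    #endif                          /* HAVE_ATEXIT */

    int
    main(void)
    {
    #ifdef HAVE_ATEXIT
        atexit(atexit_callback);
    #else
        on_exit(atexit_callback, NULL);
    #endif
        exit(0);                    /* cleanup runs even on a direct exit() */
    }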

src/backend/storage/ipc/pmsignal.c

@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/pmsignal.c,v 1.27 2009/05/05 19:59:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/pmsignal.c,v 1.28 2009/06/11 14:49:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,7 +41,7 @@
* have three possible states: UNUSED, ASSIGNED, ACTIVE. An UNUSED slot is
* available for assignment. An ASSIGNED slot is associated with a postmaster
* child process, but either the process has not touched shared memory yet,
- * or it has successfully cleaned up after itself. A ACTIVE slot means the
+ * or it has successfully cleaned up after itself. A ACTIVE slot means the
* process is actively using shared memory. The slots are assigned to
* child processes at random, and postmaster.c is responsible for tracking
* which one goes with which PID.
@@ -57,8 +57,8 @@ struct PMSignalData
/* per-reason flags */
sig_atomic_t PMSignalFlags[NUM_PMSIGNALS];
/* per-child-process flags */
- int num_child_flags; /* # of entries in PMChildFlags[] */
- int next_child_flag; /* next slot to try to assign */
+ int num_child_flags; /* # of entries in PMChildFlags[] */
+ int next_child_flag; /* next slot to try to assign */
sig_atomic_t PMChildFlags[1]; /* VARIABLE LENGTH ARRAY */
};
@@ -181,6 +181,7 @@ ReleasePostmasterChildSlot(int slot)
Assert(slot > 0 && slot <= PMSignalState->num_child_flags);
slot--;
/*
* Note: the slot state might already be unused, because the logic in
* postmaster.c is such that this might get called twice when a child
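
The pmsignal.c comment above describes a small per-child-slot state machine: UNUSED slots are free, ASSIGNED slots belong to a forked child that has not yet touched shared memory (or has cleaned up after itself), and ACTIVE slots are in use. A sketch of how those three states could drive slot assignment; the names, fixed array size, and circular scan are illustrative simplifications, not the real pmsignal.c code:

    #include <signal.h>

    #define SLOT_UNUSED   0     /* available for assignment */
    #define SLOT_ASSIGNED 1     /* given to a child; shmem untouched */
    #define SLOT_ACTIVE   2     /* child actively using shared memory */

    typedef struct
    {
        int          num_child_flags;  /* # of entries in child_flags[] */
        int          next_child_flag;  /* next slot to try to assign */
        sig_atomic_t child_flags[64];  /* fixed size here for simplicity */
    } ChildSlotState;

    /* Returns a 1-based slot number, or 0 if every slot is in use;
     * the postmaster would track which slot goes with which PID. */
    static int
    assign_child_slot(ChildSlotState *state)
    {
        for (int i = 0; i < state->num_child_flags; i++)
        {
            int slot = state->next_child_flag;

            state->next_child_flag = (slot + 1) % state->num_child_flags;
            if (state->child_flags[slot] == SLOT_UNUSED)
            {
                state->child_flags[slot] = SLOT_ASSIGNED;
                return slot + 1;
            }
        }
        return 0;
    }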

src/backend/storage/ipc/procarray.c

@@ -23,7 +23,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.49 2009/04/04 17:40:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.50 2009/06/11 14:49:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -829,8 +829,8 @@ GetSnapshotData(Snapshot snapshot)
snapshot->curcid = GetCurrentCommandId(false);
/*
- * This is a new snapshot, so set both refcounts are zero, and mark it
- * as not copied in persistent memory.
+ * This is a new snapshot, so set both refcounts are zero, and mark it as
+ * not copied in persistent memory.
*/
snapshot->active_count = 0;
snapshot->regd_count = 0;
@@ -1038,7 +1038,7 @@ IsBackendPid(int pid)
* some snapshot we have. Since we examine the procarray with only shared
* lock, there are race conditions: a backend could set its xmin just after
* we look. Indeed, on multiprocessors with weak memory ordering, the
- * other backend could have set its xmin *before* we look. We know however
+ * other backend could have set its xmin *before* we look. We know however
* that such a backend must have held shared ProcArrayLock overlapping our
* own hold of ProcArrayLock, else we would see its xmin update. Therefore,
* any snapshot the other backend is taking concurrently with our scan cannot
@@ -1133,9 +1133,9 @@ CountActiveBackends(void)
*
* If someone just decremented numProcs, 'proc' could also point to a
* PGPROC entry that's no longer in the array. It still points to a
- * PGPROC struct, though, because freed PGPPROC entries just go to
- * the free list and are recycled. Its contents are nonsense in that
- * case, but that's acceptable for this function.
+ * PGPROC struct, though, because freed PGPPROC entries just go to the
+ * free list and are recycled. Its contents are nonsense in that case,
+ * but that's acceptable for this function.
*/
if (proc != NULL)
continue;
@@ -1235,7 +1235,8 @@ bool
CountOtherDBBackends(Oid databaseId, int *nbackends, int *nprepared)
{
ProcArrayStruct *arrayP = procArray;
- #define MAXAUTOVACPIDS 10 /* max autovacs to SIGTERM per iteration */
+ #define MAXAUTOVACPIDS 10 /* max autovacs to SIGTERM per iteration */
int autovac_pids[MAXAUTOVACPIDS];
int tries;
@@ -1280,10 +1281,10 @@ CountOtherDBBackends(Oid databaseId, int *nbackends, int *nprepared)
return false; /* no conflicting backends, so done */
/*
- * Send SIGTERM to any conflicting autovacuums before sleeping.
- * We postpone this step until after the loop because we don't
- * want to hold ProcArrayLock while issuing kill().
- * We have no idea what might block kill() inside the kernel...
+ * Send SIGTERM to any conflicting autovacuums before sleeping. We
+ * postpone this step until after the loop because we don't want to
+ * hold ProcArrayLock while issuing kill(). We have no idea what might
+ * block kill() inside the kernel...
*/
for (index = 0; index < nautovacs; index++)
(void) kill(autovac_pids[index], SIGTERM); /* ignore any error */
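
The CountOtherDBBackends hunk above illustrates a collect-then-signal pattern: PIDs are gathered while the lock is held, but kill() is issued only after releasing it, since we have no idea what might block kill() inside the kernel. A simplified sketch under that assumption; lock_acquire/lock_release and collect_autovac_pids are illustrative stubs standing in for ProcArrayLock and the real ProcArray scan:

    #include <signal.h>
    #include <sys/types.h>

    #define MAXAUTOVACPIDS 10   /* max autovacs to SIGTERM per iteration */

    static void lock_acquire(void) { }   /* stub for LWLockAcquire */
    static void lock_release(void) { }   /* stub for LWLockRelease */

    static int
    collect_autovac_pids(pid_t *pids, int maxpids)
    {
        (void) pids;
        (void) maxpids;
        return 0;               /* placeholder: real code scans ProcArray */
    }

    static void
    signal_conflicting_autovacs(void)
    {
        pid_t autovac_pids[MAXAUTOVACPIDS];
        int   nautovacs;

        lock_acquire();         /* e.g. shared ProcArrayLock */
        nautovacs = collect_autovac_pids(autovac_pids, MAXAUTOVACPIDS);
        lock_release();

        /* signal only after the lock is released */
        for (int i = 0; i < nautovacs; i++)
            (void) kill(autovac_pids[i], SIGTERM); /* ignore any error */
    }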

src/backend/storage/ipc/sinval.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/sinval.c,v 1.89 2009/01/01 17:23:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/sinval.c,v 1.90 2009/06/11 14:49:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -65,7 +65,7 @@ SendSharedInvalidMessages(const SharedInvalidationMessage *msgs, int n)
* NOTE: it is entirely possible for this routine to be invoked recursively
* as a consequence of processing inside the invalFunction or resetFunction.
* Furthermore, such a recursive call must guarantee that all outstanding
- * inval messages have been processed before it exits. This is the reason
+ * inval messages have been processed before it exits. This is the reason
* for the strange-looking choice to use a statically allocated buffer array
* and counters; it's so that a recursive call can process messages already
* sucked out of sinvaladt.c.
@@ -77,9 +77,10 @@ ReceiveSharedInvalidMessages(
{
#define MAXINVALMSGS 32
static SharedInvalidationMessage messages[MAXINVALMSGS];
/*
- * We use volatile here to prevent bugs if a compiler doesn't realize
- * that recursion is a possibility ...
+ * We use volatile here to prevent bugs if a compiler doesn't realize that
+ * recursion is a possibility ...
*/
static volatile int nextmsg = 0;
static volatile int nummsgs = 0;
@@ -121,18 +122,18 @@ ReceiveSharedInvalidMessages(
}
/*
- * We only need to loop if the last SIGetDataEntries call (which
- * might have been within a recursive call) returned a full buffer.
+ * We only need to loop if the last SIGetDataEntries call (which might
+ * have been within a recursive call) returned a full buffer.
*/
} while (nummsgs == MAXINVALMSGS);
/*
* We are now caught up. If we received a catchup signal, reset that
- * flag, and call SICleanupQueue(). This is not so much because we
- * need to flush dead messages right now, as that we want to pass on
- * the catchup signal to the next slowest backend. "Daisy chaining" the
- * catchup signal this way avoids creating spikes in system load for
- * what should be just a background maintenance activity.
+ * flag, and call SICleanupQueue(). This is not so much because we need
+ * to flush dead messages right now, as that we want to pass on the
+ * catchup signal to the next slowest backend. "Daisy chaining" the
+ * catchup signal this way avoids creating spikes in system load for what
+ * should be just a background maintenance activity.
*/
if (catchupInterruptOccurred)
{
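
The ReceiveSharedInvalidMessages hunks above hinge on a recursion-safe drain loop: a statically allocated buffer plus volatile cursors let a recursive invocation keep consuming messages already pulled out of sinvaladt.c. A minimal sketch of that structure; Msg, fetch_messages() and the handler are illustrative placeholders, with fetch_messages() standing in for SIGetDataEntries():

    #define MAXINVALMSGS 32

    typedef struct
    {
        int id;
    } Msg;

    static Msg messages[MAXINVALMSGS];

    /* volatile: a recursive call can advance these behind our back */
    static volatile int nextmsg = 0;
    static volatile int nummsgs = 0;

    static int
    fetch_messages(Msg *buf, int maxmsgs)
    {
        (void) buf;
        (void) maxmsgs;
        return 0;               /* placeholder for SIGetDataEntries() */
    }

    static void
    receive_messages(void (*handler) (const Msg *))
    {
        do
        {
            /* drain whatever is buffered, even across recursion */
            while (nextmsg < nummsgs)
            {
                Msg msg = messages[nextmsg++];

                handler(&msg);  /* may recurse into receive_messages() */
            }
            nextmsg = 0;
            nummsgs = fetch_messages(messages, MAXINVALMSGS);

            /* loop only if the last fetch returned a full buffer */
        } while (nummsgs == MAXINVALMSGS);
    }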

src/backend/storage/ipc/sinvaladt.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.77 2009/01/01 17:23:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.78 2009/06/11 14:49:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,7 +57,7 @@
* normal behavior is that at most one such interrupt is in flight at a time;
* when a backend completes processing a catchup interrupt, it executes
* SICleanupQueue, which will signal the next-furthest-behind backend if
- * needed. This avoids undue contention from multiple backends all trying
+ * needed. This avoids undue contention from multiple backends all trying
* to catch up at once. However, the furthest-back backend might be stuck
* in a state where it can't catch up. Eventually it will get reset, so it
* won't cause any more problems for anyone but itself. But we don't want
@@ -88,7 +88,7 @@
* the writer wants to change maxMsgNum while readers need to read it.
* We deal with that by having a spinlock that readers must take for just
* long enough to read maxMsgNum, while writers take it for just long enough
- * to write maxMsgNum. (The exact rule is that you need the spinlock to
+ * to write maxMsgNum. (The exact rule is that you need the spinlock to
* read maxMsgNum if you are not holding SInvalWriteLock, and you need the
* spinlock to write maxMsgNum unless you are holding both locks.)
*
@@ -146,8 +146,8 @@ typedef struct ProcState
/*
* Next LocalTransactionId to use for each idle backend slot. We keep
* this here because it is indexed by BackendId and it is convenient to
- * copy the value to and from local memory when MyBackendId is set.
- * It's meaningless in an active ProcState entry.
+ * copy the value to and from local memory when MyBackendId is set. It's
+ * meaningless in an active ProcState entry.
*/
LocalTransactionId nextLXID;
} ProcState;
@@ -235,8 +235,8 @@ CreateSharedInvalidationState(void)
/* Mark all backends inactive, and initialize nextLXID */
for (i = 0; i < shmInvalBuffer->maxBackends; i++)
{
- shmInvalBuffer->procState[i].procPid = 0; /* inactive */
- shmInvalBuffer->procState[i].nextMsgNum = 0; /* meaningless */
+ shmInvalBuffer->procState[i].procPid = 0; /* inactive */
+ shmInvalBuffer->procState[i].nextMsgNum = 0; /* meaningless */
shmInvalBuffer->procState[i].resetState = false;
shmInvalBuffer->procState[i].signaled = false;
shmInvalBuffer->procState[i].nextLXID = InvalidLocalTransactionId;
@@ -255,11 +255,11 @@ SharedInvalBackendInit(void)
SISeg *segP = shmInvalBuffer;
/*
- * This can run in parallel with read operations, and for that matter
- * with write operations; but not in parallel with additions and removals
- * of backends, nor in parallel with SICleanupQueue. It doesn't seem
- * worth having a third lock, so we choose to use SInvalWriteLock to
- * serialize additions/removals.
+ * This can run in parallel with read operations, and for that matter with
+ * write operations; but not in parallel with additions and removals of
+ * backends, nor in parallel with SICleanupQueue. It doesn't seem worth
+ * having a third lock, so we choose to use SInvalWriteLock to serialize
+ * additions/removals.
*/
LWLockAcquire(SInvalWriteLock, LW_EXCLUSIVE);
@@ -394,7 +394,7 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
SISeg *segP = shmInvalBuffer;
/*
- * N can be arbitrarily large. We divide the work into groups of no more
+ * N can be arbitrarily large. We divide the work into groups of no more
* than WRITE_QUANTUM messages, to be sure that we don't hold the lock for
* an unreasonably long time. (This is not so much because we care about
* letting in other writers, as that some just-caught-up backend might be
@@ -404,9 +404,9 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
*/
while (n > 0)
{
- int nthistime = Min(n, WRITE_QUANTUM);
- int numMsgs;
- int max;
+ int nthistime = Min(n, WRITE_QUANTUM);
+ int numMsgs;
+ int max;
n -= nthistime;
@@ -416,7 +416,7 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
* If the buffer is full, we *must* acquire some space. Clean the
* queue and reset anyone who is preventing space from being freed.
* Otherwise, clean the queue only when it's exceeded the next
- * fullness threshold. We have to loop and recheck the buffer state
+ * fullness threshold. We have to loop and recheck the buffer state
* after any call of SICleanupQueue.
*/
for (;;)
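
The SIInsertDataEntries hunks above describe batching an arbitrarily large insertion into groups of no more than WRITE_QUANTUM messages, so the write lock is never held for an unreasonably long time. A simplified sketch of that loop; the quantum value, message type, and lock/insert stubs are illustrative, not the real sinvaladt.c internals:

    #define WRITE_QUANTUM 64
    #define Min(x, y) ((x) < (y) ? (x) : (y))

    typedef struct
    {
        int id;
    } InvalMsg;

    static void write_lock(void) { }     /* stub for SInvalWriteLock */
    static void write_unlock(void) { }

    static void
    insert_batch(const InvalMsg *data, int n)
    {
        (void) data;
        (void) n;                /* placeholder: clean queue if full, copy in */
    }

    static void
    insert_entries(const InvalMsg *data, int n)
    {
        while (n > 0)
        {
            int nthistime = Min(n, WRITE_QUANTUM);

            /* hold the lock only for one bounded batch at a time */
            write_lock();
            insert_batch(data, nthistime);
            write_unlock();

            data += nthistime;
            n -= nthistime;
        }
    }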
@@ -458,9 +458,9 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
* get next SI message(s) for current backend, if there are any
*
* Possible return values:
- * 0: no SI message available
+ * 0: no SI message available
 * n>0: next n SI messages have been extracted into data[]
- * -1: SI reset message extracted
+ * -1: SI reset message extracted
*
* If the return value is less than the array size "datasize", the caller
* can assume that there are no more SI messages after the one(s) returned.
@@ -470,11 +470,11 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
* executing on behalf of other backends, since each instance will modify only
* fields of its own backend's ProcState, and no instance will look at fields
* of other backends' ProcStates. We express this by grabbing SInvalReadLock
- * in shared mode. Note that this is not exactly the normal (read-only)
+ * in shared mode. Note that this is not exactly the normal (read-only)
* interpretation of a shared lock! Look closely at the interactions before
* allowing SInvalReadLock to be grabbed in shared mode for any other reason!
*
- * NB: this can also run in parallel with SIInsertDataEntries. It is not
+ * NB: this can also run in parallel with SIInsertDataEntries. It is not
* guaranteed that we will return any messages added after the routine is
* entered.
*
@@ -488,7 +488,7 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize)
ProcState *stateP;
int max;
int n;
LWLockAcquire(SInvalReadLock, LW_SHARED);
segP = shmInvalBuffer;
@@ -557,7 +557,7 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize)
*
* Caution: because we transiently release write lock when we have to signal
* some other backend, it is NOT guaranteed that there are still minFree
- * free message slots at exit. Caller must recheck and perhaps retry.
+ * free message slots at exit. Caller must recheck and perhaps retry.
*/
void
SICleanupQueue(bool callerHasWriteLock, int minFree)
@@ -576,9 +576,9 @@ SICleanupQueue(bool callerHasWriteLock, int minFree)
LWLockAcquire(SInvalReadLock, LW_EXCLUSIVE);
/*
- * Recompute minMsgNum = minimum of all backends' nextMsgNum, identify
- * the furthest-back backend that needs signaling (if any), and reset
- * any backends that are too far back.
+ * Recompute minMsgNum = minimum of all backends' nextMsgNum, identify the
+ * furthest-back backend that needs signaling (if any), and reset any
+ * backends that are too far back.
*/
min = segP->maxMsgNum;
minsig = min - SIG_THRESHOLD;
@@ -587,15 +587,15 @@ SICleanupQueue(bool callerHasWriteLock, int minFree)
for (i = 0; i < segP->lastBackend; i++)
{
ProcState *stateP = &segP->procState[i];
- int n = stateP->nextMsgNum;
+ int n = stateP->nextMsgNum;
/* Ignore if inactive or already in reset state */
if (stateP->procPid == 0 || stateP->resetState)
continue;
/*
- * If we must free some space and this backend is preventing it,
- * force him into reset state and then ignore until he catches up.
+ * If we must free some space and this backend is preventing it, force
+ * him into reset state and then ignore until he catches up.
*/
if (n < lowbound)
{
@@ -619,8 +619,8 @@ SICleanupQueue(bool callerHasWriteLock, int minFree)
/*
* When minMsgNum gets really large, decrement all message counters so as
- * to forestall overflow of the counters. This happens seldom enough
- * that folding it into the previous loop would be a loser.
+ * to forestall overflow of the counters. This happens seldom enough that
+ * folding it into the previous loop would be a loser.
*/
if (min >= MSGNUMWRAPAROUND)
{
@@ -649,7 +649,7 @@ SICleanupQueue(bool callerHasWriteLock, int minFree)
*/
if (needSig)
{
- pid_t his_pid = needSig->procPid;
+ pid_t his_pid = needSig->procPid;
needSig->signaled = true;
LWLockRelease(SInvalReadLock);
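
One hunk above also mentions the wraparound guard in SICleanupQueue: when minMsgNum gets really large, all message counters are decremented by the same constant, which preserves their relative order while keeping the plain int counters far from overflow. A sketch of that step; the struct layout, array bound, and threshold value are illustrative assumptions, not the real MSGNUMWRAPAROUND machinery:

    #define MSGNUMWRAPAROUND (1 << 30)

    typedef struct
    {
        int minMsgNum;
        int maxMsgNum;
        int nextMsgNum[64];     /* one read cursor per backend slot */
        int lastBackend;
    } QueueState;

    static void
    fold_message_counters(QueueState *q)
    {
        if (q->minMsgNum >= MSGNUMWRAPAROUND)
        {
            /* shift every counter down together; orderings are preserved */
            q->minMsgNum -= MSGNUMWRAPAROUND;
            q->maxMsgNum -= MSGNUMWRAPAROUND;
            for (int i = 0; i < q->lastBackend; i++)
                q->nextMsgNum[i] -= MSGNUMWRAPAROUND;
        }
    }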