
pgindent run for 8.2.

Bruce Momjian
2006-10-04 00:30:14 +00:00
parent 451e419e98
commit f99a569a2e
522 changed files with 21297 additions and 17170 deletions

src/backend/storage/buffer/bufmgr.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.211 2006/09/25 22:01:10 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.212 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -178,10 +178,10 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
* This can happen because mdread doesn't complain about reads beyond
* EOF --- which is arguably bogus, but changing it seems tricky ---
* and so a previous attempt to read a block just beyond EOF could
* have left a "valid" zero-filled buffer. Unfortunately, we have
* have left a "valid" zero-filled buffer. Unfortunately, we have
* also seen this case occurring because of buggy Linux kernels that
* sometimes return an lseek(SEEK_END) result that doesn't account for
* a recent write. In that situation, the pre-existing buffer would
* a recent write. In that situation, the pre-existing buffer would
* contain valid data that we don't want to overwrite. Since the
* legitimate cases should always have left a zero-filled buffer,
* complain if not PageIsNew.
@@ -194,10 +194,10 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
/*
* We *must* do smgrextend before succeeding, else the
* page will not be reserved by the kernel, and the next P_NEW call
* will decide to return the same page. Clear the BM_VALID bit,
* do the StartBufferIO call that BufferAlloc didn't, and proceed.
* We *must* do smgrextend before succeeding, else the page will not
* be reserved by the kernel, and the next P_NEW call will decide to
* return the same page. Clear the BM_VALID bit, do the StartBufferIO
* call that BufferAlloc didn't, and proceed.
*/
if (isLocalBuf)
{
@@ -208,11 +208,12 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
else
{
/*
* Loop to handle the very small possibility that someone
* re-sets BM_VALID between our clearing it and StartBufferIO
* inspecting it.
* Loop to handle the very small possibility that someone re-sets
* BM_VALID between our clearing it and StartBufferIO inspecting
* it.
*/
do {
do
{
LockBufHdr(bufHdr);
Assert(bufHdr->flags & BM_VALID);
bufHdr->flags &= ~BM_VALID;
@@ -311,10 +312,10 @@ BufferAlloc(Relation reln,
{
BufferTag newTag; /* identity of requested block */
uint32 newHash; /* hash value for newTag */
LWLockId newPartitionLock; /* buffer partition lock for it */
LWLockId newPartitionLock; /* buffer partition lock for it */
BufferTag oldTag; /* previous identity of selected buffer */
uint32 oldHash; /* hash value for oldTag */
LWLockId oldPartitionLock; /* buffer partition lock for it */
LWLockId oldPartitionLock; /* buffer partition lock for it */
BufFlags oldFlags;
int buf_id;
volatile BufferDesc *buf;
@@ -620,7 +621,7 @@ InvalidateBuffer(volatile BufferDesc *buf)
{
BufferTag oldTag;
uint32 oldHash; /* hash value for oldTag */
LWLockId oldPartitionLock; /* buffer partition lock for it */
LWLockId oldPartitionLock; /* buffer partition lock for it */
BufFlags oldFlags;
/* Save the original buffer tag before dropping the spinlock */
@@ -629,9 +630,9 @@ InvalidateBuffer(volatile BufferDesc *buf)
UnlockBufHdr(buf);
/*
* Need to compute the old tag's hashcode and partition lock ID.
* XXX is it worth storing the hashcode in BufferDesc so we need
* not recompute it here? Probably not.
* Need to compute the old tag's hashcode and partition lock ID. XXX is it
* worth storing the hashcode in BufferDesc so we need not recompute it
* here? Probably not.
*/
oldHash = BufTableHashCode(&oldTag);
oldPartitionLock = BufMappingPartitionLock(oldHash);
@@ -715,7 +716,7 @@ retry:
*
* Marks buffer contents as dirty (actual write happens later).
*
* Buffer must be pinned and exclusive-locked. (If caller does not hold
* Buffer must be pinned and exclusive-locked. (If caller does not hold
* exclusive lock, then somebody could be in process of writing the buffer,
* leading to risk of bad data written to disk.)
*/
@@ -972,9 +973,9 @@ BufferSync(void)
{
/*
* If in bgwriter, absorb pending fsync requests after each
* WRITES_PER_ABSORB write operations, to prevent overflow of
* the fsync request queue. If not in bgwriter process, this is
* a no-op.
* WRITES_PER_ABSORB write operations, to prevent overflow of the
* fsync request queue. If not in bgwriter process, this is a
* no-op.
*/
if (--absorb_counter <= 0)
{
@@ -1770,9 +1771,9 @@ SetBufferCommitInfoNeedsSave(Buffer buffer)
/*
* This routine might get called many times on the same page, if we are
* making the first scan after commit of an xact that added/deleted many
* tuples. So, be as quick as we can if the buffer is already dirty. We
* tuples. So, be as quick as we can if the buffer is already dirty. We
* do this by not acquiring spinlock if it looks like the status bits are
* already OK. (Note it is okay if someone else clears BM_JUST_DIRTIED
* already OK. (Note it is okay if someone else clears BM_JUST_DIRTIED
* immediately after we look, because the buffer content update is already
* done and will be reflected in the I/O.)
*/
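
The do/while rewrite above is the one hunk in this file worth restating: ReadBuffer keeps clearing BM_VALID until StartBufferIO sees it still clear. Below is a minimal standalone sketch of that clear-and-recheck pattern, with a pthread mutex standing in for the buffer-header spinlock; FakeBufferDesc, start_buffer_io, and clear_valid_for_rewrite are illustrative names, not PostgreSQL's.

#include <stdbool.h>
#include <pthread.h>

#define BM_VALID 0x0001

typedef struct
{
    pthread_mutex_t hdr_lock;   /* stands in for the buffer-header spinlock */
    unsigned        flags;
} FakeBufferDesc;

/* Pretend StartBufferIO: refuses (returns false) if BM_VALID came back. */
static bool
start_buffer_io(FakeBufferDesc *buf)
{
    bool        ok;

    pthread_mutex_lock(&buf->hdr_lock);
    ok = (buf->flags & BM_VALID) == 0;
    pthread_mutex_unlock(&buf->hdr_lock);
    return ok;
}

/*
 * Keep clearing BM_VALID until start_buffer_io observes it clear, closing
 * the small window in which another backend re-sets the bit between our
 * clearing it and the I/O-start check.
 */
static void
clear_valid_for_rewrite(FakeBufferDesc *buf)
{
    do
    {
        pthread_mutex_lock(&buf->hdr_lock);
        buf->flags &= ~BM_VALID;
        pthread_mutex_unlock(&buf->hdr_lock);
    } while (!start_buffer_io(buf));
}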

src/backend/storage/buffer/freelist.c

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/buffer/freelist.c,v 1.56 2006/07/23 18:34:45 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/buffer/freelist.c,v 1.57 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -237,12 +237,12 @@ StrategyInitialize(bool init)
/*
* Initialize the shared buffer lookup hashtable.
*
* Since we can't tolerate running out of lookup table entries, we
* must be sure to specify an adequate table size here. The maximum
* steady-state usage is of course NBuffers entries, but BufferAlloc()
* tries to insert a new entry before deleting the old. In principle
* this could be happening in each partition concurrently, so we
* could need as many as NBuffers + NUM_BUFFER_PARTITIONS entries.
* Since we can't tolerate running out of lookup table entries, we must be
* sure to specify an adequate table size here. The maximum steady-state
* usage is of course NBuffers entries, but BufferAlloc() tries to insert
* a new entry before deleting the old. In principle this could be
* happening in each partition concurrently, so we could need as many as
* NBuffers + NUM_BUFFER_PARTITIONS entries.
*/
InitBufTable(NBuffers + NUM_BUFFER_PARTITIONS);
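
The sizing rule in that comment reduces to one line of arithmetic; a hedged sketch (the function is illustrative, not PostgreSQL's):

/* Steady state needs nbuffers entries; during replacement each partition
 * can briefly hold both the old and the new entry for one buffer. */
static int
buf_table_size(int nbuffers, int num_partitions)
{
    return nbuffers + num_partitions;
}

/* e.g. InitBufTable(buf_table_size(NBuffers, NUM_BUFFER_PARTITIONS)); */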

src/backend/storage/file/fd.c

@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/file/fd.c,v 1.129 2006/08/24 03:15:43 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/file/fd.c,v 1.130 2006/10/04 00:29:57 momjian Exp $
*
* NOTES:
*
@@ -1026,7 +1026,7 @@ retry:
* in which case immediate retry is indicated.
*/
#ifdef WIN32
DWORD error = GetLastError();
DWORD error = GetLastError();
switch (error)
{
@@ -1081,7 +1081,7 @@ retry:
* See comments in FileRead()
*/
#ifdef WIN32
DWORD error = GetLastError();
DWORD error = GetLastError();
switch (error)
{
@@ -1279,8 +1279,8 @@ TryAgain:
}
/*
* TEMPORARY hack to log the Windows error code on fopen failures,
* in hopes of diagnosing some hard-to-reproduce problems.
* TEMPORARY hack to log the Windows error code on fopen failures, in
* hopes of diagnosing some hard-to-reproduce problems.
*/
#ifdef WIN32
{

src/backend/storage/freespace/freespace.c

@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/freespace/freespace.c,v 1.55 2006/09/21 20:31:22 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/freespace/freespace.c,v 1.56 2006/10/04 00:29:57 momjian Exp $
*
*
* NOTES:
@@ -111,7 +111,7 @@ typedef struct FsmCacheRelHeader
RelFileNode key; /* hash key (must be first) */
bool isIndex; /* if true, we store only page numbers */
uint32 avgRequest; /* moving average of space requests */
BlockNumber interestingPages; /* # of pages with useful free space */
BlockNumber interestingPages; /* # of pages with useful free space */
int32 storedPages; /* # of pages stored in arena */
} FsmCacheRelHeader;
@@ -128,8 +128,8 @@ static void CheckFreeSpaceMapStatistics(int elevel, int numRels,
static FSMRelation *lookup_fsm_rel(RelFileNode *rel);
static FSMRelation *create_fsm_rel(RelFileNode *rel);
static void delete_fsm_rel(FSMRelation *fsmrel);
static int realloc_fsm_rel(FSMRelation *fsmrel, BlockNumber interestingPages,
bool isIndex);
static int realloc_fsm_rel(FSMRelation *fsmrel, BlockNumber interestingPages,
bool isIndex);
static void link_fsm_rel_usage(FSMRelation *fsmrel);
static void unlink_fsm_rel_usage(FSMRelation *fsmrel);
static void link_fsm_rel_storage(FSMRelation *fsmrel);
@@ -601,6 +601,7 @@ PrintFreeSpaceMapStatistics(int elevel)
double needed;
LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
/*
* Count total space actually used, as well as the unclamped request total
*/
@@ -1688,9 +1689,9 @@ fsm_calc_request(FSMRelation *fsmrel)
}
/*
* We clamp the per-relation requests to at most half the arena size;
* this is intended to prevent a single bloated relation from crowding
* out FSM service for every other rel.
* We clamp the per-relation requests to at most half the arena size; this
* is intended to prevent a single bloated relation from crowding out FSM
* service for every other rel.
*/
req = Min(req, FreeSpaceMap->totalChunks / 2);
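
Restated as a standalone rule (the function and parameter names are stand-ins for the fields used above):

/* Cap any one relation's request at half the arena, so a single bloated
 * relation cannot crowd out FSM service for every other rel. */
static int
clamp_fsm_request(int req, int total_chunks)
{
    int     cap = total_chunks / 2;

    return (req < cap) ? req : cap;
}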

src/backend/storage/ipc/ipci.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/ipc/ipci.c,v 1.87 2006/08/01 19:03:11 momjian Exp $
* $PostgreSQL: pgsql/src/backend/storage/ipc/ipci.c,v 1.88 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -95,8 +95,8 @@ CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
size = add_size(size, 8192 - (size % 8192));
/*
* The shared memory for add-ins is treated as a separate
* segment, but in reality it is not.
* The shared memory for add-ins is treated as a separate segment, but
* in reality it is not.
*/
size_b4addins = size;
size = add_size(size, AddinShmemSize());
@@ -115,8 +115,8 @@ CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
* Modify hdr to show segment size before add-ins
*/
seghdr->totalsize = size_b4addins;
/*
/*
* Set up segment header sections in each Addin context
*/
InitAddinContexts((void *) ((char *) seghdr + size_b4addins));

src/backend/storage/ipc/procarray.c

@@ -23,7 +23,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.17 2006/09/03 15:59:38 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.18 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -393,7 +393,7 @@ TransactionIdIsActive(TransactionId xid)
* This is used by VACUUM to decide which deleted tuples must be preserved
* in a table. allDbs = TRUE is needed for shared relations, but allDbs =
* FALSE is sufficient for non-shared relations, since only backends in my
* own database could ever see the tuples in them. Also, we can ignore
* own database could ever see the tuples in them. Also, we can ignore
* concurrently running lazy VACUUMs because (a) they must be working on other
* tables, and (b) they don't need to do snapshot-based lookups.
*
@@ -545,13 +545,13 @@ GetSnapshotData(Snapshot snapshot, bool serializable)
globalxmin = xmin = GetTopTransactionId();
/*
* It is sufficient to get shared lock on ProcArrayLock, even if we
* are computing a serializable snapshot and therefore will be setting
* It is sufficient to get shared lock on ProcArrayLock, even if we are
* computing a serializable snapshot and therefore will be setting
* MyProc->xmin. This is because any two backends that have overlapping
* shared holds on ProcArrayLock will certainly compute the same xmin
* (since no xact, in particular not the oldest, can exit the set of
* running transactions while we hold ProcArrayLock --- see further
* discussion just below). So it doesn't matter whether another backend
* discussion just below). So it doesn't matter whether another backend
* concurrently doing GetSnapshotData or GetOldestXmin sees our xmin as
* set or not; he'd compute the same xmin for himself either way.
*/
@@ -595,8 +595,8 @@ GetSnapshotData(Snapshot snapshot, bool serializable)
/*
* Ignore my own proc (dealt with my xid above), procs not running a
* transaction, xacts started since we read the next transaction
* ID, and xacts executing LAZY VACUUM. There's no need to store XIDs
* transaction, xacts started since we read the next transaction ID,
* and xacts executing LAZY VACUUM. There's no need to store XIDs
* above what we got from ReadNewTransactionId, since we'll treat them
* as running anyway. We also assume that such xacts can't compute an
* xmin older than ours, so they needn't be considered in computing
@@ -625,18 +625,17 @@ GetSnapshotData(Snapshot snapshot, bool serializable)
* their parent, so no need to check them against xmin.
*
* The other backend can add more subxids concurrently, but cannot
* remove any. Hence it's important to fetch nxids just once.
* Should be safe to use memcpy, though. (We needn't worry about
* missing any xids added concurrently, because they must postdate
* xmax.)
* remove any. Hence it's important to fetch nxids just once. Should
* be safe to use memcpy, though. (We needn't worry about missing any
* xids added concurrently, because they must postdate xmax.)
*/
if (subcount >= 0)
{
if (proc->subxids.overflowed)
subcount = -1; /* overflowed */
subcount = -1; /* overflowed */
else
{
int nxids = proc->subxids.nxids;
int nxids = proc->subxids.nxids;
if (nxids > 0)
{
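
The subxid hunk above leans on a read-once discipline: another backend may append subxids concurrently but never removes any, so fetching nxids a single time and copying that many entries is safe. A self-contained sketch of the same pattern, under that append-only assumption (all names here are illustrative):

#include <string.h>

/*
 * 'count' may be incremented by a concurrent writer after an append, but
 * entries are never removed.  Reading the count exactly once and copying
 * that many items can miss a very recent append (which, in the
 * GetSnapshotData case, must postdate our xmax anyway) but never reads
 * junk.
 */
static int
copy_published_prefix(const int *src, const volatile int *count, int *dst)
{
    int     n = *count;         /* fetch the length exactly once */

    if (n > 0)
        memcpy(dst, src, n * sizeof(int));
    return n;
}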

src/backend/storage/ipc/shmem.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.96 2006/09/27 18:40:09 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.97 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,14 +62,14 @@
* hash bucket garbage collector if need be. Right now, it seems
* unnecessary.
*
* (e) Add-ins can request their own logical shared memory segments
* by calling RegisterAddinContext() from the preload-libraries hook.
* Each call establishes a uniquely named add-in shared memopry
* context which will be set up as part of postgres intialisation.
* Memory can be allocated from these contexts using
* ShmemAllocFromContext(), and can be reset to its initial condition
* using ShmemResetContext(). Also, RegisterAddinLWLock(LWLockid *lock_ptr)
* can be used to request that a LWLock be allocated, placed into *lock_ptr.
* (e) Add-ins can request their own logical shared memory segments
* by calling RegisterAddinContext() from the preload-libraries hook.
* Each call establishes a uniquely named add-in shared memopry
* context which will be set up as part of postgres intialisation.
* Memory can be allocated from these contexts using
* ShmemAllocFromContext(), and can be reset to its initial condition
* using ShmemResetContext(). Also, RegisterAddinLWLock(LWLockid *lock_ptr)
* can be used to request that a LWLock be allocated, placed into *lock_ptr.
*/
#include "postgres.h"
@@ -98,9 +98,9 @@ static HTAB *ShmemIndex = NULL; /* primary index hashtable for shmem */
/* Structures and globals for managing add-in shared memory contexts */
typedef struct context
{
char *name;
Size size;
PGShmemHeader *seg_hdr;
char *name;
Size size;
PGShmemHeader *seg_hdr;
struct context *next;
} ContextNode;
@@ -138,9 +138,9 @@ InitShmemAllocation(void)
Assert(shmhdr != NULL);
/*
* Initialize the spinlock used by ShmemAlloc. We have to do the
* space allocation the hard way, since obviously ShmemAlloc can't
* be called yet.
* Initialize the spinlock used by ShmemAlloc. We have to do the space
* allocation the hard way, since obviously ShmemAlloc can't be called
* yet.
*/
ShmemLock = (slock_t *) (((char *) shmhdr) + shmhdr->freeoffset);
shmhdr->freeoffset += MAXALIGN(sizeof(slock_t));
@@ -153,22 +153,22 @@ InitShmemAllocation(void)
ShmemIndex = (HTAB *) NULL;
/*
* Initialize ShmemVariableCache for transaction manager.
* (This doesn't really belong here, but not worth moving.)
* Initialize ShmemVariableCache for transaction manager. (This doesn't
* really belong here, but not worth moving.)
*/
ShmemVariableCache = (VariableCache)
ShmemAlloc(sizeof(*ShmemVariableCache));
ShmemAlloc(sizeof(*ShmemVariableCache));
memset(ShmemVariableCache, 0, sizeof(*ShmemVariableCache));
}
/*
* RegisterAddinContext -- Register the requirement for a named shared
* memory context.
* memory context.
*/
void
RegisterAddinContext(const char *name, Size size)
{
char *newstr = malloc(strlen(name) + 1);
char *newstr = malloc(strlen(name) + 1);
ContextNode *node = malloc(sizeof(ContextNode));
strcpy(newstr, name);
@@ -185,7 +185,7 @@ RegisterAddinContext(const char *name, Size size)
/*
* ContextFromName -- Return the ContextNode for the given named
* context, or NULL if not found.
* context, or NULL if not found.
*/
static ContextNode *
ContextFromName(const char *name)
@@ -203,7 +203,7 @@ ContextFromName(const char *name)
/*
* InitAddinContexts -- Initialise the registered addin shared memory
* contexts.
* contexts.
*/
void
InitAddinContexts(void *start)
@@ -218,7 +218,7 @@ InitAddinContexts(void *start)
next_segment->totalsize = context->size;
next_segment->freeoffset = MAXALIGN(sizeof(PGShmemHeader));
next_segment = (PGShmemHeader *)
next_segment = (PGShmemHeader *)
((char *) next_segment + context->size);
context = context->next;
}
@@ -245,7 +245,7 @@ ShmemResetContext(const char *name)
/*
* AddinShmemSize -- Report how much shared memory has been registered
* for add-ins.
* for add-ins.
*/
Size
AddinShmemSize(void)
@@ -265,15 +265,15 @@ AddinShmemSize(void)
void *
ShmemAllocFromContext(Size size, const char *context_name)
{
Size newStart;
Size newFree;
void *newSpace;
ContextNode *context;
Size newStart;
Size newFree;
void *newSpace;
ContextNode *context;
/* use volatile pointer to prevent code rearrangement */
volatile PGShmemHeader *shmemseghdr = ShmemSegHdr;
/*
/*
* if context_name is provided, allocate from the named context
*/
if (context_name)
@@ -480,8 +480,8 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
* be trying to init the shmem index itself.
*
* Notice that the ShmemIndexLock is released before the shmem
* index has been initialized. This should be OK because no
* other process can be accessing shared memory yet.
* index has been initialized. This should be OK because no other
* process can be accessing shared memory yet.
*/
Assert(shmemseghdr->indexoffset == 0);
structPtr = ShmemAlloc(size);
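
InitShmemAllocation's "hard way" allocation earlier in this file is a one-off bump of the segment's free offset, necessary because ShmemAlloc itself depends on the spinlock being carved out first. A minimal analog, assuming a simple bump allocator (SegHdr, bootstrap_alloc, and MY_MAXALIGN are made-up stand-ins):

#include <stddef.h>

#define MY_MAXALIGN(x)  (((x) + 7) & ~(size_t) 7)   /* stand-in for MAXALIGN */

typedef struct
{
    char       *base;           /* start of the shared segment */
    size_t      freeoffset;     /* first unallocated byte */
    size_t      totalsize;
} SegHdr;

/* Hand out the next chunk by bumping freeoffset directly; no lock is
 * taken, because this runs before any allocator lock exists. */
static void *
bootstrap_alloc(SegHdr *hdr, size_t size)
{
    void       *p = hdr->base + hdr->freeoffset;

    hdr->freeoffset += MY_MAXALIGN(size);
    return (hdr->freeoffset <= hdr->totalsize) ? p : NULL;
}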

src/backend/storage/lmgr/lmgr.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/lmgr/lmgr.c,v 1.88 2006/09/22 23:20:13 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/lmgr/lmgr.c,v 1.89 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -54,7 +54,7 @@ RelationInitLockInfo(Relation relation)
static inline void
SetLocktagRelationOid(LOCKTAG *tag, Oid relid)
{
Oid dbid;
Oid dbid;
if (IsSharedRelation(relid))
dbid = InvalidOid;
@@ -67,7 +67,7 @@ SetLocktagRelationOid(LOCKTAG *tag, Oid relid)
/*
* LockRelationOid
*
* Lock a relation given only its OID. This should generally be used
* Lock a relation given only its OID. This should generally be used
* before attempting to open the relation's relcache entry.
*/
void
@@ -81,13 +81,13 @@ LockRelationOid(Oid relid, LOCKMODE lockmode)
res = LockAcquire(&tag, lockmode, false, false);
/*
* Now that we have the lock, check for invalidation messages, so that
* we will update or flush any stale relcache entry before we try to use
* it. We can skip this in the not-uncommon case that we already had
* the same type of lock being requested, since then no one else could
* have modified the relcache entry in an undesirable way. (In the
* case where our own xact modifies the rel, the relcache update happens
* via CommandCounterIncrement, not here.)
* Now that we have the lock, check for invalidation messages, so that we
* will update or flush any stale relcache entry before we try to use it.
* We can skip this in the not-uncommon case that we already had the same
* type of lock being requested, since then no one else could have
* modified the relcache entry in an undesirable way. (In the case where
* our own xact modifies the rel, the relcache update happens via
* CommandCounterIncrement, not here.)
*/
if (res != LOCKACQUIRE_ALREADY_HELD)
AcceptInvalidationMessages();
@@ -116,8 +116,8 @@ ConditionalLockRelationOid(Oid relid, LOCKMODE lockmode)
return false;
/*
* Now that we have the lock, check for invalidation messages; see
* notes in LockRelationOid.
* Now that we have the lock, check for invalidation messages; see notes
* in LockRelationOid.
*/
if (res != LOCKACQUIRE_ALREADY_HELD)
AcceptInvalidationMessages();
@@ -176,8 +176,8 @@ LockRelation(Relation relation, LOCKMODE lockmode)
res = LockAcquire(&tag, lockmode, false, false);
/*
* Now that we have the lock, check for invalidation messages; see
* notes in LockRelationOid.
* Now that we have the lock, check for invalidation messages; see notes
* in LockRelationOid.
*/
if (res != LOCKACQUIRE_ALREADY_HELD)
AcceptInvalidationMessages();
@@ -206,8 +206,8 @@ ConditionalLockRelation(Relation relation, LOCKMODE lockmode)
return false;
/*
* Now that we have the lock, check for invalidation messages; see
* notes in LockRelationOid.
* Now that we have the lock, check for invalidation messages; see notes
* in LockRelationOid.
*/
if (res != LOCKACQUIRE_ALREADY_HELD)
AcceptInvalidationMessages();
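
Every hunk above reflows the same comment because lmgr.c repeats one idiom: after any lock acquisition that might have waited, process invalidation messages before trusting the relcache. Boiled down to a sketch (the enum and function pointers are stand-ins, not the real lmgr API):

typedef enum
{
    ACQUIRED,                   /* got the lock, possibly after waiting */
    ALREADY_HELD                /* we already held this same lock */
} AcquireResult;

/*
 * Only an acquisition that could have waited can leave us with a stale
 * relcache entry, so the invalidation scan is skipped when the lock was
 * already held in the requested mode.
 */
static void
lock_then_revalidate(AcquireResult (*acquire) (void),
                     void (*accept_invals) (void))
{
    if (acquire() != ALREADY_HELD)
        accept_invals();
}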

src/backend/storage/lmgr/lock.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.173 2006/09/18 22:40:36 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.174 2006/10/04 00:29:57 momjian Exp $
*
* NOTES
* A lock table is a shared memory hash table. When
@@ -112,7 +112,7 @@ static const char *const lock_mode_names[] =
};
#ifndef LOCK_DEBUG
static bool Dummy_trace = false;
static bool Dummy_trace = false;
#endif
static const LockMethodData default_lockmethod = {
@@ -290,8 +290,8 @@ InitLocks(void)
init_table_size = max_table_size / 2;
/*
* Allocate hash table for LOCK structs. This stores
* per-locked-object information.
* Allocate hash table for LOCK structs. This stores per-locked-object
* information.
*/
MemSet(&info, 0, sizeof(info));
info.keysize = sizeof(LOCKTAG);
@@ -331,8 +331,8 @@ InitLocks(void)
elog(FATAL, "could not initialize proclock hash table");
/*
* Allocate non-shared hash table for LOCALLOCK structs. This stores
* lock counts and resource owner information.
* Allocate non-shared hash table for LOCALLOCK structs. This stores lock
* counts and resource owner information.
*
* The non-shared table could already exist in this process (this occurs
* when the postmaster is recreating shared memory after a backend crash).
@@ -396,8 +396,8 @@ static uint32
proclock_hash(const void *key, Size keysize)
{
const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
uint32 lockhash;
Datum procptr;
uint32 lockhash;
Datum procptr;
Assert(keysize == sizeof(PROCLOCKTAG));
@@ -407,9 +407,9 @@ proclock_hash(const void *key, Size keysize)
/*
* To make the hash code also depend on the PGPROC, we xor the proc
* struct's address into the hash code, left-shifted so that the
* partition-number bits don't change. Since this is only a hash,
* we don't care if we lose high-order bits of the address; use
* an intermediate variable to suppress cast-pointer-to-int warnings.
* partition-number bits don't change. Since this is only a hash, we
* don't care if we lose high-order bits of the address; use an
* intermediate variable to suppress cast-pointer-to-int warnings.
*/
procptr = PointerGetDatum(proclocktag->myProc);
lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
@@ -426,8 +426,8 @@
static inline uint32
ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
{
uint32 lockhash = hashcode;
Datum procptr;
uint32 lockhash = hashcode;
Datum procptr;
/*
* This must match proclock_hash()!
@@ -1117,7 +1117,7 @@ WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
memcpy(new_status, old_status, len);
strcpy(new_status + len, " waiting");
set_ps_display(new_status, false);
new_status[len] = '\0'; /* truncate off " waiting" */
new_status[len] = '\0'; /* truncate off " waiting" */
}
pgstat_report_waiting(true);
@@ -1549,12 +1549,12 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
LockTagHashCode(&lock->tag),
wakeupNeeded);
next_item:
next_item:
proclock = nextplock;
} /* loop over PROCLOCKs within this partition */
} /* loop over PROCLOCKs within this partition */
LWLockRelease(partitionLock);
} /* loop over partitions */
} /* loop over partitions */
#ifdef LOCK_DEBUG
if (*(lockMethodTable->trace_flag))
@@ -1726,8 +1726,8 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
if (!lock)
{
/*
* If the lock object doesn't exist, there is nothing holding a
* lock on this lockable object.
* If the lock object doesn't exist, there is nothing holding a lock
* on this lockable object.
*/
LWLockRelease(partitionLock);
return NIL;
@@ -1747,7 +1747,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
{
if (conflictMask & proclock->holdMask)
{
PGPROC *proc = proclock->tag.myProc;
PGPROC *proc = proclock->tag.myProc;
/* A backend never blocks itself */
if (proc != MyProc)
@@ -1963,7 +1963,7 @@ PostPrepare_Locks(TransactionId xid)
/*
* We cannot simply modify proclock->tag.myProc to reassign
* ownership of the lock, because that's part of the hash key and
* the proclock would then be in the wrong hash chain. So, unlink
* the proclock would then be in the wrong hash chain. So, unlink
* and delete the old proclock; create a new one with the right
* contents; and link it into place. We do it in this order to be
* certain we won't run out of shared memory (the way dynahash.c
@@ -1987,7 +1987,7 @@ PostPrepare_Locks(TransactionId xid)
(void *) &proclocktag,
HASH_ENTER_NULL, &found);
if (!newproclock)
ereport(PANIC, /* should not happen */
ereport(PANIC, /* should not happen */
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errdetail("Not enough memory for reassigning the prepared transaction's locks.")));
@@ -2017,12 +2017,12 @@ PostPrepare_Locks(TransactionId xid)
Assert((newproclock->holdMask & holdMask) == 0);
newproclock->holdMask |= holdMask;
next_item:
next_item:
proclock = nextplock;
} /* loop over PROCLOCKs within this partition */
} /* loop over PROCLOCKs within this partition */
LWLockRelease(partitionLock);
} /* loop over partitions */
} /* loop over partitions */
END_CRIT_SECTION();
}
@@ -2084,10 +2084,11 @@ GetLockStatusData(void)
* operate one partition at a time if we want to deliver a self-consistent
* view of the state.
*
* Since this is a read-only operation, we take shared instead of exclusive
* lock. There's not a whole lot of point to this, because all the normal
* operations require exclusive lock, but it doesn't hurt anything either.
* It will at least allow two backends to do GetLockStatusData in parallel.
* Since this is a read-only operation, we take shared instead of
* exclusive lock. There's not a whole lot of point to this, because all
* the normal operations require exclusive lock, but it doesn't hurt
* anything either. It will at least allow two backends to do
* GetLockStatusData in parallel.
*
* Must grab LWLocks in partition-number order to avoid LWLock deadlock.
*/
@@ -2119,7 +2120,7 @@ GetLockStatusData(void)
}
/* And release locks */
for (i = NUM_LOCK_PARTITIONS; --i >= 0; )
for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
LWLockRelease(FirstLockMgrLock + i);
Assert(el == data->nelements);
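
The proclock_hash hunk above describes a trick compact enough to restate on its own: fold the PGPROC pointer into the lock's hash without disturbing the low-order bits that select the lock partition. A standalone sketch, assuming a shift of 4 (the real code uses LOG2_NUM_LOCK_PARTITIONS, whose value this diff does not show):

#include <stdint.h>

#define LOG2_NUM_PARTITIONS 4   /* illustrative; must match the real
                                 * partition count's log2 */

/*
 * XOR the pointer in left-shifted, so the partition-selection bits of the
 * lock hash stay untouched; the intermediate uintptr_t suppresses
 * cast-pointer-to-int warnings, as the comment above notes.  Losing the
 * pointer's high-order bits is fine, since this is only a hash.
 */
static uint32_t
proclock_hash_sketch(uint32_t lock_hash, const void *proc)
{
    uintptr_t   procptr = (uintptr_t) proc;

    return lock_hash ^ ((uint32_t) procptr << LOG2_NUM_PARTITIONS);
}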

src/backend/storage/lmgr/lwlock.c

@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.45 2006/08/07 21:56:25 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.46 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -30,7 +30,7 @@
#include "storage/spin.h"
static int NumAddinLWLocks(void);
static int NumAddinLWLocks(void);
static void AssignAddinLWLocks(void);
@@ -95,23 +95,23 @@ static int *ex_acquire_counts;
static int *block_counts;
#endif
/*
/*
* Structures and globals to allow add-ins to register for their own
* lwlocks from the preload-libraries hook.
*/
typedef struct LWLockNode
{
LWLockId *lock;
LWLockId *lock;
struct LWLockNode *next;
} LWLockNode;
static LWLockNode *addin_locks = NULL;
static int num_addin_locks = 0;
static int num_addin_locks = 0;
/*
* RegisterAddinLWLock() --- Allow an andd-in to request a LWLock
* from the preload-libraries hook.
* from the preload-libraries hook.
*/
void
RegisterAddinLWLock(LWLockId *lock)
@@ -198,8 +198,7 @@ print_lwlock_stats(int code, Datum arg)
LWLockRelease(0);
}
#endif /* LWLOCK_STATS */
#endif /* LWLOCK_STATS */
/*
@@ -306,9 +305,8 @@ CreateLWLocks(void)
LWLockCounter[0] = (int) NumFixedLWLocks;
LWLockCounter[1] = numLocks;
/*
* Allocate LWLocks for those add-ins that have explicitly requested
* them.
/*
* Allocate LWLocks for those add-ins that have explicitly requested them.
*/
AssignAddinLWLocks();
}
@@ -364,8 +362,8 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
/* Set up local count state first time through in a given process */
if (counts_for_pid != MyProcPid)
{
int *LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
int numLocks = LWLockCounter[1];
int *LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
int numLocks = LWLockCounter[1];
sh_acquire_counts = calloc(numLocks, sizeof(int));
ex_acquire_counts = calloc(numLocks, sizeof(int));
@@ -378,7 +376,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
ex_acquire_counts[lockid]++;
else
sh_acquire_counts[lockid]++;
#endif /* LWLOCK_STATS */
#endif /* LWLOCK_STATS */
/*
* We can't wait if we haven't got a PGPROC. This should only occur
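
The LWLockNode hunks above show nearly the whole registration mechanism; completing them into a runnable fragment takes only the push function. In this sketch, register_addin_lwlock is a hypothetical stand-in for RegisterAddinLWLock, whose body the diff does not include:

#include <stdlib.h>

typedef int LWLockId;

typedef struct LWLockNode
{
    LWLockId   *lock;
    struct LWLockNode *next;
} LWLockNode;

static LWLockNode *addin_locks = NULL;
static int  num_addin_locks = 0;

/* Remember where to store a lock id now; the ids themselves are assigned
 * later, once shared memory and the main LWLock array exist. */
static void
register_addin_lwlock(LWLockId *lock)
{
    LWLockNode *node = malloc(sizeof(LWLockNode));

    if (node == NULL)
        return;                 /* out of memory; real code would report */
    node->lock = lock;
    node->next = addin_locks;
    addin_locks = node;
    num_addin_locks++;
}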

src/backend/storage/lmgr/proc.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.179 2006/07/30 02:07:18 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.180 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -149,8 +149,8 @@ InitProcGlobal(void)
Assert(!found);
/*
* Create the PGPROC structures for dummy (bgwriter) processes, too.
* These do not get linked into the freeProcs list.
* Create the PGPROC structures for dummy (bgwriter) processes, too. These
* do not get linked into the freeProcs list.
*/
DummyProcs = (PGPROC *)
ShmemInitStruct("DummyProcs", NUM_DUMMY_PROCS * sizeof(PGPROC),
@@ -183,7 +183,7 @@ InitProcGlobal(void)
MemSet(DummyProcs, 0, NUM_DUMMY_PROCS * sizeof(PGPROC));
for (i = 0; i < NUM_DUMMY_PROCS; i++)
{
DummyProcs[i].pid = 0; /* marks dummy proc as not in use */
DummyProcs[i].pid = 0; /* marks dummy proc as not in use */
PGSemaphoreCreate(&(DummyProcs[i].sem));
}
@@ -268,7 +268,7 @@ InitProcess(void)
/*
* We might be reusing a semaphore that belonged to a failed process. So
* be careful and reinitialize its value here. (This is not strictly
* be careful and reinitialize its value here. (This is not strictly
* necessary anymore, but seems like a good idea for cleanliness.)
*/
PGSemaphoreReset(&MyProc->sem);
@@ -298,9 +298,9 @@ InitProcessPhase2(void)
Assert(MyProc != NULL);
/*
* We should now know what database we're in, so advertise that. (We
* need not do any locking here, since no other backend can yet see
* our PGPROC.)
* We should now know what database we're in, so advertise that. (We need
* not do any locking here, since no other backend can yet see our
* PGPROC.)
*/
Assert(OidIsValid(MyDatabaseId));
MyProc->databaseId = MyDatabaseId;
@@ -400,7 +400,7 @@ InitDummyProcess(void)
/*
* We might be reusing a semaphore that belonged to a failed process. So
* be careful and reinitialize its value here. (This is not strictly
* be careful and reinitialize its value here. (This is not strictly
* necessary anymore, but seems like a good idea for cleanliness.)
*/
PGSemaphoreReset(&MyProc->sem);
@@ -489,12 +489,12 @@ LockWaitCancel(void)
/*
* We used to do PGSemaphoreReset() here to ensure that our proc's wait
* semaphore is reset to zero. This prevented a leftover wakeup signal
* from remaining in the semaphore if someone else had granted us the
* lock we wanted before we were able to remove ourselves from the
* wait-list. However, now that ProcSleep loops until waitStatus changes,
* a leftover wakeup signal isn't harmful, and it seems not worth
* expending cycles to get rid of a signal that most likely isn't there.
* semaphore is reset to zero. This prevented a leftover wakeup signal
* from remaining in the semaphore if someone else had granted us the lock
* we wanted before we were able to remove ourselves from the wait-list.
* However, now that ProcSleep loops until waitStatus changes, a leftover
* wakeup signal isn't harmful, and it seems not worth expending cycles to
* get rid of a signal that most likely isn't there.
*/
/*
@@ -810,11 +810,11 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
/*
* If someone wakes us between LWLockRelease and PGSemaphoreLock,
* PGSemaphoreLock will not block. The wakeup is "saved" by the semaphore
* implementation. While this is normally good, there are cases where
* a saved wakeup might be leftover from a previous operation (for
* example, we aborted ProcWaitForSignal just before someone did
* ProcSendSignal). So, loop to wait again if the waitStatus shows
* we haven't been granted nor denied the lock yet.
* implementation. While this is normally good, there are cases where a
* saved wakeup might be leftover from a previous operation (for example,
* we aborted ProcWaitForSignal just before someone did ProcSendSignal).
* So, loop to wait again if the waitStatus shows we haven't been granted
* nor denied the lock yet.
*
* We pass interruptOK = true, which eliminates a window in which
* cancel/die interrupts would be held off undesirably. This is a promise
@@ -824,7 +824,8 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
* updating the locallock table, but if we lose control to an error,
* LockWaitCancel will fix that up.
*/
do {
do
{
PGSemaphoreLock(&MyProc->sem, true);
} while (MyProc->waitStatus == STATUS_WAITING);
@@ -835,9 +836,9 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
elog(FATAL, "could not disable timer for process wakeup");
/*
* Re-acquire the lock table's partition lock. We have to do this to
* hold off cancel/die interrupts before we can mess with lockAwaited
* (else we might have a missed or duplicated locallock update).
* Re-acquire the lock table's partition lock. We have to do this to hold
* off cancel/die interrupts before we can mess with lockAwaited (else we
* might have a missed or duplicated locallock update).
*/
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
@@ -977,8 +978,8 @@ CheckDeadLock(void)
int i;
/*
* Acquire exclusive lock on the entire shared lock data structures.
* Must grab LWLocks in partition-number order to avoid LWLock deadlock.
* Acquire exclusive lock on the entire shared lock data structures. Must
* grab LWLocks in partition-number order to avoid LWLock deadlock.
*
* Note that the deadlock check interrupt had better not be enabled
* anywhere that this process itself holds lock partition locks, else this
@@ -1018,7 +1019,7 @@ CheckDeadLock(void)
/*
* Oops. We have a deadlock.
*
* Get this process out of wait state. (Note: we could do this more
* Get this process out of wait state. (Note: we could do this more
* efficiently by relying on lockAwaited, but use this coding to preserve
* the flexibility to kill some other transaction than the one detecting
* the deadlock.)
@@ -1047,12 +1048,12 @@ CheckDeadLock(void)
*/
/*
* Release locks acquired at head of routine. Order is not critical,
* so do it back-to-front to avoid waking another CheckDeadLock instance
* Release locks acquired at head of routine. Order is not critical, so
* do it back-to-front to avoid waking another CheckDeadLock instance
* before it can get all the locks.
*/
check_done:
for (i = NUM_LOCK_PARTITIONS; --i >= 0; )
for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
LWLockRelease(FirstLockMgrLock + i);
}
@@ -1063,10 +1064,10 @@ check_done:
* This can share the semaphore normally used for waiting for locks,
* since a backend could never be waiting for a lock and a signal at
* the same time. As with locks, it's OK if the signal arrives just
* before we actually reach the waiting state. Also as with locks,
* before we actually reach the waiting state. Also as with locks,
* it's necessary that the caller be robust against bogus wakeups:
* always check that the desired state has occurred, and wait again
* if not. This copes with possible "leftover" wakeups.
* if not. This copes with possible "leftover" wakeups.
*/
void
ProcWaitForSignal(void)
@@ -1122,10 +1123,10 @@ enable_sig_alarm(int delayms, bool is_statement_timeout)
* interval will have elapsed and so this doesn't matter, but there
* are corner cases (involving multi-statement query strings with
* embedded COMMIT or ROLLBACK) where we might re-initialize the
* statement timeout long after initial receipt of the message.
* In such cases the enforcement of the statement timeout will be
* a bit inconsistent. This annoyance is judged not worth the cost
* of performing an additional gettimeofday() here.
* statement timeout long after initial receipt of the message. In
* such cases the enforcement of the statement timeout will be a bit
* inconsistent. This annoyance is judged not worth the cost of
* performing an additional gettimeofday() here.
*/
Assert(!deadlock_timeout_active);
fin_time = GetCurrentStatementStartTimestamp();
@@ -1253,6 +1254,7 @@ CheckStatementTimeout(void)
TimestampDifference(now, statement_fin_time,
&secs, &usecs);
/*
* It's possible that the difference is less than a microsecond;
* ensure we don't cancel, rather than set, the interrupt.
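
The reformatted do/while in ProcSleep is the detail worth pausing on: a leftover wakeup stored in the semaphore merely costs one extra trip around the loop. A self-contained analog using POSIX semaphores (all names here are illustrative):

#include <semaphore.h>

#define STATUS_WAITING 0

/*
 * A stale wakeup (e.g. one "saved" in the semaphore by an earlier
 * operation) makes sem_wait return early; we simply re-wait until the
 * status has actually moved off STATUS_WAITING, i.e. the lock has been
 * granted or denied.
 */
static void
wait_until_granted(sem_t *sem, volatile const int *wait_status)
{
    do
    {
        sem_wait(sem);
    } while (*wait_status == STATUS_WAITING);
}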

src/backend/storage/lmgr/s_lock.c

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/lmgr/s_lock.c,v 1.46 2006/07/14 14:52:23 momjian Exp $
* $PostgreSQL: pgsql/src/backend/storage/lmgr/s_lock.c,v 1.47 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -119,7 +119,7 @@ s_lock(volatile slock_t *lock, const char *file, int line)
/* increase delay by a random fraction between 1X and 2X */
cur_delay += (int) (cur_delay *
((double) random() / (double) MAX_RANDOM_VALUE) + 0.5);
((double) random() / (double) MAX_RANDOM_VALUE) + 0.5);
/* wrap back to minimum delay when max is exceeded */
if (cur_delay > MAX_DELAY_MSEC)
cur_delay = MIN_DELAY_MSEC;
@@ -280,7 +280,6 @@ tas_dummy() /* really means: extern int tas(slock_t
asm(" .data");
}
#endif /* sun3 */
#endif /* not __GNUC__ */
#endif /* HAVE_SPINLOCKS */
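
The s_lock change is pure reindentation, but the backoff expression it touches is easy to state on its own: grow the delay by a random factor between 1X and 2X, wrapping to the minimum once the cap is exceeded. A sketch with assumed bounds (the MIN/MAX constants here are guesses, not necessarily 8.2's values):

#include <stdlib.h>

#define MIN_DELAY_MSEC      1
#define MAX_DELAY_MSEC      1000
#define MAX_RANDOM_VALUE    ((double) 0x7FFFFFFF)

/* Increase delay by a random fraction between 1X and 2X, wrapping back
 * to the minimum when the maximum is exceeded. */
static int
next_spin_delay(int cur_delay)
{
    cur_delay += (int) (cur_delay *
                        ((double) random() / MAX_RANDOM_VALUE) + 0.5);
    if (cur_delay > MAX_DELAY_MSEC)
        cur_delay = MIN_DELAY_MSEC;
    return cur_delay;
}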

src/backend/storage/page/itemptr.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/page/itemptr.c,v 1.18 2006/08/25 04:06:53 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/page/itemptr.c,v 1.19 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,7 +51,7 @@ ItemPointerCompare(ItemPointer arg1, ItemPointer arg2)
*/
BlockNumber b1 = BlockIdGetBlockNumber(&(arg1->ip_blkid));
BlockNumber b2 = BlockIdGetBlockNumber(&(arg2->ip_blkid));
if (b1 < b2)
return -1;
else if (b1 > b2)
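
The hunk shows only the block-number half of ItemPointerCompare; the full ordering is block number first, then offset within the block. A hedged standalone version (Tid and its fields are stand-ins for ItemPointerData and its accessors):

#include <stdint.h>

typedef struct
{
    uint32_t    block;          /* cf. BlockIdGetBlockNumber() */
    uint16_t    offset;         /* cf. ip_posid */
} Tid;

/* Order by block number, then by offset within the block. */
static int
tid_compare(const Tid *a, const Tid *b)
{
    if (a->block != b->block)
        return (a->block < b->block) ? -1 : 1;
    if (a->offset != b->offset)
        return (a->offset < b->offset) ? -1 : 1;
    return 0;
}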

src/backend/storage/smgr/md.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.121 2006/07/14 05:28:28 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.122 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -735,8 +735,8 @@ mdsync(void)
* If in bgwriter, absorb pending requests every so often to
* prevent overflow of the fsync request queue. The hashtable
* code does not specify whether entries added by this will be
* visited by our search, but we don't really care: it's OK if
* we do, and OK if we don't.
* visited by our search, but we don't really care: it's OK if we
* do, and OK if we don't.
*/
if (--absorb_counter <= 0)
{
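
mdsync's hunk, like BufferSync's earlier, is the absorb-counter idiom: a simple countdown that periodically drains the fsync request queue so it cannot overflow. Its skeleton, with an assumed counter value (the real WRITES_PER_ABSORB may differ):

#define WRITES_PER_ABSORB 1000  /* illustrative */

static int  absorb_counter = WRITES_PER_ABSORB;

/* Every WRITES_PER_ABSORB operations, drain pending fsync requests; the
 * absorb callback is expected to be a no-op outside the bgwriter. */
static void
maybe_absorb_fsync_requests(void (*absorb) (void))
{
    if (--absorb_counter <= 0)
    {
        absorb();
        absorb_counter = WRITES_PER_ABSORB;
    }
}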

src/backend/storage/smgr/smgr.c

@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.100 2006/07/14 14:52:23 momjian Exp $
* $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.101 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -471,10 +471,10 @@ smgr_internal_unlink(RelFileNode rnode, int which, bool isTemp, bool isRedo)
FreeSpaceMapForgetRel(&rnode);
/*
* Tell the stats collector to forget it immediately, too. Skip this
* in recovery mode, since the stats collector likely isn't running
* (and if it is, pgstat.c will get confused because we aren't a real
* backend process).
* Tell the stats collector to forget it immediately, too. Skip this in
* recovery mode, since the stats collector likely isn't running (and if
* it is, pgstat.c will get confused because we aren't a real backend
* process).
*/
if (!InRecovery)
pgstat_drop_relation(rnode.relNode);
@@ -960,16 +960,16 @@ smgr_desc(StringInfo buf, uint8 xl_info, char *rec)
xl_smgr_create *xlrec = (xl_smgr_create *) rec;
appendStringInfo(buf, "file create: %u/%u/%u",
xlrec->rnode.spcNode, xlrec->rnode.dbNode,
xlrec->rnode.relNode);
xlrec->rnode.spcNode, xlrec->rnode.dbNode,
xlrec->rnode.relNode);
}
else if (info == XLOG_SMGR_TRUNCATE)
{
xl_smgr_truncate *xlrec = (xl_smgr_truncate *) rec;
appendStringInfo(buf, "file truncate: %u/%u/%u to %u blocks",
xlrec->rnode.spcNode, xlrec->rnode.dbNode,
xlrec->rnode.relNode, xlrec->blkno);
xlrec->rnode.spcNode, xlrec->rnode.dbNode,
xlrec->rnode.relNode, xlrec->blkno);
}
else
appendStringInfo(buf, "UNKNOWN");