pgindent run on all C files. Java run to follow. initdb/regression
tests pass.
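
The hunks below are typical pgindent output: declaration alignment is normalized, binary operators get surrounding spaces, and multi-line comments are re-filled to the standard width. A minimal before/after sketch, built from two lines that actually appear in the freespace.c hunks below:

	/* before pgindent */
	fsmrel->threshold = BLCKSZ/2;	/* starting point for new entry */
	fsmrel->nextPage = pageIndex+1;

	/* after pgindent */
	fsmrel->threshold = BLCKSZ / 2;	/* starting point for new entry */
	fsmrel->nextPage = pageIndex + 1;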
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/buffer/buf_init.c,v 1.45 2001/10/01 05:36:13 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/buffer/buf_init.c,v 1.46 2001/10/25 05:49:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -183,8 +183,8 @@ InitBufferPool(void)

/*
* link the buffers into a circular, doubly-linked list to
* initialize free list, and initialize the buffer headers.
* Still don't know anything about replacement strategy in this file.
* initialize free list, and initialize the buffer headers. Still
* don't know anything about replacement strategy in this file.
*/
for (i = 0; i < Data_Descriptors; block += BLCKSZ, buf++, i++)
{

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/buffer/buf_table.c,v 1.24 2001/10/05 17:28:12 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/buffer/buf_table.c,v 1.25 2001/10/25 05:49:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,7 +61,7 @@ InitBufTable(void)
BufferDesc *
BufTableLookup(BufferTag *tagPtr)
{
BufferLookupEnt *result;
BufferLookupEnt *result;

if (tagPtr->blockNum == P_NEW)
return NULL;
@@ -80,7 +80,7 @@ BufTableLookup(BufferTag *tagPtr)
bool
BufTableDelete(BufferDesc *buf)
{
BufferLookupEnt *result;
BufferLookupEnt *result;

/*
* buffer not initialized or has been removed from table already.
@@ -116,7 +116,7 @@ BufTableDelete(BufferDesc *buf)
bool
BufTableInsert(BufferDesc *buf)
{
BufferLookupEnt *result;
BufferLookupEnt *result;
bool found;

/* cannot insert it twice */
@@ -152,5 +152,4 @@ DBG_LookupListCheck(int nlookup)

hash_stats("Shared", SharedBufHash);
}

#endif

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/buffer/bufmgr.c,v 1.117 2001/09/29 04:02:23 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/buffer/bufmgr.c,v 1.118 2001/10/25 05:49:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -88,9 +88,9 @@ extern void AbortBufferIO(void);
#define BUFFER_IS_BROKEN(buf) ((buf->flags & BM_IO_ERROR) && !(buf->flags & BM_DIRTY))

static Buffer ReadBufferInternal(Relation reln, BlockNumber blockNum,
bool bufferLockHeld);
bool bufferLockHeld);
static BufferDesc *BufferAlloc(Relation reln, BlockNumber blockNum,
bool *foundPtr);
bool *foundPtr);
static int ReleaseBufferWithBufferLock(Buffer buffer);
static int BufferReplace(BufferDesc *bufHdr);
void PrintBufferDescs(void);
@@ -173,6 +173,7 @@ ReadBufferInternal(Relation reln, BlockNumber blockNum,
blockNum = reln->rd_nblocks = smgrnblocks(DEFAULT_SMGR, reln);
reln->rd_nblocks++;
}

/*
* lookup the buffer. IO_IN_PROGRESS is set if the requested
* block is not currently in memory.
@@ -198,12 +199,14 @@ ReadBufferInternal(Relation reln, BlockNumber blockNum,
/* That is, we're done if we expected to be able to find it ... */
if (!isExtend)
return BufferDescriptorGetBuffer(bufHdr);

/*
* If we found a buffer when we were expecting to extend the relation,
* the implication is that a buffer was already created for the next
* page position, but then smgrextend failed to write the page.
* We'd better try the smgrextend again. But since BufferAlloc
* won't have done StartBufferIO, we must do that first.
* If we found a buffer when we were expecting to extend the
* relation, the implication is that a buffer was already created
* for the next page position, but then smgrextend failed to write
* the page. We'd better try the smgrextend again. But since
* BufferAlloc won't have done StartBufferIO, we must do that
* first.
*/
if (!isLocalBuf)
{
@@ -308,7 +311,6 @@ BufferAlloc(Relation reln,
buf = BufTableLookup(&newTag);
if (buf != NULL)
{

/*
* Found it. Now, (a) pin the buffer so no one steals it from the
* buffer pool, (b) check IO_IN_PROGRESS, someone may be faulting
@@ -326,7 +328,6 @@ BufferAlloc(Relation reln,
}
if (BUFFER_IS_BROKEN(buf))
{

/*
* I couldn't understand the following old comment. If there's
* no IO for the buffer and the buffer is BROKEN,it should be
@@ -481,7 +482,6 @@ BufferAlloc(Relation reln,
buf2 = BufTableLookup(&newTag);
if (buf2 != NULL)
{

/*
* Found it. Someone has already done what we're about to
* do. We'll just handle this as if it were found in the
@@ -853,9 +853,9 @@ WaitIO(BufferDesc *buf)
/*
* Changed to wait until there's no IO - Inoue 01/13/2000
*
* Note this is *necessary* because an error abort in the process
* doing I/O could release the io_in_progress_lock prematurely.
* See AbortBufferIO.
* Note this is *necessary* because an error abort in the process doing
* I/O could release the io_in_progress_lock prematurely. See
* AbortBufferIO.
*/
while ((buf->flags & BM_IO_IN_PROGRESS) != 0)
{
@@ -930,7 +930,7 @@ ResetBufferPool(bool isCommit)
{
BufferDesc *buf = &BufferDescriptors[i];

PrivateRefCount[i] = 1; /* make sure we release shared pin */
PrivateRefCount[i] = 1; /* make sure we release shared pin */
LWLockAcquire(BufMgrLock, LW_EXCLUSIVE);
UnpinBuffer(buf);
LWLockRelease(BufMgrLock);
@@ -1090,9 +1090,9 @@ BlockNumber
RelationGetNumberOfBlocks(Relation relation)
{
/*
* relation->rd_nblocks should be accurate already if the relation
* is myxactonly. (XXX how safe is that really?) Don't call smgr
* on a view, either.
* relation->rd_nblocks should be accurate already if the relation is
* myxactonly. (XXX how safe is that really?) Don't call smgr on a
* view, either.
*/
if (relation->rd_rel->relkind == RELKIND_VIEW)
relation->rd_nblocks = 0;
@@ -1147,7 +1147,6 @@ DropRelationBuffers(Relation rel)
recheck:
if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node))
{

/*
* If there is I/O in progress, better wait till it's done;
* don't want to delete the relation out from under someone
@@ -1231,7 +1230,6 @@ DropRelFileNodeBuffers(RelFileNode rnode)
recheck:
if (RelFileNodeEquals(bufHdr->tag.rnode, rnode))
{

/*
* If there is I/O in progress, better wait till it's done;
* don't want to delete the relation out from under someone
@@ -1307,7 +1305,6 @@ recheck:
*/
if (bufHdr->tag.rnode.tblNode == dbid)
{

/*
* If there is I/O in progress, better wait till it's done;
* don't want to delete the database out from under someone
@@ -1428,7 +1425,6 @@ BufferPoolBlowaway()
BufTableDelete(&BufferDescriptors[i - 1]);
}
}

#endif

/* ---------------------------------------------------------------------
@@ -1681,7 +1677,6 @@ refcount = %ld, file: %s, line: %d\n",
PrivateRefCount[buffer - 1], file, line);
}
}

#endif

#ifdef NOT_USED
@@ -1701,7 +1696,6 @@ refcount = %ld, file: %s, line: %d\n",
PrivateRefCount[buffer - 1], file, line);
}
}

#endif

#ifdef NOT_USED
@@ -1742,7 +1736,6 @@ refcount = %ld, file: %s, line: %d\n",
}
return b;
}

#endif

#ifdef BMTRACE
@@ -1888,7 +1881,6 @@ _bm_die(Oid dbId, Oid relId, int blkNo, int bufNo,

kill(getpid(), SIGILL);
}

#endif   /* BMTRACE */

/*
@@ -1943,7 +1935,7 @@ UnlockBuffers(void)

for (i = 0; i < NBuffers; i++)
{
bits8 buflocks = BufferLocks[i];
bits8 buflocks = BufferLocks[i];

if (buflocks == 0)
continue;
@@ -1960,9 +1952,11 @@ UnlockBuffers(void)
if (buflocks & BL_PIN_COUNT_LOCK)
{
LWLockAcquire(BufMgrLock, LW_EXCLUSIVE);

/*
* Don't complain if flag bit not set; it could have been reset
* but we got a cancel/die interrupt before getting the signal.
* Don't complain if flag bit not set; it could have been
* reset but we got a cancel/die interrupt before getting the
* signal.
*/
if ((buf->flags & BM_PIN_COUNT_WAITER) != 0 &&
buf->wait_backend_id == MyBackendId)
@@ -1992,13 -1986,9 @@ LockBuffer(Buffer buffer, int mode)
buf = &(BufferDescriptors[buffer - 1]);

if (mode == BUFFER_LOCK_UNLOCK)
{
LWLockRelease(buf->cntx_lock);
}
else if (mode == BUFFER_LOCK_SHARE)
{
LWLockAcquire(buf->cntx_lock, LW_SHARED);
}
else if (mode == BUFFER_LOCK_EXCLUSIVE)
{
LWLockAcquire(buf->cntx_lock, LW_EXCLUSIVE);
@@ -2012,9 +2002,7 @@ LockBuffer(Buffer buffer, int mode)
buf->cntxDirty = true;
}
else
{
elog(ERROR, "LockBuffer: unknown lock mode %d", mode);
}
}

/*
@@ -2163,7 +2151,6 @@ InitBufferIO(void)
{
InProgressBuf = (BufferDesc *) 0;
}

#endif

/*
@@ -2180,11 +2167,11 @@ AbortBufferIO(void)
if (buf)
{
/*
* Since LWLockReleaseAll has already been called,
* we're not holding the buffer's io_in_progress_lock.
* We have to re-acquire it so that we can use TerminateBufferIO.
* Anyone who's executing WaitIO on the buffer will be in a busy spin
* until we succeed in doing this.
* Since LWLockReleaseAll has already been called, we're not
* holding the buffer's io_in_progress_lock. We have to re-acquire
* it so that we can use TerminateBufferIO. Anyone who's executing
* WaitIO on the buffer will be in a busy spin until we succeed in
* doing this.
*/
LWLockAcquire(buf->io_in_progress_lock, LW_EXCLUSIVE);

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/buffer/freelist.c,v 1.25 2001/09/29 04:02:23 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/buffer/freelist.c,v 1.26 2001/10/25 05:49:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -91,7 +91,7 @@ AddBufferToFreelist(BufferDesc *bf)
void
PinBuffer(BufferDesc *buf)
{
int b = BufferDescriptorGetBuffer(buf) - 1;
int b = BufferDescriptorGetBuffer(buf) - 1;

if (buf->refcount == 0)
{
@@ -129,7 +129,6 @@ refcount = %ld, file: %s, line: %d\n",
PrivateRefCount[buffer - 1], file, line);
}
}

#endif

#undef UnpinBuffer
@@ -143,7 +142,7 @@ refcount = %ld, file: %s, line: %d\n",
void
UnpinBuffer(BufferDesc *buf)
{
int b = BufferDescriptorGetBuffer(buf) - 1;
int b = BufferDescriptorGetBuffer(buf) - 1;

IsNotInQueue(buf);
Assert(buf->refcount > 0);
@@ -186,7 +185,6 @@ refcount = %ld, file: %s, line: %d\n",
PrivateRefCount[buffer - 1], file, line);
}
}

#endif

/*
@@ -277,7 +275,6 @@ DBG_FreeListCheck(int nfree)
printf("\tfree list corrupted: %d-th buffer is %d\n",
nfree, buf->buf_id);
}

#endif

#ifdef NOT_USED
@@ -312,5 +309,4 @@ PrintBufferFreeList()
buf = &(BufferDescriptors[buf->freeNext]);
}
}

#endif

@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/file/buffile.c,v 1.10 2001/03/22 03:59:45 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/file/buffile.c,v 1.11 2001/10/25 05:49:41 momjian Exp $
*
* NOTES:
*
@@ -135,7 +135,7 @@ extendBufFile(BufFile *file)
* multiple temporary files if more than MAX_PHYSICAL_FILESIZE bytes are
* written to it).
*/
BufFile *
BufFile *
BufFileCreateTemp(void)
{
BufFile *file;
@@ -158,12 +158,11 @@ BufFileCreateTemp(void)
* to attach a BufFile to a non-temporary file. Note that BufFiles created
* in this way CANNOT be expanded into multiple files.
*/
BufFile *
BufFile *
BufFileCreate(File file)
{
return makeBufFile(file);
}

#endif

/*
@@ -254,7 +253,6 @@ BufFileDumpBuffer(BufFile *file)
*/
while (wpos < file->nbytes)
{

/*
* Advance to next component file if necessary and possible.
*/
@@ -489,7 +487,6 @@ BufFileSeek(BufFile *file, int fileno, long offset, int whence)
newOffset >= file->curOffset &&
newOffset <= file->curOffset + file->nbytes)
{

/*
* Seek is to a point within existing buffer; we can just adjust
* pos-within-buffer, without flushing buffer. Note this is OK
@@ -575,5 +572,4 @@ BufFileTellBlock(BufFile *file)
blknum += file->curFile * RELSEG_SIZE;
return blknum;
}

#endif

@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/file/fd.c,v 1.84 2001/09/30 18:57:45 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/file/fd.c,v 1.85 2001/10/25 05:49:41 momjian Exp $
*
* NOTES:
*
@@ -92,7 +92,7 @@
* far beyond what they can really support. This GUC parameter limits what
* we will believe.
*/
int max_files_per_process = 1000;
int max_files_per_process = 1000;


/* Debugging.... */
@@ -122,7 +122,7 @@ typedef struct vfd
#define FD_TEMPORARY (1 << 1)/* should be unlinked when closed */

File nextFree; /* link to next free VFD, if in freelist */
File lruMoreRecently;/* doubly linked recency-of-use list */
File lruMoreRecently; /* doubly linked recency-of-use list */
File lruLessRecently;
long seekPos; /* current logical file position */
char *fileName; /* name of file, or NULL for unused VFD */
@@ -307,16 +307,16 @@ pg_nofile(void)
elog(DEBUG, "pg_nofile: sysconf(_SC_OPEN_MAX) failed; using %ld",
no_files);
}
#else /* !HAVE_SYSCONF */
#else /* !HAVE_SYSCONF */
#ifdef NOFILE
no_files = (long) NOFILE;
#else
no_files = (long) max_files_per_process;
#endif
#endif /* HAVE_SYSCONF */
#endif /* HAVE_SYSCONF */

/*
* Some platforms return hopelessly optimistic values. Apply a
* Some platforms return hopelessly optimistic values. Apply a
* configurable upper limit.
*/
if (no_files > (long) max_files_per_process)
@@ -355,7 +355,6 @@ _dump_lru(void)
sprintf(buf + strlen(buf), "LEAST");
elog(DEBUG, buf);
}

#endif   /* FDDEBUG */

static void
@@ -497,7 +496,6 @@ ReleaseLruFile(void)

if (nfile > 0)
{

/*
* There are opened files and so there should be at least one used
* vfd in the ring.
@@ -537,7 +535,6 @@ AllocateVfd(void)

if (VfdCache[0].nextFree == 0)
{

/*
* The free list is empty so it is time to increase the size of
* the array. We choose to double it each time this happens.
@@ -550,8 +547,8 @@ AllocateVfd(void)
newCacheSize = 32;

/*
* Be careful not to clobber VfdCache ptr if realloc fails;
* we will need it during proc_exit cleanup!
* Be careful not to clobber VfdCache ptr if realloc fails; we
* will need it during proc_exit cleanup!
*/
newVfdCache = (Vfd *) realloc(VfdCache, sizeof(Vfd) * newCacheSize);
if (newVfdCache == NULL)
@@ -624,9 +621,7 @@ filepath(const char *filename)
sprintf(buf, "%s/%s", DatabasePath, filename);
}
else
{
buf = pstrdup(filename);
}

#ifdef FILEDEBUG
printf("filepath: path is %s\n", buf);
@@ -657,7 +652,6 @@ FileAccess(File file)
}
else if (VfdCache[0].lruLessRecently != file)
{

/*
* We now know that the file is open and that it is not the last
* one accessed, so we need to move it to the head of the Lru
@@ -682,7 +676,6 @@ FileInvalidate(File file)
if (!FileIsNotOpen(file))
LruDelete(file);
}

#endif

static File
@@ -798,15 +791,15 @@ OpenTemporaryFile(void)
0600);
if (file <= 0)
{
char *dirpath;
char *dirpath;

/*
* We might need to create the pg_tempfiles subdirectory, if
* no one has yet done so.
* We might need to create the pg_tempfiles subdirectory, if no
* one has yet done so.
*
* Don't check for error from mkdir; it could fail if someone else
* just did the same thing. If it doesn't work then we'll bomb out
* on the second create attempt, instead.
* just did the same thing. If it doesn't work then we'll bomb
* out on the second create attempt, instead.
*/
dirpath = filepath(PG_TEMP_FILES_DIR);
mkdir(dirpath, S_IRWXU);
@@ -1009,7 +1002,6 @@ FileTell(File file)
file, VfdCache[file].fileName));
return VfdCache[file].seekPos;
}

#endif

int
@@ -1077,7 +1069,6 @@ FileSync(File file)
}
else
{

/*
* We don't use FileAccess() because we don't want to force the
* file to the front of the LRU ring; we aren't expecting to
@@ -1275,17 +1266,17 @@ AtEOXact_Files(void)
void
RemovePgTempFiles(void)
{
char db_path[MAXPGPATH];
char temp_path[MAXPGPATH];
char rm_path[MAXPGPATH];
char db_path[MAXPGPATH];
char temp_path[MAXPGPATH];
char rm_path[MAXPGPATH];
DIR *db_dir;
DIR *temp_dir;
struct dirent *db_de;
struct dirent *temp_de;
struct dirent *db_de;
struct dirent *temp_de;

/*
* Cycle through pg_tempfiles for all databases
* and remove old temp files.
* Cycle through pg_tempfiles for all databases and remove old temp
* files.
*/
snprintf(db_path, sizeof(db_path), "%s/base", DataDir);
if ((db_dir = opendir(db_path)) != NULL)
@@ -1317,14 +1308,12 @@ RemovePgTempFiles(void)
if (strncmp(temp_de->d_name,
PG_TEMP_FILE_PREFIX,
strlen(PG_TEMP_FILE_PREFIX)) == 0)
{
unlink(rm_path);
}
else
{
/*
* would prefer to use elog here, but it's not
* up and running during postmaster startup...
* would prefer to use elog here, but it's not up
* and running during postmaster startup...
*/
fprintf(stderr,
"Unexpected file found in temporary-files directory: %s\n",

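A side note on the AllocateVfd hunk above: the comment it re-wraps ("Be careful not to clobber VfdCache ptr if realloc fails") names a pattern worth spelling out, since a failed realloc returns NULL while leaving the old block allocated. A self-contained sketch of the same idiom with hypothetical names (not the actual fd.c code):

	#include <stdlib.h>

	typedef struct { int fd; } Slot;	/* stand-in for fd.c's Vfd */

	static Slot *cache = NULL;		/* analogous to VfdCache */
	static size_t cache_size = 0;

	/* Double the array; on failure, cache still points at the old array. */
	static int
	grow_cache(void)
	{
		size_t newsize = cache_size > 0 ? cache_size * 2 : 32;
		Slot *tmp = realloc(cache, newsize * sizeof(Slot));

		if (tmp == NULL)
			return -1;	/* old cache remains usable for exit cleanup */
		cache = tmp;
		cache_size = newsize;
		return 0;
	}
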
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/freespace/freespace.c,v 1.7 2001/10/05 17:28:12 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/freespace/freespace.c,v 1.8 2001/10/25 05:49:42 momjian Exp $
*
*
* NOTES:
@@ -19,13 +19,13 @@
* These currently work as follows:
*
* The number of distinct relations tracked is limited by a configuration
* variable (MaxFSMRelations). When this would be exceeded, we discard the
* least frequently used relation. A previously-unknown relation is always
* variable (MaxFSMRelations). When this would be exceeded, we discard the
* least frequently used relation. A previously-unknown relation is always
* entered into the map with useCount 1 on first reference, even if this
* causes an existing entry with higher useCount to be discarded. This may
* cause a little bit of thrashing among the bottom entries in the list,
* but if we didn't do it then there'd be no way for a relation not in the
* map to get in once the map is full. Note we allow a relation to be in the
* map to get in once the map is full. Note we allow a relation to be in the
* map even if no pages are currently stored for it: this allows us to track
* its useCount & threshold, which may eventually go high enough to give it
* priority for page storage.
@@ -89,13 +89,14 @@ struct FSMHeader
*/
struct FSMRelation
{
RelFileNode key; /* hash key (must be first) */
RelFileNode key; /* hash key (must be first) */
FSMRelation *nextRel; /* next rel in useCount order */
FSMRelation *priorRel; /* prior rel in useCount order */
int useCount; /* use count for prioritizing rels */
Size threshold; /* minimum amount of free space to keep */
int nextPage; /* index (from 0) to start next search at */
int numPages; /* total number of pages we have info about */
int numPages; /* total number of pages we have info
* about */
int numChunks; /* number of FSMChunks allocated to rel */
FSMChunk *relChunks; /* linked list of page info chunks */
};
@@ -109,21 +110,22 @@ struct FSMRelation
* to the freelist; but there's no point in doing the compaction before that.
*/

#define CHUNKPAGES 32 /* each chunk can store this many pages */
#define CHUNKPAGES 32 /* each chunk can store this many pages */

struct FSMChunk
{
FSMChunk *next; /* linked-list link */
int numPages; /* number of pages described here */
BlockNumber pages[CHUNKPAGES]; /* page numbers within relation */
ItemLength bytes[CHUNKPAGES]; /* free space available on each page */
BlockNumber pages[CHUNKPAGES]; /* page numbers within relation */
ItemLength bytes[CHUNKPAGES]; /* free space available on each
* page */
};


int MaxFSMRelations; /* these are set by guc.c */
int MaxFSMPages;
int MaxFSMRelations; /* these are set by guc.c */
int MaxFSMPages;

static FSMHeader *FreeSpaceMap; /* points to FSMHeader in shared memory */
static FSMHeader *FreeSpaceMap; /* points to FSMHeader in shared memory */


static FSMRelation *lookup_fsm_rel(RelFileNode *rel);
@@ -134,16 +136,16 @@ static void unlink_fsm_rel(FSMRelation *fsmrel);
static void free_chunk_chain(FSMChunk *fchunk);
static BlockNumber find_free_space(FSMRelation *fsmrel, Size spaceNeeded);
static void fsm_record_free_space(FSMRelation *fsmrel, BlockNumber page,
Size spaceAvail);
Size spaceAvail);
static bool lookup_fsm_page_entry(FSMRelation *fsmrel, BlockNumber page,
FSMChunk **outChunk, int *outChunkRelIndex);
FSMChunk **outChunk, int *outChunkRelIndex);
static bool insert_fsm_page_entry(FSMRelation *fsmrel,
BlockNumber page, Size spaceAvail,
FSMChunk *chunk, int chunkRelIndex);
BlockNumber page, Size spaceAvail,
FSMChunk *chunk, int chunkRelIndex);
static bool push_fsm_page_entry(BlockNumber page, Size spaceAvail,
FSMChunk *chunk, int chunkRelIndex);
FSMChunk *chunk, int chunkRelIndex);
static void delete_fsm_page_entry(FSMRelation *fsmrel, FSMChunk *chunk,
int chunkRelIndex);
int chunkRelIndex);
static void compact_fsm_page_list(FSMRelation *fsmrel);
static void acquire_fsm_free_space(void);

@@ -241,32 +243,34 @@ FreeSpaceShmemSize(void)
* will turn out to have too little space available by the time the caller
* gets a lock on it. In that case, the caller should report the actual
* amount of free space available on that page (via RecordFreeSpace) and
* then try again. If InvalidBlockNumber is returned, extend the relation.
* then try again. If InvalidBlockNumber is returned, extend the relation.
*/
BlockNumber
GetPageWithFreeSpace(RelFileNode *rel, Size spaceNeeded)
{
FSMRelation *fsmrel;
BlockNumber freepage;
BlockNumber freepage;

LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);

/*
* We always add a rel to the hashtable when it is inquired about.
*/
fsmrel = create_fsm_rel(rel);

/*
* Adjust the threshold towards the space request. This essentially
* Adjust the threshold towards the space request. This essentially
* implements an exponential moving average with an equivalent period
* of about 63 requests. Ignore silly requests, however, to ensure
* that the average stays in bounds.
*
* In theory, if the threshold increases here we should immediately
* delete any pages that fall below the new threshold. In practice
* it seems OK to wait until we have a need to compact space.
* delete any pages that fall below the new threshold. In practice it
* seems OK to wait until we have a need to compact space.
*/
if (spaceNeeded > 0 && spaceNeeded < BLCKSZ)
{
int cur_avg = (int) fsmrel->threshold;
int cur_avg = (int) fsmrel->threshold;

cur_avg += ((int) spaceNeeded - cur_avg) / 32;
fsmrel->threshold = (Size) cur_avg;
@@ -293,6 +297,7 @@ RecordFreeSpace(RelFileNode *rel, BlockNumber page, Size spaceAvail)
AssertArg(spaceAvail < BLCKSZ);

LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);

/*
* We choose not to add rels to the hashtable unless they've been
* inquired about with GetPageWithFreeSpace. Also, a Record operation
@@ -315,27 +320,29 @@ RecordAndGetPageWithFreeSpace(RelFileNode *rel,
Size spaceNeeded)
{
FSMRelation *fsmrel;
BlockNumber freepage;
BlockNumber freepage;

/* Sanity check: ensure spaceAvail will fit into ItemLength */
AssertArg(oldSpaceAvail < BLCKSZ);

LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);

/*
* We always add a rel to the hashtable when it is inquired about.
*/
fsmrel = create_fsm_rel(rel);

/*
* Adjust the threshold towards the space request, same as in
* GetPageWithFreeSpace.
*
* Note that we do this before storing data for oldPage, which means
* this isn't exactly equivalent to Record followed by Get; but it
* seems appropriate to adjust the threshold first.
* Note that we do this before storing data for oldPage, which means this
* isn't exactly equivalent to Record followed by Get; but it seems
* appropriate to adjust the threshold first.
*/
if (spaceNeeded > 0 && spaceNeeded < BLCKSZ)
{
int cur_avg = (int) fsmrel->threshold;
int cur_avg = (int) fsmrel->threshold;

cur_avg += ((int) spaceNeeded - cur_avg) / 32;
fsmrel->threshold = (Size) cur_avg;
@@ -356,7 +363,7 @@ RecordAndGetPageWithFreeSpace(RelFileNode *rel,
* pages in that page number range (inclusive). This allows obsolete info
* to be discarded. Second, if nPages > 0, record the page numbers and free
* space amounts in the given arrays. As with RecordFreeSpace, the FSM is at
* liberty to discard some of the information. However, it *must* discard
* liberty to discard some of the information. However, it *must* discard
* previously stored info in the minPage..maxPage range (for example, this
* case is used to remove info about deleted pages during relation truncation).
*/
@@ -390,7 +397,7 @@ MultiRecordFreeSpace(RelFileNode *rel,
done = false;
while (chunk && !done)
{
int numPages = chunk->numPages;
int numPages = chunk->numPages;

for (; chunkRelIndex < numPages; chunkRelIndex++)
{
@@ -407,22 +414,23 @@ MultiRecordFreeSpace(RelFileNode *rel,
/* Now compact out the zeroed entries */
compact_fsm_page_list(fsmrel);
}

/*
* Add new entries, if appropriate.
*
* XXX we could probably be smarter about this than doing it
* completely separately for each one. FIXME later.
* completely separately for each one. FIXME later.
*
* One thing we can do is short-circuit the process entirely if
* a page (a) has too little free space to be recorded, and (b)
* is within the minPage..maxPage range --- then we deleted any
* old entry above, and we aren't going to make a new one.
* This is particularly useful since in most cases, all the passed
* pages will in fact be in the minPage..maxPage range.
* One thing we can do is short-circuit the process entirely if a
* page (a) has too little free space to be recorded, and (b) is
* within the minPage..maxPage range --- then we deleted any old
* entry above, and we aren't going to make a new one. This is
* particularly useful since in most cases, all the passed pages
* will in fact be in the minPage..maxPage range.
*/
for (i = 0; i < nPages; i++)
{
BlockNumber page = pages[i];
BlockNumber page = pages[i];
Size avail = spaceAvail[i];

if (avail >= fsmrel->threshold ||
@@ -470,7 +478,7 @@ FreeSpaceMapForgetDatabase(Oid dbid)
LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
for (fsmrel = FreeSpaceMap->relList; fsmrel; fsmrel = nextrel)
{
nextrel = fsmrel->nextRel; /* in case we delete it */
nextrel = fsmrel->nextRel; /* in case we delete it */
if (fsmrel->key.tblNode == dbid)
delete_fsm_rel(fsmrel);
}
@@ -525,7 +533,7 @@ create_fsm_rel(RelFileNode *rel)
{
/* New hashtable entry, initialize it (hash_search set the key) */
fsmrel->useCount = 1;
fsmrel->threshold = BLCKSZ/2; /* starting point for new entry */
fsmrel->threshold = BLCKSZ / 2; /* starting point for new entry */
fsmrel->nextPage = 0;
fsmrel->numPages = 0;
fsmrel->numChunks = 0;
@@ -533,6 +541,7 @@ create_fsm_rel(RelFileNode *rel)
/* Discard lowest-priority existing rel, if we are over limit */
if (FreeSpaceMap->numRels >= MaxFSMRelations)
delete_fsm_rel(FreeSpaceMap->relListTail);

/*
* Add new entry in front of any others with useCount 1 (since it
* is more recently used than them).
@@ -545,18 +554,16 @@ create_fsm_rel(RelFileNode *rel)
}
else
{
int myCount;
int myCount;

/* Existing entry, advance its useCount */
if (++(fsmrel->useCount) >= INT_MAX/2)
if (++(fsmrel->useCount) >= INT_MAX / 2)
{
/* When useCounts threaten to overflow, reduce 'em all 2X */
for (oldrel = FreeSpaceMap->relList;
oldrel != NULL;
oldrel = oldrel->nextRel)
{
oldrel->useCount >>= 1;
}
}
/* If warranted, move it up the priority list */
oldrel = fsmrel->priorRel;
@@ -665,7 +672,7 @@ free_chunk_chain(FSMChunk *fchunk)

/*
* Look to see if a page with at least the specified amount of space is
* available in the given FSMRelation. If so, return its page number,
* available in the given FSMRelation. If so, return its page number,
* and advance the nextPage counter so that the next inquiry will return
* a different page if possible. Return InvalidBlockNumber if no success.
*/
@@ -699,7 +706,7 @@ find_free_space(FSMRelation *fsmrel, Size spaceNeeded)
/* Check the next page */
if ((Size) curChunk->bytes[chunkRelIndex] >= spaceNeeded)
{
fsmrel->nextPage = pageIndex+1;
fsmrel->nextPage = pageIndex + 1;
return curChunk->pages[chunkRelIndex];
}
/* Advance pageIndex and chunkRelIndex, wrapping around if needed */
@@ -739,12 +746,12 @@ fsm_record_free_space(FSMRelation *fsmrel, BlockNumber page, Size spaceAvail)
/*
* No existing entry; add one if spaceAvail exceeds threshold.
*
* CORNER CASE: if we have to do acquire_fsm_free_space then
* our own threshold will increase, possibly meaning that we
* shouldn't store the page after all. Loop to redo the test
* if that happens. The loop also covers the possibility that
* acquire_fsm_free_space must be executed more than once to
* free any space (ie, thresholds must be more than doubled).
* CORNER CASE: if we have to do acquire_fsm_free_space then our own
* threshold will increase, possibly meaning that we shouldn't
* store the page after all. Loop to redo the test if that
* happens. The loop also covers the possibility that
* acquire_fsm_free_space must be executed more than once to free
* any space (ie, thresholds must be more than doubled).
*/
while (spaceAvail >= fsmrel->threshold)
{
@@ -755,6 +762,7 @@ fsm_record_free_space(FSMRelation *fsmrel, BlockNumber page, Size spaceAvail)
acquire_fsm_free_space();
if (spaceAvail < fsmrel->threshold)
break;

/*
* Need to redo the lookup since our own page list may well
* have lost entries, so position is not correct anymore.
@@ -784,10 +792,10 @@ lookup_fsm_page_entry(FSMRelation *fsmrel, BlockNumber page,

for (chunk = fsmrel->relChunks; chunk; chunk = chunk->next)
{
int numPages = chunk->numPages;
int numPages = chunk->numPages;

/* Can skip the chunk quickly if page must be after last in chunk */
if (numPages > 0 && page <= chunk->pages[numPages-1])
if (numPages > 0 && page <= chunk->pages[numPages - 1])
{
for (chunkRelIndex = 0; chunkRelIndex < numPages; chunkRelIndex++)
{
@@ -801,9 +809,10 @@ lookup_fsm_page_entry(FSMRelation *fsmrel, BlockNumber page,
/* Should not get here, given above test */
Assert(false);
}

/*
* If we are about to fall off the end, and there's space available
* in the end chunk, return a pointer to it.
* If we are about to fall off the end, and there's space
* available in the end chunk, return a pointer to it.
*/
if (chunk->next == NULL && numPages < CHUNKPAGES)
{
@@ -812,6 +821,7 @@ lookup_fsm_page_entry(FSMRelation *fsmrel, BlockNumber page,
return false;
}
}

/*
* Adding the page would require a new chunk (or, perhaps, compaction
* of available free space --- not my problem here).
@@ -838,7 +848,7 @@ insert_fsm_page_entry(FSMRelation *fsmrel, BlockNumber page, Size spaceAvail,
{
/* No free space within chunk list, so need another chunk */
if ((newChunk = FreeSpaceMap->freeChunks) == NULL)
return false; /* can't do it */
return false; /* can't do it */
FreeSpaceMap->freeChunks = newChunk->next;
FreeSpaceMap->numFreeChunks--;
newChunk->next = NULL;
@@ -870,10 +880,11 @@ insert_fsm_page_entry(FSMRelation *fsmrel, BlockNumber page, Size spaceAvail,
fsmrel->nextPage++; /* don't return same page twice running */
return true;
}

/*
* There is space available, but evidently it's before the place
* where the page entry needs to go. Compact the list and try again.
* This will require us to redo the search for the appropriate place.
* There is space available, but evidently it's before the place where
* the page entry needs to go. Compact the list and try again. This
* will require us to redo the search for the appropriate place.
*/
compact_fsm_page_list(fsmrel);
if (lookup_fsm_page_entry(fsmrel, page, &newChunk, &newChunkRelIndex))
@@ -892,7 +903,7 @@ insert_fsm_page_entry(FSMRelation *fsmrel, BlockNumber page, Size spaceAvail,

/*
* Auxiliary routine for insert_fsm_page_entry: try to push entries to the
* right to insert at chunk/chunkRelIndex. Return TRUE if successful.
* right to insert at chunk/chunkRelIndex. Return TRUE if successful.
* Note that the FSMRelation's own fields are not updated.
*/
static bool
@@ -906,17 +917,17 @@ push_fsm_page_entry(BlockNumber page, Size spaceAvail,
if (chunk->next == NULL)
return false; /* no space */
/* try to push chunk's last item to next chunk */
if (! push_fsm_page_entry(chunk->pages[CHUNKPAGES-1],
chunk->bytes[CHUNKPAGES-1],
chunk->next, 0))
if (!push_fsm_page_entry(chunk->pages[CHUNKPAGES - 1],
chunk->bytes[CHUNKPAGES - 1],
chunk->next, 0))
return false;
/* successfully pushed it */
chunk->numPages--;
}
for (i = chunk->numPages; i > chunkRelIndex; i--)
{
chunk->pages[i] = chunk->pages[i-1];
chunk->bytes[i] = chunk->bytes[i-1];
chunk->pages[i] = chunk->pages[i - 1];
chunk->bytes[i] = chunk->bytes[i - 1];
}
chunk->numPages++;
chunk->pages[chunkRelIndex] = page;
@@ -939,12 +950,12 @@ delete_fsm_page_entry(FSMRelation *fsmrel, FSMChunk *chunk, int chunkRelIndex)
lim = --chunk->numPages;
for (i = chunkRelIndex; i < lim; i++)
{
chunk->pages[i] = chunk->pages[i+1];
chunk->bytes[i] = chunk->bytes[i+1];
chunk->pages[i] = chunk->pages[i + 1];
chunk->bytes[i] = chunk->bytes[i + 1];
}
/* Compact the whole list if a chunk can be freed */
fsmrel->numPages--;
if (fsmrel->numPages <= (fsmrel->numChunks-1) * CHUNKPAGES)
if (fsmrel->numPages <= (fsmrel->numChunks - 1) * CHUNKPAGES)
compact_fsm_page_list(fsmrel);
}

@@ -971,7 +982,7 @@ compact_fsm_page_list(FSMRelation *fsmrel)

while (srcChunk != NULL)
{
int srcPages = srcChunk->numPages;
int srcPages = srcChunk->numPages;

while (srcIndex < srcPages)
{
@@ -1069,7 +1080,7 @@ DumpFreeSpace(void)
nChunks = nPages = 0;
for (chunk = fsmrel->relChunks; chunk; chunk = chunk->next)
{
int numPages = chunk->numPages;
int numPages = chunk->numPages;

nChunks++;
for (chunkRelIndex = 0; chunkRelIndex < numPages; chunkRelIndex++)
@@ -1105,5 +1116,4 @@ DumpFreeSpace(void)
fprintf(stderr, "DumpFreeSpace: %d chunks in list, but numFreeChunks = %d\n",
nChunks, FreeSpaceMap->numFreeChunks);
}

#endif   /* FREESPACE_DEBUG */

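One number in the GetPageWithFreeSpace hunks above is worth unpacking: the comment says that cur_avg += ((int) spaceNeeded - cur_avg) / 32 "implements an exponential moving average with an equivalent period of about 63 requests". That follows from the standard mapping between an EMA's smoothing factor and its equivalent window, alpha = 2 / (N + 1): here alpha = 1/32, so N = 2 * 32 - 1 = 63, which is where the "about 63 requests" figure comes from.
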
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.71 2001/10/01 23:26:55 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.72 2001/10/25 05:49:42 momjian Exp $
*
* NOTES
*
@@ -114,7 +114,6 @@ static int on_proc_exit_index,
void
proc_exit(int code)
{

/*
* Once we set this flag, we are committed to exit. Any elog() will
* NOT send control back to the main loop, but right back here.
@@ -275,7 +274,6 @@ InternalIpcSemaphoreCreate(IpcSemaphoreKey semKey,

if (semId < 0)
{

/*
* Fail quietly if error indicates a collision with existing set.
* One would expect EEXIST, given that we said IPC_EXCL, but
@@ -592,7 +590,7 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, uint32 size, int permission)
"\nThis error usually means that PostgreSQL's request for a shared memory\n"
"segment exceeded your kernel's SHMMAX parameter. You can either\n"
"reduce the request size or reconfigure the kernel with larger SHMMAX.\n"
"To reduce the request size (currently %u bytes), reduce\n"
"To reduce the request size (currently %u bytes), reduce\n"
"PostgreSQL's shared_buffers parameter (currently %d) and/or\n"
"its max_connections parameter (currently %d).\n"
"\n"
@@ -607,8 +605,8 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, uint32 size, int permission)
else if (errno == ENOMEM)
fprintf(stderr,
"\nThis error usually means that PostgreSQL's request for a shared\n"
"memory segment exceeded available memory or swap space.\n"
"To reduce the request size (currently %u bytes), reduce\n"
"memory segment exceeded available memory or swap space.\n"
"To reduce the request size (currently %u bytes), reduce\n"
"PostgreSQL's shared_buffers parameter (currently %d) and/or\n"
"its max_connections parameter (currently %d).\n"
"\n"
@@ -623,7 +621,7 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, uint32 size, int permission)
"It occurs either if all available shared memory IDs have been taken,\n"
"in which case you need to raise the SHMMNI parameter in your kernel,\n"
"or because the system's overall limit for shared memory has been\n"
"reached. If you cannot increase the shared memory limit,\n"
"reached. If you cannot increase the shared memory limit,\n"
"reduce PostgreSQL's shared memory request (currently %u bytes),\n"
"by reducing its shared_buffers parameter (currently %d) and/or\n"
"its max_connections parameter (currently %d).\n"
@@ -710,7 +708,6 @@ SharedMemoryIsInUse(IpcMemoryKey shmKey, IpcMemoryId shmId)
*/
if (shmctl(shmId, IPC_STAT, &shmStat) < 0)
{

/*
* EINVAL actually has multiple possible causes documented in the
* shmctl man page, but we assume it must mean the segment no
@@ -748,7 +745,7 @@ PrivateMemoryCreate(uint32 size)
fprintf(stderr, "PrivateMemoryCreate: malloc(%u) failed\n", size);
proc_exit(1);
}
MemSet(memAddress, 0, size);/* keep Purify quiet */
MemSet(memAddress, 0, size); /* keep Purify quiet */

/* Register on-exit routine to release storage */
on_shmem_exit(PrivateMemoryDelete, PointerGetDatum(memAddress));

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipci.c,v 1.43 2001/09/29 04:02:23 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipci.c,v 1.44 2001/10/25 05:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -83,8 +83,8 @@ CreateSharedMemoryAndSemaphores(bool makePrivate, int maxBackends)
InitShmemAllocation(seghdr);

/*
* Now initialize LWLocks, which do shared memory allocation and
* are needed for InitShmemIndex.
* Now initialize LWLocks, which do shared memory allocation and are
* needed for InitShmemIndex.
*/
CreateLWLocks();

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/shmem.c,v 1.61 2001/10/05 17:28:12 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/shmem.c,v 1.62 2001/10/25 05:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -97,8 +97,9 @@ InitShmemAllocation(void *seghdr)
ShmemEnd = ShmemBase + shmhdr->totalsize;

/*
* Initialize the spinlock used by ShmemAlloc. We have to do the
* space allocation the hard way, since ShmemAlloc can't be called yet.
* Initialize the spinlock used by ShmemAlloc. We have to do the
* space allocation the hard way, since ShmemAlloc can't be called
* yet.
*/
ShmemLock = (slock_t *) (((char *) shmhdr) + shmhdr->freeoffset);
shmhdr->freeoffset += MAXALIGN(sizeof(slock_t));
@@ -234,7 +235,7 @@ InitShmemIndex(void)
* table at once.
*/
HTAB *
ShmemInitHash(const char *name, /* table string name for shmem index */
ShmemInitHash(const char *name, /* table string name for shmem index */
long init_size, /* initial table size */
long max_size, /* max size of the table */
HASHCTL *infoP, /* info about key and bucket size */
@@ -256,7 +257,7 @@ ShmemInitHash(const char *name, /* table string name for shmem index */

/* look it up in the shmem index */
location = ShmemInitStruct(name,
sizeof(HASHHDR) + infoP->dsize * sizeof(HASHSEGMENT),
sizeof(HASHHDR) + infoP->dsize * sizeof(HASHSEGMENT),
&found);

/*
@@ -267,8 +268,8 @@ ShmemInitHash(const char *name, /* table string name for shmem index */
return NULL;

/*
* if it already exists, attach to it rather than allocate and initialize
* new space
* if it already exists, attach to it rather than allocate and
* initialize new space
*/
if (found)
hash_flags |= HASH_ATTACH;

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/shmqueue.c,v 1.16 2001/03/22 03:59:45 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/shmqueue.c,v 1.17 2001/10/25 05:49:42 momjian Exp $
*
* NOTES
*
@@ -33,7 +33,6 @@
#define SHMQUEUE_DEBUG_ELOG NOTICE

static void dumpQ(SHM_QUEUE *q, char *s);

#endif


@@ -59,7 +58,6 @@ SHMQueueIsDetached(SHM_QUEUE *queue)
Assert(SHM_PTR_VALID(queue));
return (queue)->prev == INVALID_OFFSET;
}

#endif

/*
@@ -152,7 +150,6 @@ SHMQueueInsertAfter(SHM_QUEUE *queue, SHM_QUEUE *elem)
dumpQ(queue, "in SHMQueueInsertAfter: end");
#endif
}

#endif   /* NOT_USED */

/*--------------------
@@ -256,5 +253,4 @@ dumpQ(SHM_QUEUE *q, char *s)
strcat(buf, elem);
elog(SHMQUEUE_DEBUG_ELOG, "%s: %s", s, buf);
}

#endif   /* SHMQUEUE_DEBUG */

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinval.c,v 1.42 2001/09/29 15:29:48 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinval.c,v 1.43 2001/10/25 05:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -42,7 +42,7 @@ CreateSharedInvalidationState(int maxBackends)
void
InitBackendSharedInvalidationState(void)
{
int flag;
int flag;

LWLockAcquire(SInvalLock, LW_EXCLUSIVE);
flag = SIBackendInit(shmInvalBuffer);
@@ -75,8 +75,8 @@ SendSharedInvalidMessage(SharedInvalidationMessage *msg)
*/
void
ReceiveSharedInvalidMessages(
void (*invalFunction) (SharedInvalidationMessage *msg),
void (*resetFunction) (void))
void (*invalFunction) (SharedInvalidationMessage *msg),
void (*resetFunction) (void))
{
SharedInvalidationMessage data;
int getResult;
@@ -85,19 +85,19 @@ ReceiveSharedInvalidMessages(
for (;;)
{
/*
* We can run SIGetDataEntry in parallel with other backends running
* SIGetDataEntry for themselves, since each instance will modify
* only fields of its own backend's ProcState, and no instance will
* look at fields of other backends' ProcStates. We express this
* by grabbing SInvalLock in shared mode. Note that this is not
* exactly the normal (read-only) interpretation of a shared lock!
* Look closely at the interactions before allowing SInvalLock to
* be grabbed in shared mode for any other reason!
* We can run SIGetDataEntry in parallel with other backends
* running SIGetDataEntry for themselves, since each instance will
* modify only fields of its own backend's ProcState, and no
* instance will look at fields of other backends' ProcStates. We
* express this by grabbing SInvalLock in shared mode. Note that
* this is not exactly the normal (read-only) interpretation of a
* shared lock! Look closely at the interactions before allowing
* SInvalLock to be grabbed in shared mode for any other reason!
*
* The routines later in this file that use shared mode are okay
* with this, because they aren't looking at the ProcState fields
* associated with SI message transfer; they only use the ProcState
* array as an easy way to find all the PROC structures.
* The routines later in this file that use shared mode are okay with
* this, because they aren't looking at the ProcState fields
* associated with SI message transfer; they only use the
* ProcState array as an easy way to find all the PROC structures.
*/
LWLockAcquire(SInvalLock, LW_SHARED);
getResult = SIGetDataEntry(shmInvalBuffer, MyBackendId, &data);
@@ -209,6 +209,7 @@ TransactionIdIsInProgress(TransactionId xid)
if (pOffset != INVALID_OFFSET)
{
PROC *proc = (PROC *) MAKE_PTR(pOffset);

/* Fetch xid just once - see GetNewTransactionId */
TransactionId pxid = proc->xid;

@@ -233,7 +234,7 @@ TransactionIdIsInProgress(TransactionId xid)
* then only backends running in my own database are considered.
*
* This is used by VACUUM to decide which deleted tuples must be preserved
* in a table. allDbs = TRUE is needed for shared relations, but allDbs =
* in a table. allDbs = TRUE is needed for shared relations, but allDbs =
* FALSE is sufficient for non-shared relations, since only backends in my
* own database could ever see the tuples in them.
*
@@ -331,7 +332,7 @@ GetSnapshotData(bool serializable)
/*--------------------
* Unfortunately, we have to call ReadNewTransactionId() after acquiring
* SInvalLock above. It's not good because ReadNewTransactionId() does
* LWLockAcquire(XidGenLock), but *necessary*. We need to be sure that
* LWLockAcquire(XidGenLock), but *necessary*. We need to be sure that
* no transactions exit the set of currently-running transactions
* between the time we fetch xmax and the time we finish building our
* snapshot. Otherwise we could have a situation like this:
@@ -364,18 +365,19 @@ GetSnapshotData(bool serializable)
if (pOffset != INVALID_OFFSET)
{
PROC *proc = (PROC *) MAKE_PTR(pOffset);

/* Fetch xid just once - see GetNewTransactionId */
TransactionId xid = proc->xid;

/*
* Ignore my own proc (dealt with my xid above), procs not
* running a transaction, and xacts started since we read
* the next transaction ID. There's no need to store XIDs
* above what we got from ReadNewTransactionId, since we'll
* treat them as running anyway.
* running a transaction, and xacts started since we read the
* next transaction ID. There's no need to store XIDs above
* what we got from ReadNewTransactionId, since we'll treat
* them as running anyway.
*/
if (proc == MyProc ||
! TransactionIdIsNormal(xid) ||
!TransactionIdIsNormal(xid) ||
TransactionIdFollowsOrEquals(xid, snapshot->xmax))
continue;

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/large_object/inv_api.c,v 1.89 2001/08/10 20:52:24 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/large_object/inv_api.c,v 1.90 2001/10/25 05:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -416,9 +416,10 @@ inv_write(LargeObjectDesc *obj_desc, char *buf, int nbytes)
bool neednextpage;
bytea *datafield;
bool pfreeit;
struct {
struct varlena hdr;
char data[LOBLKSIZE];
struct
{
struct varlena hdr;
char data[LOBLKSIZE];
} workbuf;
char *workb = VARATT_DATA(&workbuf.hdr);
HeapTuple newtup;
@@ -462,7 +463,6 @@ inv_write(LargeObjectDesc *obj_desc, char *buf, int nbytes)

while (nwritten < nbytes)
{

/*
* If possible, get next pre-existing page of the LO. We assume
* the indexscan will deliver these in order --- but there may be
@@ -491,7 +491,6 @@ inv_write(LargeObjectDesc *obj_desc, char *buf, int nbytes)
*/
if (olddata != NULL && olddata->pageno == pageno)
{

/*
* Update an existing page with fresh data.
*
@@ -558,7 +557,6 @@ inv_write(LargeObjectDesc *obj_desc, char *buf, int nbytes)
}
else
{

/*
* Write a brand new page.
*

@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/lmgr/deadlock.c,v 1.6 2001/09/30 00:45:47 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/lmgr/deadlock.c,v 1.7 2001/10/25 05:49:42 momjian Exp $
*
* Interface:
*
@@ -58,7 +58,6 @@ static bool TopoSort(LOCK *lock, EDGE *constraints, int nConstraints,

#ifdef DEBUG_DEADLOCK
static void PrintLockQueue(LOCK *lock, const char *info);

#endif


@@ -499,7 +498,6 @@ FindLockCycleRecurse(PROC *checkProc,
/* This proc soft-blocks checkProc */
if (FindLockCycleRecurse(proc, softEdges, nSoftEdges))
{

/*
* Add this edge to the list of soft edges in the
* cycle
@@ -533,7 +531,6 @@ FindLockCycleRecurse(PROC *checkProc,
/* This proc soft-blocks checkProc */
if (FindLockCycleRecurse(proc, softEdges, nSoftEdges))
{

/*
* Add this edge to the list of soft edges in the
* cycle
@@ -759,5 +756,4 @@ PrintLockQueue(LOCK *lock, const char *info)
printf("\n");
fflush(stdout);
}

#endif

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lmgr.c,v 1.51 2001/09/27 16:29:12 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lmgr.c,v 1.52 2001/10/25 05:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -332,9 +332,9 @@ void
XactLockTableWait(TransactionId xid)
{
LOCKTAG tag;
TransactionId myxid = GetCurrentTransactionId();
TransactionId myxid = GetCurrentTransactionId();

Assert(! TransactionIdEquals(xid, myxid));
Assert(!TransactionIdEquals(xid, myxid));

MemSet(&tag, 0, sizeof(tag));
tag.relId = XactLockTableId;

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	$Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.100 2001/10/05 17:28:12 tgl Exp $
+ *	$Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.101 2001/10/25 05:49:42 momjian Exp $
  *
  * NOTES
  *	  Outside modules can create a lock table and acquire/release
@@ -42,7 +42,7 @@
 
 
 /* This configuration variable is used to set the lock table size */
-int max_locks_per_xact; /* set by guc.c */
+int max_locks_per_xact; /* set by guc.c */
 
 #define NLOCKENTS(maxBackends) (max_locks_per_xact * (maxBackends))
 
@@ -54,15 +54,15 @@ static void LockCountMyLocks(SHMEM_OFFSET lockOffset, PROC *proc,
 
 static char *lock_mode_names[] =
 {
-	"INVALID",
-	"AccessShareLock",
-	"RowShareLock",
-	"RowExclusiveLock",
-	"ShareUpdateExclusiveLock",
-	"ShareLock",
-	"ShareRowExclusiveLock",
-	"ExclusiveLock",
-	"AccessExclusiveLock"
+	"INVALID",
+	"AccessShareLock",
+	"RowShareLock",
+	"RowExclusiveLock",
+	"ShareUpdateExclusiveLock",
+	"ShareLock",
+	"ShareRowExclusiveLock",
+	"ExclusiveLock",
+	"AccessExclusiveLock"
 };
 
 
@@ -96,10 +96,10 @@ inline static bool
 LOCK_DEBUG_ENABLED(const LOCK *lock)
 {
 	return
-	(((LOCK_LOCKMETHOD(*lock) == DEFAULT_LOCKMETHOD && Trace_locks)
-	  || (LOCK_LOCKMETHOD(*lock) == USER_LOCKMETHOD && Trace_userlocks))
-	 && (lock->tag.relId >= (Oid) Trace_lock_oidmin))
-	|| (Trace_lock_table && (lock->tag.relId == Trace_lock_table));
+		(((LOCK_LOCKMETHOD(*lock) == DEFAULT_LOCKMETHOD && Trace_locks)
+		  || (LOCK_LOCKMETHOD(*lock) == USER_LOCKMETHOD && Trace_userlocks))
+		 && (lock->tag.relId >= (Oid) Trace_lock_oidmin))
+		|| (Trace_lock_table && (lock->tag.relId == Trace_lock_table));
 }
 
 
@@ -132,7 +132,7 @@ HOLDER_PRINT(const char *where, const HOLDER *holderP)
 		|| (HOLDER_LOCKMETHOD(*holderP) == USER_LOCKMETHOD && Trace_userlocks))
 		&& (((LOCK *) MAKE_PTR(holderP->tag.lock))->tag.relId >= (Oid) Trace_lock_oidmin))
 		|| (Trace_lock_table && (((LOCK *) MAKE_PTR(holderP->tag.lock))->tag.relId == Trace_lock_table))
-		)
+		)
 		elog(DEBUG,
 			 "%s: holder(%lx) lock(%lx) tbl(%d) proc(%lx) xid(%u) hold(%d,%d,%d,%d,%d,%d,%d)=%d",
 			 where, MAKE_OFFSET(holderP), holderP->tag.lock,
@@ -147,7 +147,6 @@ HOLDER_PRINT(const char *where, const HOLDER *holderP)
 
 #define LOCK_PRINT(where, lock, type)
 #define HOLDER_PRINT(where, holderP)
-
 #endif /* not LOCK_DEBUG */
 
 
@@ -647,9 +646,11 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
 	else
 	{
 		Assert(status == STATUS_FOUND);
+
 		/*
 		 * We can't acquire the lock immediately. If caller specified no
-		 * blocking, remove the holder entry and return FALSE without waiting.
+		 * blocking, remove the holder entry and return FALSE without
+		 * waiting.
 		 */
 		if (dontWait)
 		{
@@ -911,7 +912,6 @@ WaitOnLock(LOCKMETHOD lockmethod, LOCKMODE lockmode,
 				   lock,
 				   holder) != STATUS_OK)
 	{
-
 		/*
 		 * We failed as a result of a deadlock, see HandleDeadLock(). Quit
		 * now.  Removal of the holder and lock objects, if no longer
@@ -1114,7 +1114,6 @@ LockRelease(LOCKMETHOD lockmethod, LOCKTAG *locktag,
 
 	if (lock->nRequested == 0)
 	{
-
 		/*
 		 * if there's no one waiting in the queue, we just released the
 		 * last lock on this object.  Delete it from the lock table.
@@ -1464,5 +1463,4 @@ DumpAllLocks(void)
 			elog(DEBUG, "DumpAllLocks: holder->tag.lock = NULL");
 	}
 }
-
 #endif /* LOCK_DEBUG */

@@ -6,7 +6,7 @@
  * Lightweight locks are intended primarily to provide mutual exclusion of
  * access to shared-memory data structures.  Therefore, they offer both
  * exclusive and shared lock modes (to support read/write and read-only
- * access to a shared object). There are few other frammishes.  User-level
+ * access to a shared object). There are few other frammishes.  User-level
  * locking should be done with the full lock manager --- which depends on
  * an LWLock to protect its shared state.
  *
@@ -15,7 +15,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	$Header: /cvsroot/pgsql/src/backend/storage/lmgr/lwlock.c,v 1.1 2001/09/29 04:02:24 tgl Exp $
+ *	$Header: /cvsroot/pgsql/src/backend/storage/lmgr/lwlock.c,v 1.2 2001/10/25 05:49:42 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -43,8 +43,9 @@ typedef struct LWLock
  * the array.
  */
 static LWLock *LWLockArray = NULL;
+
 /* shared counter for dynamic allocation of LWLockIds */
-static int *LWLockCounter;
+static int *LWLockCounter;
 
 
 /*
@@ -55,8 +56,8 @@ static int *LWLockCounter;
 */
 #define MAX_SIMUL_LWLOCKS 100
 
-static int num_held_lwlocks = 0;
-static LWLockId held_lwlocks[MAX_SIMUL_LWLOCKS];
+static int num_held_lwlocks = 0;
+static LWLockId held_lwlocks[MAX_SIMUL_LWLOCKS];
 
 
 #ifdef LOCK_DEBUG
@@ -71,9 +72,9 @@ PRINT_LWDEBUG(const char *where, LWLockId lockid, const LWLock *lock)
 			 (int) lock->exclusive, lock->shared, lock->head);
 }
 
-#else /* not LOCK_DEBUG */
+#else /* not LOCK_DEBUG */
 #define PRINT_LWDEBUG(a,b,c)
-#endif /* LOCK_DEBUG */
+#endif /* LOCK_DEBUG */
 
 
 /*
@@ -82,13 +83,13 @@ PRINT_LWDEBUG(const char *where, LWLockId lockid, const LWLock *lock)
 int
 NumLWLocks(void)
 {
-	int numLocks;
+	int numLocks;
 
 	/*
-	 * Possibly this logic should be spread out among the affected modules,
-	 * the same way that shmem space estimation is done. But for now,
-	 * there are few enough users of LWLocks that we can get away with
-	 * just keeping the knowledge here.
+	 * Possibly this logic should be spread out among the affected
+	 * modules, the same way that shmem space estimation is done.  But for
+	 * now, there are few enough users of LWLocks that we can get away
+	 * with just keeping the knowledge here.
 	 */
 
 	/* Predefined LWLocks */
@@ -112,8 +113,8 @@ NumLWLocks(void)
 int
 LWLockShmemSize(void)
 {
-	int numLocks = NumLWLocks();
-	uint32 spaceLocks;
+	int numLocks = NumLWLocks();
+	uint32 spaceLocks;
 
 	/* Allocate the LWLocks plus space for shared allocation counter. */
 	spaceLocks = numLocks * sizeof(LWLock) + 2 * sizeof(int);
@@ -129,10 +130,10 @@ LWLockShmemSize(void)
 void
 CreateLWLocks(void)
 {
-	int numLocks = NumLWLocks();
-	uint32 spaceLocks = LWLockShmemSize();
-	LWLock *lock;
-	int id;
+	int numLocks = NumLWLocks();
+	uint32 spaceLocks = LWLockShmemSize();
+	LWLock *lock;
+	int id;
 
 	/* Allocate space */
 	LWLockArray = (LWLock *) ShmemAlloc(spaceLocks);
@@ -184,8 +185,8 @@ LWLockAssign(void)
 void
 LWLockAcquire(LWLockId lockid, LWLockMode mode)
 {
-	LWLock *lock = LWLockArray + lockid;
-	bool mustwait;
+	LWLock *lock = LWLockArray + lockid;
+	bool mustwait;
 
 	PRINT_LWDEBUG("LWLockAcquire", lockid, lock);
 
@@ -229,13 +230,13 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
 	if (mustwait)
 	{
 		/* Add myself to wait queue */
-		PROC *proc = MyProc;
-		int extraWaits = 0;
+		PROC *proc = MyProc;
+		int extraWaits = 0;
 
 		/*
-		 * If we don't have a PROC structure, there's no way to wait.
-		 * This should never occur, since MyProc should only be null
-		 * during shared memory initialization.
+		 * If we don't have a PROC structure, there's no way to wait. This
+		 * should never occur, since MyProc should only be null during
+		 * shared memory initialization.
 		 */
 		if (proc == NULL)
 			elog(FATAL, "LWLockAcquire: can't wait without a PROC structure");
@@ -256,13 +257,13 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
 		 * Wait until awakened.
 		 *
 		 * Since we share the process wait semaphore with the regular lock
-		 * manager and ProcWaitForSignal, and we may need to acquire an LWLock
-		 * while one of those is pending, it is possible that we get awakened
-		 * for a reason other than being granted the LWLock. If so, loop back
-		 * and wait again. Once we've gotten the lock, re-increment the sema
-		 * by the number of additional signals received, so that the lock
-		 * manager or signal manager will see the received signal when it
-		 * next waits.
+		 * manager and ProcWaitForSignal, and we may need to acquire an
+		 * LWLock while one of those is pending, it is possible that we
+		 * get awakened for a reason other than being granted the LWLock.
+		 * If so, loop back and wait again. Once we've gotten the lock,
+		 * re-increment the sema by the number of additional signals
+		 * received, so that the lock manager or signal manager will see
+		 * the received signal when it next waits.
 		 */
 		for (;;)
 		{
@@ -272,6 +273,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
 				break;
 			extraWaits++;
 		}
+
 		/*
 		 * The awakener already updated the lock struct's state, so we
 		 * don't need to do anything more to it.  Just need to fix the
@@ -301,8 +303,8 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
 bool
 LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
 {
-	LWLock *lock = LWLockArray + lockid;
-	bool mustwait;
+	LWLock *lock = LWLockArray + lockid;
+	bool mustwait;
 
 	PRINT_LWDEBUG("LWLockConditionalAcquire", lockid, lock);
 
@@ -367,18 +369,18 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
 void
 LWLockRelease(LWLockId lockid)
 {
-	LWLock *lock = LWLockArray + lockid;
-	PROC *head;
-	PROC *proc;
-	int i;
+	LWLock *lock = LWLockArray + lockid;
+	PROC *head;
+	PROC *proc;
+	int i;
 
 	PRINT_LWDEBUG("LWLockRelease", lockid, lock);
 
 	/*
-	 * Remove lock from list of locks held.  Usually, but not always,
-	 * it will be the latest-acquired lock; so search array backwards.
+	 * Remove lock from list of locks held.  Usually, but not always, it
+	 * will be the latest-acquired lock; so search array backwards.
 	 */
-	for (i = num_held_lwlocks; --i >= 0; )
+	for (i = num_held_lwlocks; --i >= 0;)
 	{
 		if (lockid == held_lwlocks[i])
 			break;
@@ -387,7 +389,7 @@ LWLockRelease(LWLockId lockid)
 		elog(ERROR, "LWLockRelease: lock %d is not held", (int) lockid);
 	num_held_lwlocks--;
 	for (; i < num_held_lwlocks; i++)
-		held_lwlocks[i] = held_lwlocks[i+1];
+		held_lwlocks[i] = held_lwlocks[i + 1];
 
 	/* Acquire mutex.  Time spent holding mutex should be short! */
 	SpinLockAcquire_NoHoldoff(&lock->mutex);
@@ -402,8 +404,8 @@ LWLockRelease(LWLockId lockid)
 	}
 
 	/*
-	 * See if I need to awaken any waiters.  If I released a non-last shared
-	 * hold, there cannot be anything to do.
+	 * See if I need to awaken any waiters.  If I released a non-last
+	 * shared hold, there cannot be anything to do.
 	 */
 	head = lock->head;
 	if (head != NULL)
@@ -411,14 +413,12 @@ LWLockRelease(LWLockId lockid)
 		if (lock->exclusive == 0 && lock->shared == 0)
 		{
 			/*
-			 * Remove the to-be-awakened PROCs from the queue, and update the
-			 * lock state to show them as holding the lock.
+			 * Remove the to-be-awakened PROCs from the queue, and update
+			 * the lock state to show them as holding the lock.
 			 */
 			proc = head;
 			if (proc->lwExclusive)
-			{
 				lock->exclusive++;
-			}
 			else
 			{
 				lock->shared++;
@@ -465,10 +465,10 @@ LWLockRelease(LWLockId lockid)
 /*
  * LWLockReleaseAll - release all currently-held locks
  *
- * Used to clean up after elog(ERROR). An important difference between this
+ * Used to clean up after elog(ERROR). An important difference between this
  * function and retail LWLockRelease calls is that InterruptHoldoffCount is
  * unchanged by this operation. This is necessary since InterruptHoldoffCount
- * has been set to an appropriate level earlier in error recovery. We could
+ * has been set to an appropriate level earlier in error recovery. We could
  * decrement it below zero if we allow it to drop for each released lock!
 */
 void
@@ -478,6 +478,6 @@ LWLockReleaseAll(void)
 	{
 		HOLD_INTERRUPTS(); /* match the upcoming RESUME_INTERRUPTS */
 
-		LWLockRelease(held_lwlocks[num_held_lwlocks-1]);
+		LWLockRelease(held_lwlocks[num_held_lwlocks - 1]);
 	}
 }

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	$Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.112 2001/10/01 18:16:32 tgl Exp $
+ *	$Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.113 2001/10/25 05:49:42 momjian Exp $
  *
 *-------------------------------------------------------------------------
 */
@@ -127,8 +127,8 @@ InitProcGlobal(int maxBackends)
 	 * besides those used for regular backends.
 	 */
 	Assert(maxBackends > 0);
-	semMapEntries = PROC_SEM_MAP_ENTRIES(maxBackends+1);
-	procGlobalSize = sizeof(PROC_HDR) + (semMapEntries-1) * sizeof(SEM_MAP_ENTRY);
+	semMapEntries = PROC_SEM_MAP_ENTRIES(maxBackends + 1);
+	procGlobalSize = sizeof(PROC_HDR) + (semMapEntries - 1) *sizeof(SEM_MAP_ENTRY);
 
 	/* Create or attach to the ProcGlobal shared structure */
 	ProcGlobal = (PROC_HDR *)
@@ -182,10 +182,10 @@ InitProcGlobal(int maxBackends)
 	 */
 	DummyProc = (PROC *) ShmemAlloc(sizeof(PROC));
 	DummyProc->pid = 0; /* marks DummyProc as not in use */
-	i = semMapEntries-1;
-	ProcGlobal->procSemMap[i].freeSemMap |= 1 << (PROC_NSEMS_PER_SET-1);
+	i = semMapEntries - 1;
+	ProcGlobal->procSemMap[i].freeSemMap |= 1 << (PROC_NSEMS_PER_SET - 1);
 	DummyProc->sem.semId = ProcGlobal->procSemMap[i].procSemId;
-	DummyProc->sem.semNum = PROC_NSEMS_PER_SET-1;
+	DummyProc->sem.semNum = PROC_NSEMS_PER_SET - 1;
 
 	/* Create ProcStructLock spinlock, too */
 	ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
@@ -199,11 +199,11 @@ InitProcGlobal(int maxBackends)
 void
 InitProcess(void)
 {
-	SHMEM_OFFSET myOffset;
+	SHMEM_OFFSET myOffset;
 
 	/*
-	 * ProcGlobal should be set by a previous call to InitProcGlobal
-	 * (if we are a backend, we inherit this by fork() from the postmaster).
+	 * ProcGlobal should be set by a previous call to InitProcGlobal (if
+	 * we are a backend, we inherit this by fork() from the postmaster).
 	 */
 	if (ProcGlobal == NULL)
 		elog(STOP, "InitProcess: Proc Header uninitialized");
@@ -260,8 +260,8 @@ InitProcess(void)
 	on_shmem_exit(ProcKill, 0);
 
 	/*
-	 * Set up a wait-semaphore for the proc. (We rely on ProcKill to clean
-	 * up MyProc if this fails.)
+	 * Set up a wait-semaphore for the proc. (We rely on ProcKill to
+	 * clean up MyProc if this fails.)
 	 */
 	if (IsUnderPostmaster)
 		ProcGetNewSemIdAndNum(&MyProc->sem.semId, &MyProc->sem.semNum);
@@ -291,8 +291,8 @@ void
 InitDummyProcess(void)
 {
 	/*
-	 * ProcGlobal should be set by a previous call to InitProcGlobal
-	 * (we inherit this by fork() from the postmaster).
+	 * ProcGlobal should be set by a previous call to InitProcGlobal (we
+	 * inherit this by fork() from the postmaster).
 	 */
 	if (ProcGlobal == NULL || DummyProc == NULL)
 		elog(STOP, "InitDummyProcess: Proc Header uninitialized");
@@ -309,8 +309,8 @@ InitDummyProcess(void)
 	MyProc = DummyProc;
 
 	/*
-	 * Initialize all fields of MyProc, except MyProc->sem which was
-	 * set up by InitProcGlobal.
+	 * Initialize all fields of MyProc, except MyProc->sem which was set
+	 * up by InitProcGlobal.
 	 */
 	MyProc->pid = MyProcPid; /* marks DummyProc as in use by me */
 	SHMQueueElemInit(&(MyProc->links));
@@ -471,7 +471,7 @@ ProcKill(void)
 
 /*
  * DummyProcKill() -- Cut-down version of ProcKill for dummy (checkpoint)
- *		processes. The PROC and sema are not released, only marked
+ *		processes. The PROC and sema are not released, only marked
 *		as not-in-use.
 */
 static void
@@ -520,7 +520,6 @@ ProcQueueAlloc(char *name)
 	ProcQueueInit(queue);
 	return queue;
 }
-
 #endif
 
 /*
@@ -599,10 +598,11 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
 			if (lockctl->conflictTab[lockmode] & proc->heldLocks)
 			{
 				/*
-				 * Yes, so we have a deadlock. Easiest way to clean up
-				 * correctly is to call RemoveFromWaitQueue(), but we
-				 * can't do that until we are *on* the wait queue.
-				 * So, set a flag to check below, and break out of loop.
+				 * Yes, so we have a deadlock. Easiest way to clean
+				 * up correctly is to call RemoveFromWaitQueue(), but
+				 * we can't do that until we are *on* the wait queue.
+				 * So, set a flag to check below, and break out of
+				 * loop.
 				 */
 				early_deadlock = true;
 				break;
@@ -653,12 +653,12 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
 	MyProc->waitHolder = holder;
 	MyProc->waitLockMode = lockmode;
 
-	MyProc->errType = STATUS_OK; /* initialize result for success */
+	MyProc->errType = STATUS_OK; /* initialize result for success */
 
 	/*
	 * If we detected deadlock, give up without waiting.  This must agree
-	 * with HandleDeadLock's recovery code, except that we shouldn't release
-	 * the semaphore since we haven't tried to lock it yet.
+	 * with HandleDeadLock's recovery code, except that we shouldn't
+	 * release the semaphore since we haven't tried to lock it yet.
 	 */
 	if (early_deadlock)
 	{
@@ -689,7 +689,7 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
 	 * By delaying the check until we've waited for a bit, we can avoid
 	 * running the rather expensive deadlock-check code in most cases.
 	 */
-	if (! enable_sigalrm_interrupt(DeadlockTimeout))
+	if (!enable_sigalrm_interrupt(DeadlockTimeout))
 		elog(FATAL, "ProcSleep: Unable to set timer for process wakeup");
 
 	/*
@@ -711,7 +711,7 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
 	/*
 	 * Disable the timer, if it's still running
 	 */
-	if (! disable_sigalrm_interrupt())
+	if (!disable_sigalrm_interrupt())
 		elog(FATAL, "ProcSleep: Unable to disable timer for process wakeup");
 
 	/*
@@ -821,7 +821,6 @@ ProcLockWakeup(LOCKMETHODTABLE *lockMethodTable, LOCK *lock)
 		}
 		else
 		{
-
 			/*
 			 * Cannot wake this guy. Remember his request for later
 			 * checks.
@@ -851,8 +850,8 @@ HandleDeadLock(SIGNAL_ARGS)
 	 * Acquire locktable lock. Note that the SIGALRM interrupt had better
 	 * not be enabled anywhere that this process itself holds the
 	 * locktable lock, else this will wait forever. Also note that
-	 * LWLockAcquire creates a critical section, so that this
-	 * routine cannot be interrupted by cancel/die interrupts.
+	 * LWLockAcquire creates a critical section, so that this routine
+	 * cannot be interrupted by cancel/die interrupts.
 	 */
 	LWLockAcquire(LockMgrLock, LW_EXCLUSIVE);
 
@@ -960,7 +959,7 @@ ProcCancelWaitForSignal(void)
 void
 ProcSendSignal(BackendId procId)
 {
-	PROC *proc = BackendIdGetProc(procId);
+	PROC *proc = BackendIdGetProc(procId);
 
 	if (proc != NULL)
 		IpcSemaphoreUnlock(proc->sem.semId, proc->sem.semNum);
@@ -976,7 +975,7 @@ ProcSendSignal(BackendId procId)
 /*
  * Enable the SIGALRM interrupt to fire after the specified delay
 *
- * Delay is given in milliseconds. Caller should be sure a SIGALRM
+ * Delay is given in milliseconds. Caller should be sure a SIGALRM
 * signal handler is installed before this is called.
 *
 * Returns TRUE if okay, FALSE on failure.
@@ -997,7 +996,7 @@ enable_sigalrm_interrupt(int delayms)
 	/* BeOS doesn't have setitimer, but has set_alarm */
 	bigtime_t time_interval;
 
-	time_interval = delayms * 1000; /* usecs */
+	time_interval = delayms * 1000; /* usecs */
 	if (set_alarm(time_interval, B_ONE_SHOT_RELATIVE_ALARM) < 0)
 		return false;
 #endif
@@ -1044,7 +1043,7 @@ ProcGetNewSemIdAndNum(IpcSemaphoreId *semId, int *semNum)
 {
 	int i;
 	int semMapEntries = ProcGlobal->semMapEntries;
-	SEM_MAP_ENTRY *procSemMap = ProcGlobal->procSemMap;
+	SEM_MAP_ENTRY *procSemMap = ProcGlobal->procSemMap;
 	int32 fullmask = (1 << PROC_NSEMS_PER_SET) - 1;
 
 	SpinLockAcquire(ProcStructLock);
@@ -1080,10 +1079,11 @@ ProcGetNewSemIdAndNum(IpcSemaphoreId *semId, int *semNum)
 	SpinLockRelease(ProcStructLock);
 
 	/*
-	 * If we reach here, all the semaphores are in use. This is one of the
-	 * possible places to detect "too many backends", so give the standard
-	 * error message. (Whether we detect it here or in sinval.c depends on
-	 * whether MaxBackends is a multiple of PROC_NSEMS_PER_SET.)
+	 * If we reach here, all the semaphores are in use. This is one of
+	 * the possible places to detect "too many backends", so give the
+	 * standard error message. (Whether we detect it here or in sinval.c
+	 * depends on whether MaxBackends is a multiple of
+	 * PROC_NSEMS_PER_SET.)
 	 */
 	elog(FATAL, "Sorry, too many clients already");
 }

@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	$Header: /cvsroot/pgsql/src/backend/storage/lmgr/s_lock.c,v 1.2 2001/09/29 04:02:25 tgl Exp $
+ *	$Header: /cvsroot/pgsql/src/backend/storage/lmgr/s_lock.c,v 1.3 2001/10/25 05:49:42 momjian Exp $
  *
 *-------------------------------------------------------------------------
 */
@@ -48,13 +48,14 @@ s_lock(volatile slock_t *lock, const char *file, int line)
 	struct timeval delay;
 
 	/*
-	 * We loop tightly for awhile, then delay using select() and try again.
-	 * Preferably, "awhile" should be a small multiple of the maximum time
-	 * we expect a spinlock to be held. 100 iterations seems about right.
+	 * We loop tightly for awhile, then delay using select() and try
+	 * again. Preferably, "awhile" should be a small multiple of the
+	 * maximum time we expect a spinlock to be held. 100 iterations seems
+	 * about right.
 	 *
 	 * We use a 10 millisec select delay because that is the lower limit on
-	 * many platforms. The timeout is figured on this delay only, and so the
-	 * nominal 1 minute is a lower bound.
+	 * many platforms. The timeout is figured on this delay only, and so
+	 * the nominal 1 minute is a lower bound.
 	 */
 #define SPINS_PER_DELAY 100
 #define DELAY_MSEC 10
@@ -108,7 +109,6 @@ _success: \n\
 	rts \n\
 ");
 }
-
 #endif /* __m68k__ */
 
 #if defined(__APPLE__) && defined(__ppc__)
@@ -138,7 +138,6 @@ success: \n\
 	blr \n\
 ");
 }
-
 #endif /* __APPLE__ && __ppc__ */
 
 #if defined(__powerpc__)
@@ -163,7 +162,6 @@ success: \n\
 	blr \n\
 ");
 }
-
 #endif /* __powerpc__ */
 
 #if defined(__mips__) && !defined(__sgi)
@@ -171,7 +169,7 @@ static void
 tas_dummy()
 {
 	__asm__ __volatile__(
-		"\
+		"\
 .global tas \n\
 tas: \n\
 	.frame $sp, 0, $31 \n\
@@ -188,10 +186,9 @@ fail: \n\
 	j $31 \n\
 ");
 }
-
 #endif /* __mips__ && !__sgi */
 
-#else /* not __GNUC__ */
+#else /* not __GNUC__ */
 /***************************************************************************
 * All non gcc
 */
@@ -220,7 +217,6 @@ tas_dummy() /* really means: extern int tas(slock_t
 	asm(" rts");
 	asm(" .data");
 }
-
 #endif /* sun3 */
 
 
@@ -244,7 +240,6 @@ tas_dummy() /* really means: extern int tas(slock_t
 	asm("retl");
 	asm("nop");
 }
-
 #endif /* NEED_SPARC_TAS_ASM */
 
 
@@ -255,7 +250,6 @@ tas_dummy() /* really means: extern int tas(slock_t
 #endif /* NEED_I386_TAS_ASM */
 
 
-
 #endif /* not __GNUC__ */
 
 
@@ -298,5 +292,4 @@ main()
 	exit(3);
 
 }
-
 #endif /* S_LOCK_TEST */

@@ -5,8 +5,8 @@
  *
  *
  * For machines that have test-and-set (TAS) instructions, s_lock.h/.c
- * define the spinlock implementation. This file contains only a stub
- * implementation for spinlocks using SysV semaphores. The semaphore method
+ * define the spinlock implementation. This file contains only a stub
+ * implementation for spinlocks using SysV semaphores. The semaphore method
  * is too slow to be very useful :-(
  *
 *
@@ -15,7 +15,7 @@
  *
  *
  * IDENTIFICATION
- *	$Header: /cvsroot/pgsql/src/backend/storage/lmgr/spin.c,v 1.4 2001/10/01 18:16:35 tgl Exp $
+ *	$Header: /cvsroot/pgsql/src/backend/storage/lmgr/spin.c,v 1.5 2001/10/25 05:49:42 momjian Exp $
  *
 *-------------------------------------------------------------------------
 */
@@ -92,8 +92,8 @@ CreateSpinlocks(void)
 {
 	/*
 	 * Compute number of spinlocks needed.  It would be cleaner to
-	 * distribute this logic into the affected modules,
-	 * similar to the way shmem space estimation is handled.
+	 * distribute this logic into the affected modules, similar to the
+	 * way shmem space estimation is handled.
 	 *
 	 * For now, though, we just need a few spinlocks (10 should be
 	 * plenty) plus one for each LWLock.
@@ -186,5 +186,4 @@ tas_sema(volatile slock_t *lock)
 	/* Note that TAS macros return 0 if *success* */
 	return !IpcSemaphoreTryLock(lock->semId, lock->sem);
 }
-
 #endif /* !HAS_TEST_AND_SET */

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	$Header: /cvsroot/pgsql/src/backend/storage/page/bufpage.c,v 1.38 2001/10/23 02:20:15 tgl Exp $
+ *	$Header: /cvsroot/pgsql/src/backend/storage/page/bufpage.c,v 1.39 2001/10/25 05:49:42 momjian Exp $
  *
 *-------------------------------------------------------------------------
 */
@@ -125,7 +125,6 @@ PageAddItem(Page page,
 	}
 	else
 	{
-
 		/*
 		 * Don't actually do the shuffle till we've checked free
 		 * space!
@@ -264,7 +263,7 @@ itemoffcompare(const void *itemidp1, const void *itemidp2)
 * It doesn't remove unused line pointers! Please don't change this.
 * This routine is usable for heap pages only.
 *
- * Returns number of unused line pointers on page. If "unused" is not NULL
+ * Returns number of unused line pointers on page. If "unused" is not NULL
 * then the unused[] array is filled with indexes of unused line pointers.
 */
 int
@@ -283,11 +282,11 @@ PageRepairFragmentation(Page page, OffsetNumber *unused)
 	Offset upper;
 
 	/*
-	 * It's worth the trouble to be more paranoid here than in most places,
-	 * because we are about to reshuffle data in (what is usually) a shared
-	 * disk buffer.  If we aren't careful then corrupted pointers, lengths,
-	 * etc could cause us to clobber adjacent disk buffers, spreading the
-	 * data loss further.  So, check everything.
+	 * It's worth the trouble to be more paranoid here than in most
+	 * places, because we are about to reshuffle data in (what is usually)
+	 * a shared disk buffer.  If we aren't careful then corrupted
+	 * pointers, lengths, etc could cause us to clobber adjacent disk
+	 * buffers, spreading the data loss further.  So, check everything.
 	 */
 	if (pd_lower < (sizeof(PageHeaderData) - sizeof(ItemIdData)) ||
 		pd_lower > pd_upper ||
@@ -344,7 +343,7 @@ PageRepairFragmentation(Page page, OffsetNumber *unused)
 		}
 		else
 		{
-			(*lp).lp_len = 0; /* indicate unused & deallocated */
+			(*lp).lp_len = 0; /* indicate unused & deallocated */
 		}
 	}
 
@@ -390,7 +389,7 @@ PageGetFreeSpace(Page page)
 
 	if (space < sizeof(ItemIdData))
 		return 0;
-	space -= sizeof(ItemIdData); /* XXX not always appropriate */
+	space -= sizeof(ItemIdData); /* XXX not always appropriate */
 
 	return space;
 }

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	$Header: /cvsroot/pgsql/src/backend/storage/smgr/md.c,v 1.87 2001/08/24 14:07:49 petere Exp $
+ *	$Header: /cvsroot/pgsql/src/backend/storage/smgr/md.c,v 1.88 2001/10/25 05:49:42 momjian Exp $
  *
 *-------------------------------------------------------------------------
 */
@@ -58,7 +58,7 @@ typedef struct _MdfdVec
 
 	int mdfd_nextFree; /* link to next freelist member, if free */
 #ifndef LET_OS_MANAGE_FILESIZE
-	struct _MdfdVec *mdfd_chain;/* for large relations */
+	struct _MdfdVec *mdfd_chain; /* for large relations */
 #endif
 } MdfdVec;
 
@@ -194,7 +194,7 @@ mdunlink(RelFileNode rnode)
 	if (status == SM_SUCCESS)
 	{
 		char *segpath = (char *) palloc(strlen(path) + 12);
-		BlockNumber segno;
+		BlockNumber segno;
 
 		for (segno = 1;; segno++)
 		{
@@ -258,11 +258,11 @@ mdextend(Relation reln, BlockNumber blocknum, char *buffer)
 	/*
 	 * Note: because caller obtained blocknum by calling mdnblocks, which
 	 * did a seek(SEEK_END), this seek is often redundant and will be
-	 * optimized away by fd.c. It's not redundant, however, if there is a
-	 * partial page at the end of the file. In that case we want to try to
-	 * overwrite the partial page with a full page. It's also not redundant
-	 * if bufmgr.c had to dump another buffer of the same file to make room
-	 * for the new page's buffer.
+	 * optimized away by fd.c. It's not redundant, however, if there is a
+	 * partial page at the end of the file. In that case we want to try
+	 * to overwrite the partial page with a full page. It's also not
+	 * redundant if bufmgr.c had to dump another buffer of the same file
+	 * to make room for the new page's buffer.
 	 */
 	if (FileSeek(v->mdfd_vfd, seekpos, SEEK_SET) != seekpos)
 		return SM_FAIL;
@@ -271,7 +271,7 @@ mdextend(Relation reln, BlockNumber blocknum, char *buffer)
 	{
 		if (nbytes > 0)
 		{
-			int save_errno = errno;
+			int save_errno = errno;
 
 			/* Remove the partially-written page */
 			FileTruncate(v->mdfd_vfd, seekpos);
@@ -309,7 +309,6 @@ mdopen(Relation reln)
 
 	if (fd < 0)
 	{
-
 		/*
 		 * During bootstrap, there are cases where a system relation will
 		 * be accessed (by internal backend processes) before the
@@ -383,7 +382,6 @@ mdclose_fd(int fd)
 		/* if not closed already */
 		if (v->mdfd_vfd >= 0)
 		{
-
 			/*
 			 * We sync the file descriptor so that we don't need to reopen
 			 * it at transaction commit to force changes to disk. (This
@@ -406,7 +404,6 @@ mdclose_fd(int fd)
 	{
 		if (v->mdfd_vfd >= 0)
 		{
-
 			/*
 			 * We sync the file descriptor so that we don't need to reopen
 			 * it at transaction commit to force changes to disk. (This
@@ -455,8 +452,8 @@ mdread(Relation reln, BlockNumber blocknum, char *buffer)
 	if ((nbytes = FileRead(v->mdfd_vfd, buffer, BLCKSZ)) != BLCKSZ)
 	{
 		/*
-		 * If we are at EOF, return zeroes without complaining.
-		 * (XXX Is this still necessary/a good idea??)
+		 * If we are at EOF, return zeroes without complaining. (XXX Is
+		 * this still necessary/a good idea??)
 		 */
 		if (nbytes == 0 ||
 			(nbytes > 0 && mdnblocks(reln) == blocknum))
@@ -664,9 +661,10 @@ mdnblocks(Relation reln)
 {
 	int fd;
 	MdfdVec *v;
+
 #ifndef LET_OS_MANAGE_FILESIZE
-	BlockNumber nblocks;
-	BlockNumber segno;
+	BlockNumber nblocks;
+	BlockNumber segno;
 #endif
 
 	fd = _mdfd_getrelnfd(reln);
@@ -681,6 +679,7 @@ mdnblocks(Relation reln)
 			elog(FATAL, "segment too big in mdnblocks!");
 		if (nblocks < ((BlockNumber) RELSEG_SIZE))
 			return (segno * ((BlockNumber) RELSEG_SIZE)) + nblocks;
+
 		/*
 		 * If segment is exactly RELSEG_SIZE, advance to next one.
 		 */
@@ -689,11 +688,11 @@ mdnblocks(Relation reln)
 		if (v->mdfd_chain == (MdfdVec *) NULL)
 		{
 			/*
-			 * Because we pass O_CREAT, we will create the next
-			 * segment (with zero length) immediately, if the last
-			 * segment is of length REL_SEGSIZE. This is unnecessary
-			 * but harmless, and testing for the case would take more
-			 * cycles than it seems worth.
+			 * Because we pass O_CREAT, we will create the next segment
+			 * (with zero length) immediately, if the last segment is of
+			 * length REL_SEGSIZE. This is unnecessary but harmless, and
+			 * testing for the case would take more cycles than it seems
+			 * worth.
 			 */
 			v->mdfd_chain = _mdfd_openseg(reln, segno, O_CREAT);
 			if (v->mdfd_chain == (MdfdVec *) NULL)
@@ -718,9 +717,10 @@ mdtruncate(Relation reln, BlockNumber nblocks)
 {
 	int fd;
 	MdfdVec *v;
-	BlockNumber curnblk;
+	BlockNumber curnblk;
+
 #ifndef LET_OS_MANAGE_FILESIZE
-	BlockNumber priorblocks;
+	BlockNumber priorblocks;
 #endif
 
 	/*
@@ -729,7 +729,7 @@ mdtruncate(Relation reln, BlockNumber nblocks)
 	 */
 	curnblk = mdnblocks(reln);
 	if (nblocks > curnblk)
-		return InvalidBlockNumber; /* bogus request */
+		return InvalidBlockNumber; /* bogus request */
 	if (nblocks == curnblk)
 		return nblocks; /* no work */
 
@@ -768,7 +768,7 @@ mdtruncate(Relation reln, BlockNumber nblocks)
 			 * truncate the K+1st segment to 0 length but keep it. This is
 			 * mainly so that the right thing happens if nblocks==0.
 			 */
-			BlockNumber lastsegblocks = nblocks - priorblocks;
+			BlockNumber lastsegblocks = nblocks - priorblocks;
 
 			if (FileTruncate(v->mdfd_vfd, lastsegblocks * BLCKSZ) < 0)
 				return InvalidBlockNumber;
@@ -838,7 +838,6 @@ mdcommit()
 int
 mdabort()
 {
-
 	/*
 	 * We don't actually have to do anything here. fd.c will discard
 	 * fsync-needed bits in its AtEOXact_Files() routine.
@@ -1004,9 +1003,10 @@ _mdfd_getseg(Relation reln, BlockNumber blkno)
 {
 	MdfdVec *v;
 	int fd;
+
 #ifndef LET_OS_MANAGE_FILESIZE
-	BlockNumber segno;
-	BlockNumber i;
+	BlockNumber segno;
+	BlockNumber i;
 #endif
 
 	fd = _mdfd_getrelnfd(reln);
@@ -1019,7 +1019,6 @@ _mdfd_getseg(Relation reln, BlockNumber blkno)
 
 		if (v->mdfd_chain == (MdfdVec *) NULL)
 		{
-
 			/*
 			 * We will create the next segment only if the target block is
 			 * within it.  This prevents Sorcerer's Apprentice syndrome if
@@ -1063,8 +1062,9 @@ _mdfd_blind_getseg(RelFileNode rnode, BlockNumber blkno)
 {
 	char *path;
 	int fd;
+
 #ifndef LET_OS_MANAGE_FILESIZE
-	BlockNumber segno;
+	BlockNumber segno;
 #endif
 
 	path = relpath(rnode);

@@ -11,7 +11,7 @@
  *
  *
  * IDENTIFICATION
- *	$Header: /cvsroot/pgsql/src/backend/storage/smgr/Attic/mm.c,v 1.27 2001/10/05 17:28:12 tgl Exp $
+ *	$Header: /cvsroot/pgsql/src/backend/storage/smgr/Attic/mm.c,v 1.28 2001/10/25 05:49:42 momjian Exp $
  *
 *-------------------------------------------------------------------------
 */
@@ -481,7 +481,7 @@ mmnblocks(Relation reln)
 {
 	MMRelTag rtag;
 	MMRelHashEntry *rentry;
-	BlockNumber nblocks;
+	BlockNumber nblocks;
 
 	if (reln->rd_rel->relisshared)
 		rtag.mmrt_dbid = (Oid) 0;
@@ -561,5 +561,4 @@ MMShmemSize()
 
 	return size;
 }
-
 #endif /* STABLE_MEMORY_STORAGE */

@@ -11,7 +11,7 @@
  *
  *
  * IDENTIFICATION
- *	$Header: /cvsroot/pgsql/src/backend/storage/smgr/smgr.c,v 1.53 2001/09/29 04:02:25 tgl Exp $
+ *	$Header: /cvsroot/pgsql/src/backend/storage/smgr/smgr.c,v 1.54 2001/10/25 05:49:43 momjian Exp $
  *
 *-------------------------------------------------------------------------
 */
@@ -33,21 +33,21 @@ typedef struct f_smgr
 	int (*smgr_create) (Relation reln);
 	int (*smgr_unlink) (RelFileNode rnode);
 	int (*smgr_extend) (Relation reln, BlockNumber blocknum,
-						char *buffer);
+						char *buffer);
 	int (*smgr_open) (Relation reln);
 	int (*smgr_close) (Relation reln);
 	int (*smgr_read) (Relation reln, BlockNumber blocknum,
-					  char *buffer);
+					  char *buffer);
 	int (*smgr_write) (Relation reln, BlockNumber blocknum,
-					   char *buffer);
+					   char *buffer);
 	int (*smgr_flush) (Relation reln, BlockNumber blocknum,
-					   char *buffer);
+					   char *buffer);
 	int (*smgr_blindwrt) (RelFileNode rnode, BlockNumber blkno,
-						  char *buffer, bool dofsync);
+						  char *buffer, bool dofsync);
 	int (*smgr_markdirty) (Relation reln, BlockNumber blkno);
 	int (*smgr_blindmarkdirty) (RelFileNode, BlockNumber blkno);
-	BlockNumber (*smgr_nblocks) (Relation reln);
-	BlockNumber (*smgr_truncate) (Relation reln, BlockNumber nblocks);
+	BlockNumber (*smgr_nblocks) (Relation reln);
+	BlockNumber (*smgr_truncate) (Relation reln, BlockNumber nblocks);
 	int (*smgr_commit) (void); /* may be NULL */
 	int (*smgr_abort) (void); /* may be NULL */
 	int (*smgr_sync) (void);
@@ -71,7 +71,6 @@ static f_smgr smgrsw[] = {
 	{mminit, mmshutdown, mmcreate, mmunlink, mmextend, mmopen, mmclose,
 		mmread, mmwrite, mmflush, mmblindwrt, mmmarkdirty, mmblindmarkdirty,
 	mmnblocks, NULL, mmcommit, mmabort},
-
 #endif
 };
 
@@ -89,7 +88,6 @@ static bool smgrwo[] = {
 	false, /* main memory */
 #endif
 };
-
 #endif
 
 static int NSmgr = lengthof(smgrsw);
@@ -438,9 +436,10 @@ smgrblindmarkdirty(int16 which,
 BlockNumber
 smgrnblocks(int16 which, Relation reln)
 {
-	BlockNumber nblocks;
+	BlockNumber nblocks;
 
 	nblocks = (*(smgrsw[which].smgr_nblocks)) (reln);
+
 	/*
 	 * NOTE: if a relation ever did grow to 2^32-1 blocks, this code would
 	 * fail --- but that's a good thing, because it would stop us from
@@ -464,15 +463,15 @@ smgrnblocks(int16 which, Relation reln)
 BlockNumber
 smgrtruncate(int16 which, Relation reln, BlockNumber nblocks)
 {
-	BlockNumber newblks;
+	BlockNumber newblks;
 
 	newblks = nblocks;
 	if (smgrsw[which].smgr_truncate)
 	{
 		/*
 		 * Tell the free space map to forget anything it may have stored
-		 * for the about-to-be-deleted blocks. We want to be sure it won't
-		 * return bogus block numbers later on.
+		 * for the about-to-be-deleted blocks. We want to be sure it
+		 * won't return bogus block numbers later on.
		 */
 		MultiRecordFreeSpace(&reln->rd_node,
							 nblocks, MaxBlockNumber,
@@ -601,7 +600,6 @@ smgriswo(int16 smgrno)
 
 	return smgrwo[smgrno];
 }
-
 #endif
 
 void