Mirror of https://github.com/postgres/postgres.git

Ye-old pgindent run. Same 4-space tabs.
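The changes below are mechanical: pgindent reflows block comments to the standard width, tightens unary operators (`if (! insertOK)` becomes `if (!insertOK)`, `& relation->rd_lockInfo.lockRelId` becomes `&relation->rd_lockInfo.lockRelId`), normalizes `return(x)` to `return (x)`, and moves the return type of a function definition onto its own line. A minimal before/after sketch of these rewrites, using hypothetical code that is not from this commit:

/*
 * Hypothetical example only -- illustrates the pgindent rewrites seen
 * in the diff below, not actual PostgreSQL source.
 */
#include <stdio.h>

/* Before: the style pgindent rewrites. */
static int is_clean_before(int *flags)
{
    if (! (*flags & 1))
        return(1);
    return(0);
}

/* After: unary operators hug their operand, "return(v)" gains a space,
 * and the return type moves onto its own line. */
static int
is_clean_after(int *flags)
{
    if (!(*flags & 1))
        return (1);
    return (0);
}

int
main(void)
{
    int flags = 0;

    /* Both versions behave identically; only layout differs. */
    printf("%d %d\n", is_clean_before(&flags), is_clean_after(&flags));
    return 0;
}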
src/backend/storage/buffer/buf_init.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/buffer/buf_init.c,v 1.33 2000/04/09 04:43:18 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/buffer/buf_init.c,v 1.34 2000/04/12 17:15:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -66,8 +66,9 @@ extern IpcSemaphoreId WaitIOSemId;

long *PrivateRefCount; /* also used in freelist.c */
bits8 *BufferLocks; /* flag bits showing locks I have set */
BufferTag *BufferTagLastDirtied; /* tag buffer had when last dirtied by me */
BufferBlindId *BufferBlindLastDirtied; /* and its BlindId too */
BufferTag *BufferTagLastDirtied; /* tag buffer had when last
* dirtied by me */
BufferBlindId *BufferBlindLastDirtied; /* and its BlindId too */
bool *BufferDirtiedByMe; /* T if buf has been dirtied in cur xact */


@@ -242,7 +243,7 @@ InitBufferPool(IPCKey key)
elog(FATAL, "InitBufferPool: IpcSemaphoreCreate(WaitIOSemId) failed");
WaitCLSemId = IpcSemaphoreCreate(IPCKeyGetWaitCLSemaphoreKey(key),
1, IPCProtection,
IpcSemaphoreDefaultStartValue, 1);
IpcSemaphoreDefaultStartValue, 1);
if (WaitCLSemId < 0)
elog(FATAL, "InitBufferPool: IpcSemaphoreCreate(WaitCLSemId) failed");
}

src/backend/storage/buffer/bufmgr.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/buffer/bufmgr.c,v 1.79 2000/04/10 23:41:49 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/buffer/bufmgr.c,v 1.80 2000/04/12 17:15:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -69,7 +69,7 @@ extern long int LocalBufferFlushCount;
* marking shared buffer as dirty. We set it to false in xact.c
* after transaction is committed/aborted.
*/
bool SharedBufferChanged = false;
bool SharedBufferChanged = false;

static void WaitIO(BufferDesc *buf, SPINLOCK spinlock);
static void StartBufferIO(BufferDesc *buf, bool forInput);
@@ -80,7 +80,7 @@ extern void AbortBufferIO(void);

/*
* Macro : BUFFER_IS_BROKEN
* Note that write error doesn't mean the buffer broken
* Note that write error doesn't mean the buffer broken
*/
#define BUFFER_IS_BROKEN(buf) ((buf->flags & BM_IO_ERROR) && !(buf->flags & BM_DIRTY))

@@ -117,7 +117,7 @@ RelationGetBufferWithBuffer(Relation relation,
{
if (!BufferIsLocal(buffer))
{
LockRelId *lrelId = & relation->rd_lockInfo.lockRelId;
LockRelId *lrelId = &relation->rd_lockInfo.lockRelId;

bufHdr = &BufferDescriptors[buffer - 1];
SpinAcquire(BufMgrLock);
@@ -381,29 +381,29 @@ BufferAlloc(Relation reln,
inProgress = (buf->flags & BM_IO_IN_PROGRESS);

*foundPtr = TRUE;
if (inProgress) /* confirm end of IO */
if (inProgress) /* confirm end of IO */
{
WaitIO(buf, BufMgrLock);
inProgress = (buf->flags & BM_IO_IN_PROGRESS);
}
if (BUFFER_IS_BROKEN(buf))
{
/* I couldn't understand the following old comment.
* If there's no IO for the buffer and the buffer
* is BROKEN,it should be read again. So start a
* new buffer IO here.

/*
* I couldn't understand the following old comment. If there's
* no IO for the buffer and the buffer is BROKEN,it should be
* read again. So start a new buffer IO here.
*
* wierd race condition:
*
* We were waiting for someone else to read the buffer. While
* we were waiting, the reader boof'd in some way, so the
* contents of the buffer are still invalid. By saying
* that we didn't find it, we can make the caller
* reinitialize the buffer. If two processes are waiting
* for this block, both will read the block. The second
* one to finish may overwrite any updates made by the
* first. (Assume higher level synchronization prevents
* this from happening).
* We were waiting for someone else to read the buffer. While we
* were waiting, the reader boof'd in some way, so the
* contents of the buffer are still invalid. By saying that
* we didn't find it, we can make the caller reinitialize the
* buffer. If two processes are waiting for this block, both
* will read the block. The second one to finish may
* overwrite any updates made by the first. (Assume higher
* level synchronization prevents this from happening).
*
* This is never going to happen, don't worry about it.
*/
@@ -483,7 +483,7 @@ BufferAlloc(Relation reln,
if (smok == FALSE)
{
elog(NOTICE, "BufferAlloc: cannot write block %u for %s/%s",
buf->tag.blockNum, buf->blind.dbname, buf->blind.relname);
buf->tag.blockNum, buf->blind.dbname, buf->blind.relname);
inProgress = FALSE;
buf->flags |= BM_IO_ERROR;
buf->flags &= ~BM_IO_IN_PROGRESS;
@@ -628,9 +628,7 @@ BufferAlloc(Relation reln,
* attempted, so the flag isnt set.
*/
if (!inProgress)
{
StartBufferIO(buf, true);
}
else
ContinueBufferIO(buf, true);

@@ -777,7 +775,7 @@ FlushBuffer(Buffer buffer, bool release)
if (BAD_BUFFER_ID(buffer))
return STATUS_ERROR;

Assert(PrivateRefCount[buffer - 1] > 0); /* else caller didn't pin */
Assert(PrivateRefCount[buffer - 1] > 0); /* else caller didn't pin */

bufHdr = &BufferDescriptors[buffer - 1];
bufdb = bufHdr->tag.relId.dbId;
@@ -790,9 +788,9 @@ FlushBuffer(Buffer buffer, bool release)

/* To check if block content changed while flushing. - vadim 01/17/97 */
SpinAcquire(BufMgrLock);
WaitIO(bufHdr, BufMgrLock); /* confirm end of IO */
WaitIO(bufHdr, BufMgrLock); /* confirm end of IO */
bufHdr->flags &= ~BM_JUST_DIRTIED;
StartBufferIO(bufHdr, false); /* output IO start */
StartBufferIO(bufHdr, false); /* output IO start */
SpinRelease(BufMgrLock);

status = smgrflush(DEFAULT_SMGR, bufrel, bufHdr->tag.blockNum,
@@ -810,12 +808,13 @@ FlushBuffer(Buffer buffer, bool release)
BufferFlushCount++;

SpinAcquire(BufMgrLock);
bufHdr->flags &= ~BM_IO_IN_PROGRESS; /* mark IO finished */
TerminateBufferIO(bufHdr); /* output IO finished */
bufHdr->flags &= ~BM_IO_IN_PROGRESS; /* mark IO finished */
TerminateBufferIO(bufHdr); /* output IO finished */

/*
* If this buffer was marked by someone as DIRTY while we were
* flushing it out we must not clear shared DIRTY flag - vadim 01/17/97
* flushing it out we must not clear shared DIRTY flag - vadim
* 01/17/97
*
* ... but we can clear BufferDirtiedByMe anyway - tgl 3/31/00
*/
@@ -927,7 +926,7 @@ ReleaseAndReadBuffer(Buffer buffer,
static void
SetBufferDirtiedByMe(Buffer buffer, BufferDesc *bufHdr)
{
BufferTag *tagLastDirtied = & BufferTagLastDirtied[buffer - 1];
BufferTag *tagLastDirtied = &BufferTagLastDirtied[buffer - 1];
Relation reln;
int status;

@@ -953,8 +952,8 @@ SetBufferDirtiedByMe(Buffer buffer, BufferDesc *bufHdr)
if (reln == (Relation) NULL)
{
status = smgrblindmarkdirty(DEFAULT_SMGR,
BufferBlindLastDirtied[buffer - 1].dbname,
BufferBlindLastDirtied[buffer - 1].relname,
BufferBlindLastDirtied[buffer - 1].dbname,
BufferBlindLastDirtied[buffer - 1].relname,
tagLastDirtied->relId.dbId,
tagLastDirtied->relId.relId,
tagLastDirtied->blockNum);
@@ -963,7 +962,11 @@ SetBufferDirtiedByMe(Buffer buffer, BufferDesc *bufHdr)
{
status = smgrmarkdirty(DEFAULT_SMGR, reln,
tagLastDirtied->blockNum);
/* drop relcache refcnt incremented by RelationIdCacheGetRelation */

/*
* drop relcache refcnt incremented by
* RelationIdCacheGetRelation
*/
RelationDecrementReferenceCount(reln);
}
if (status == SM_FAIL)
@@ -996,25 +999,23 @@ SetBufferDirtiedByMe(Buffer buffer, BufferDesc *bufHdr)
static void
ClearBufferDirtiedByMe(Buffer buffer, BufferDesc *bufHdr)
{
BufferTag *tagLastDirtied = & BufferTagLastDirtied[buffer - 1];
BufferTag *tagLastDirtied = &BufferTagLastDirtied[buffer - 1];

/*
* Do *not* clear the flag if it refers to some other buffertag than
* the data we just wrote. This is unlikely, but possible if some
* the data we just wrote. This is unlikely, but possible if some
* other backend replaced the buffer contents since we set our flag.
*/
if (bufHdr->tag.relId.dbId == tagLastDirtied->relId.dbId &&
bufHdr->tag.relId.relId == tagLastDirtied->relId.relId &&
bufHdr->tag.blockNum == tagLastDirtied->blockNum)
{
BufferDirtiedByMe[buffer - 1] = false;
}
}

/*
* BufferSync -- Flush all dirty buffers in the pool.
*
* This is called at transaction commit time. We find all buffers
* This is called at transaction commit time. We find all buffers
* that have been dirtied by the current xact and flush them to disk.
* We do *not* flush dirty buffers that have been dirtied by other xacts.
* (This is a substantial change from pre-7.0 behavior.)
@@ -1044,16 +1045,16 @@ BufferSync()
for (i = 0, bufHdr = BufferDescriptors; i < NBuffers; i++, bufHdr++)
{
/* Ignore buffers that were not dirtied by me */
if (! BufferDirtiedByMe[i])
if (!BufferDirtiedByMe[i])
continue;

SpinAcquire(BufMgrLock);

/*
* We only need to write if the buffer is still dirty and still
* contains the same disk page that it contained when we dirtied it.
* Otherwise, someone else has already written our changes for us,
* and we need only fsync.
* contains the same disk page that it contained when we dirtied
* it. Otherwise, someone else has already written our changes for
* us, and we need only fsync.
*
* (NOTE: it's still possible to do an unnecessary write, if other
* xacts have written and then re-dirtied the page since our last
@@ -1073,6 +1074,7 @@ BufferSync()
bufrel == BufferTagLastDirtied[i].relId.relId &&
bufHdr->tag.blockNum == BufferTagLastDirtied[i].blockNum)
{

/*
* Try to find relation for buf. This could fail, if the
* rel has been flushed from the relcache since we dirtied
@@ -1103,19 +1105,20 @@ BufferSync()
}
else
{
/*
* To check if block content changed while flushing (see
* below). - vadim 01/17/97
*/
WaitIO(bufHdr, BufMgrLock); /* confirm end of IO */
bufHdr->flags &= ~BM_JUST_DIRTIED;
StartBufferIO(bufHdr, false); /* output IO start */

/*
* If we didn't have the reldesc in our local cache, write
* this page out using the 'blind write' storage manager
* routine. If we did find it, use the standard
* interface.
* To check if block content changed while flushing
* (see below). - vadim 01/17/97
*/
WaitIO(bufHdr, BufMgrLock); /* confirm end of IO */
bufHdr->flags &= ~BM_JUST_DIRTIED;
StartBufferIO(bufHdr, false); /* output IO start */

/*
* If we didn't have the reldesc in our local cache,
* write this page out using the 'blind write' storage
* manager routine. If we did find it, use the
* standard interface.
*/
#ifndef OPTIMIZE_SINGLE
SpinRelease(BufMgrLock);
@@ -1127,14 +1130,14 @@ BufferSync()
bufHdr->blind.relname,
bufdb, bufrel,
bufHdr->tag.blockNum,
(char *) MAKE_PTR(bufHdr->data),
true); /* must fsync */
(char *) MAKE_PTR(bufHdr->data),
true); /* must fsync */
}
else
{
status = smgrwrite(DEFAULT_SMGR, reln,
bufHdr->tag.blockNum,
(char *) MAKE_PTR(bufHdr->data));
(char *) MAKE_PTR(bufHdr->data));
}
#ifndef OPTIMIZE_SINGLE
SpinAcquire(BufMgrLock);
@@ -1147,15 +1150,15 @@ BufferSync()
elog(ERROR, "BufferSync: cannot write %u for %s",
bufHdr->tag.blockNum, bufHdr->blind.relname);
}
bufHdr->flags &= ~BM_IO_IN_PROGRESS; /* mark IO finished */
bufHdr->flags &= ~BM_IO_IN_PROGRESS; /* mark IO finished */
TerminateBufferIO(bufHdr); /* Sync IO finished */
BufferFlushCount++;
didwrite = true;

/*
* If this buffer was marked by someone as DIRTY while we
* were flushing it out we must not clear DIRTY flag -
* vadim 01/17/97
* If this buffer was marked by someone as DIRTY while
* we were flushing it out we must not clear DIRTY
* flag - vadim 01/17/97
*
* but it is OK to clear BufferDirtiedByMe - tgl 3/31/00
*/
@@ -1170,11 +1173,11 @@ BufferSync()
}

/*
* If we did not write the buffer (because someone else did),
* we must still fsync the file containing it, to ensure that the
* If we did not write the buffer (because someone else did), we
* must still fsync the file containing it, to ensure that the
* write is down to disk before we commit.
*/
if (! didwrite)
if (!didwrite)
{
#ifndef OPTIMIZE_SINGLE
SpinRelease(BufMgrLock);
@@ -1184,17 +1187,21 @@ BufferSync()
if (reln == (Relation) NULL)
{
status = smgrblindmarkdirty(DEFAULT_SMGR,
BufferBlindLastDirtied[i].dbname,
BufferBlindLastDirtied[i].relname,
BufferTagLastDirtied[i].relId.dbId,
BufferTagLastDirtied[i].relId.relId,
BufferTagLastDirtied[i].blockNum);
BufferBlindLastDirtied[i].dbname,
BufferBlindLastDirtied[i].relname,
BufferTagLastDirtied[i].relId.dbId,
BufferTagLastDirtied[i].relId.relId,
BufferTagLastDirtied[i].blockNum);
}
else
{
status = smgrmarkdirty(DEFAULT_SMGR, reln,
BufferTagLastDirtied[i].blockNum);
/* drop relcache refcnt incremented by RelationIdCacheGetRelation */

/*
* drop relcache refcnt incremented by
* RelationIdCacheGetRelation
*/
RelationDecrementReferenceCount(reln);

}
@@ -1241,6 +1248,7 @@ BufferSync()
static void
WaitIO(BufferDesc *buf, SPINLOCK spinlock)
{

/*
* Changed to wait until there's no IO - Inoue 01/13/2000
*/
@@ -1369,13 +1377,13 @@ ResetBufferPool(bool isCommit)
}
PrivateRefCount[i] = 0;

if (! isCommit)
if (!isCommit)
BufferDirtiedByMe[i] = false;
}

ResetLocalBufferPool();

if (! isCommit)
if (!isCommit)
smgrabort();
}

@@ -1531,7 +1539,7 @@ BufferReplace(BufferDesc *bufHdr)
bufHdr->blind.relname, bufdb, bufrel,
bufHdr->tag.blockNum,
(char *) MAKE_PTR(bufHdr->data),
false); /* no fsync */
false); /* no fsync */
}

#ifndef OPTIMIZE_SINGLE
@@ -1545,9 +1553,10 @@ BufferReplace(BufferDesc *bufHdr)
if (status == SM_FAIL)
return FALSE;

/* If we had marked this buffer as needing to be fsync'd, we can forget
* about that, because it's now the storage manager's responsibility
* (but only if we called smgrwrite, not smgrblindwrt).
/*
* If we had marked this buffer as needing to be fsync'd, we can
* forget about that, because it's now the storage manager's
* responsibility (but only if we called smgrwrite, not smgrblindwrt).
*/
if (reln != (Relation) NULL)
ClearBufferDirtiedByMe(BufferDescriptorGetBuffer(bufHdr), bufHdr);
@@ -1601,7 +1610,7 @@ ReleaseRelationBuffers(Relation rel)
{
buf = &LocalBufferDescriptors[i];
if (buf->tag.relId.relId == relid)
buf->flags &= ~ ( BM_DIRTY | BM_JUST_DIRTIED);
buf->flags &= ~(BM_DIRTY | BM_JUST_DIRTIED);
}
return;
}
@@ -1614,10 +1623,11 @@ ReleaseRelationBuffers(Relation rel)
SpinAcquire(BufMgrLock);
holding = true;
}
recheck:
recheck:
if (buf->tag.relId.dbId == MyDatabaseId &&
buf->tag.relId.relId == relid)
{

/*
* If there is I/O in progress, better wait till it's done;
* don't want to delete the relation out from under someone
@@ -1626,19 +1636,23 @@ ReleaseRelationBuffers(Relation rel)
if (buf->flags & BM_IO_IN_PROGRESS)
{
WaitIO(buf, BufMgrLock);
/* By now, the buffer very possibly belongs to some other

/*
* By now, the buffer very possibly belongs to some other
* rel, so check again before proceeding.
*/
goto recheck;
}
/* Now we can do what we came for */
buf->flags &= ~ ( BM_DIRTY | BM_JUST_DIRTIED);
buf->flags &= ~(BM_DIRTY | BM_JUST_DIRTIED);
ClearBufferDirtiedByMe(i, buf);

/*
* Release any refcount we may have.
*
* This is very probably dead code, and if it isn't then it's
* probably wrong. I added the Assert to find out --- tgl 11/99.
* probably wrong. I added the Assert to find out --- tgl
* 11/99.
*/
if (!(buf->flags & BM_FREE))
{
@@ -1663,7 +1677,7 @@ ReleaseRelationBuffers(Relation rel)
* This function marks all the buffers in the buffer cache for a
* particular database as clean. This is used when we destroy a
* database, to avoid trying to flush data to disk when the directory
* tree no longer exists. Implementation is pretty similar to
* tree no longer exists. Implementation is pretty similar to
* ReleaseRelationBuffers() which is for destroying just one relation.
* --------------------------------------------------------------------
*/
@@ -1677,9 +1691,10 @@ DropBuffers(Oid dbid)
for (i = 1; i <= NBuffers; i++)
{
buf = &BufferDescriptors[i - 1];
recheck:
recheck:
if (buf->tag.relId.dbId == dbid)
{

/*
* If there is I/O in progress, better wait till it's done;
* don't want to delete the database out from under someone
@@ -1688,17 +1703,20 @@ DropBuffers(Oid dbid)
if (buf->flags & BM_IO_IN_PROGRESS)
{
WaitIO(buf, BufMgrLock);
/* By now, the buffer very possibly belongs to some other

/*
* By now, the buffer very possibly belongs to some other
* DB, so check again before proceeding.
*/
goto recheck;
}
/* Now we can do what we came for */
buf->flags &= ~ ( BM_DIRTY | BM_JUST_DIRTIED);
buf->flags &= ~(BM_DIRTY | BM_JUST_DIRTIED);
ClearBufferDirtiedByMe(i, buf);

/*
* The thing should be free, if caller has checked that
* no backends are running in that database.
* The thing should be free, if caller has checked that no
* backends are running in that database.
*/
Assert(buf->flags & BM_FREE);
}
@@ -1796,7 +1814,7 @@ BufferPoolBlowaway()
*
* This function removes from the buffer pool all pages of a relation
* that have blocknumber >= specified block. Pages that are dirty are
* written out first. If expectDirty is false, a notice is emitted
* written out first. If expectDirty is false, a notice is emitted
* warning of dirty buffers, but we proceed anyway. An error code is
* returned if we fail to dump a dirty buffer or if we find one of
* the target pages is pinned into the cache.
@@ -1804,7 +1822,7 @@ BufferPoolBlowaway()
* This is used by VACUUM before truncating the relation to the given
* number of blocks. For VACUUM, we pass expectDirty = false since it
* could mean a bug in VACUUM if any of the unwanted pages were still
* dirty. (TRUNCATE TABLE also uses it in the same way.)
* dirty. (TRUNCATE TABLE also uses it in the same way.)
*
* This is also used by RENAME TABLE (with block=0 and expectDirty=true)
* to clear out the buffer cache before renaming the physical files of
@@ -1817,7 +1835,7 @@ BufferPoolBlowaway()
* more blocks of the relation.
*
* Formerly, we considered it an error condition if we found unexpectedly
* dirty buffers. However, since BufferSync no longer forces out all
* dirty buffers. However, since BufferSync no longer forces out all
* dirty buffers at every xact commit, it's possible for dirty buffers
* to still be present in the cache due to failure of an earlier
* transaction. So, downgrade the error to a mere notice. Maybe we
@@ -1845,11 +1863,11 @@ FlushRelationBuffers(Relation rel, BlockNumber block, bool expectDirty)
{
if (buf->flags & BM_DIRTY)
{
if (! expectDirty)
if (!expectDirty)
elog(NOTICE, "FlushRelationBuffers(%s (local), %u): block %u is dirty",
RelationGetRelationName(rel),
block, buf->tag.blockNum);
if (FlushBuffer(-i-1, false) != STATUS_OK)
if (FlushBuffer(-i - 1, false) != STATUS_OK)
{
elog(NOTICE, "FlushRelationBuffers(%s (local), %u): block %u is dirty, could not flush it",
RelationGetRelationName(rel),
@@ -1873,7 +1891,7 @@ FlushRelationBuffers(Relation rel, BlockNumber block, bool expectDirty)
SpinAcquire(BufMgrLock);
for (i = 0; i < NBuffers; i++)
{
recheck:
recheck:
buf = &BufferDescriptors[i];
if (buf->tag.relId.relId == RelationGetRelid(rel) &&
(buf->tag.relId.dbId == MyDatabaseId ||
@@ -1884,12 +1902,12 @@ FlushRelationBuffers(Relation rel, BlockNumber block, bool expectDirty)
{
PinBuffer(buf);
SpinRelease(BufMgrLock);
if (! expectDirty)
if (!expectDirty)
elog(NOTICE, "FlushRelationBuffers(%s, %u): block %u is dirty (private %ld, global %d)",
RelationGetRelationName(rel), block,
buf->tag.blockNum,
PrivateRefCount[i], buf->refcount);
if (FlushBuffer(i+1, true) != STATUS_OK)
if (FlushBuffer(i + 1, true) != STATUS_OK)
{
elog(NOTICE, "FlushRelationBuffers(%s, %u): block %u is dirty (private %ld, global %d), could not flush it",
RelationGetRelationName(rel), block,
@@ -1898,7 +1916,9 @@ FlushRelationBuffers(Relation rel, BlockNumber block, bool expectDirty)
return -1;
}
SpinAcquire(BufMgrLock);
/* Buffer could already be reassigned, so must recheck

/*
* Buffer could already be reassigned, so must recheck
* whether it still belongs to rel before freeing it!
*/
goto recheck;
@@ -2244,9 +2264,10 @@ UnlockBuffers()
}
if (BufferLocks[i] & BL_RI_LOCK)
{
/*
* Someone else could remove our RI lock when acquiring
* W lock. This is possible if we came here from elog(ERROR)

/*
* Someone else could remove our RI lock when acquiring W
* lock. This is possible if we came here from elog(ERROR)
* from IpcSemaphore{Lock|Unlock}(WaitCLSemId). And so we
* don't do Assert(buf->ri_lock) here.
*/
@@ -2270,7 +2291,7 @@ void
LockBuffer(Buffer buffer, int mode)
{
BufferDesc *buf;
bits8 *buflock;
bits8 *buflock;

Assert(BufferIsValid(buffer));
if (BufferIsLocal(buffer))
@@ -2335,11 +2356,12 @@ LockBuffer(Buffer buffer, int mode)
{
if (buf->r_locks > 3 || (*buflock & BL_RI_LOCK))
{

/*
* Our RI lock might be removed by concurrent W lock
* acquiring (see what we do with RI locks below
* when our own W acquiring succeeded) and so
* we set RI lock again if we already did this.
* acquiring (see what we do with RI locks below when our
* own W acquiring succeeded) and so we set RI lock again
* if we already did this.
*/
*buflock |= BL_RI_LOCK;
buf->ri_lock = true;
@@ -2358,9 +2380,10 @@ LockBuffer(Buffer buffer, int mode)
*buflock |= BL_W_LOCK;
if (*buflock & BL_RI_LOCK)
{

/*
* It's possible to remove RI locks acquired by another
* W lockers here, but they'll take care about it.
* It's possible to remove RI locks acquired by another W
* lockers here, but they'll take care about it.
*/
buf->ri_lock = false;
*buflock &= ~BL_RI_LOCK;
@@ -2384,36 +2407,39 @@ LockBuffer(Buffer buffer, int mode)
* i.e at most one io_in_progress spinlock is held
* per proc.
*/
static BufferDesc *InProgressBuf = (BufferDesc *)NULL;
static bool IsForInput;
static BufferDesc *InProgressBuf = (BufferDesc *) NULL;
static bool IsForInput;

/*
* Function:StartBufferIO
* (Assumptions)
* My process is executing no IO
* BufMgrLock is held
* BM_IO_IN_PROGRESS mask is not set for the buffer
* BM_IO_IN_PROGRESS mask is not set for the buffer
* The buffer is Pinned
*
*/
static void StartBufferIO(BufferDesc *buf, bool forInput)
static void
StartBufferIO(BufferDesc *buf, bool forInput)
{
Assert(!InProgressBuf);
Assert(!(buf->flags & BM_IO_IN_PROGRESS));
buf->flags |= BM_IO_IN_PROGRESS;
#ifdef HAS_TEST_AND_SET

/*
* There used to be
*
* Assert(S_LOCK_FREE(&(buf->io_in_progress_lock)));
*
* here, but that's wrong because of the way WaitIO works: someone else
* waiting for the I/O to complete will succeed in grabbing the lock for
* a few instructions, and if we context-swap back to here the Assert
* could fail. Tiny window for failure, but I've seen it happen -- tgl
* waiting for the I/O to complete will succeed in grabbing the lock
* for a few instructions, and if we context-swap back to here the
* Assert could fail. Tiny window for failure, but I've seen it
* happen -- tgl
*/
S_LOCK(&(buf->io_in_progress_lock));
#endif /* HAS_TEST_AND_SET */
#endif /* HAS_TEST_AND_SET */
InProgressBuf = buf;
IsForInput = forInput;
}
@@ -2426,7 +2452,8 @@ static void StartBufferIO(BufferDesc *buf, bool forInput)
* The buffer is Pinned
*
*/
static void TerminateBufferIO(BufferDesc *buf)
static void
TerminateBufferIO(BufferDesc *buf)
{
Assert(buf == InProgressBuf);
#ifdef HAS_TEST_AND_SET
@@ -2434,8 +2461,8 @@ static void TerminateBufferIO(BufferDesc *buf)
#else
if (buf->refcount > 1)
SignalIO(buf);
#endif /* HAS_TEST_AND_SET */
InProgressBuf = (BufferDesc *)0;
#endif /* HAS_TEST_AND_SET */
InProgressBuf = (BufferDesc *) 0;
}

/*
@@ -2446,36 +2473,38 @@ static void TerminateBufferIO(BufferDesc *buf)
* The buffer is Pinned
*
*/
static void ContinueBufferIO(BufferDesc *buf, bool forInput)
static void
ContinueBufferIO(BufferDesc *buf, bool forInput)
{
Assert(buf == InProgressBuf);
Assert(buf->flags & BM_IO_IN_PROGRESS);
IsForInput = forInput;
}

void InitBufferIO(void)
void
InitBufferIO(void)
{
InProgressBuf = (BufferDesc *)0;
InProgressBuf = (BufferDesc *) 0;
}

/*
* This function is called from ProcReleaseSpins().
* BufMgrLock isn't held when this function is called.
* BufMgrLock isn't held when this function is called.
* BM_IO_ERROR is always set. If BM_IO_ERROR was already
* set in case of output,this routine would kill all
* set in case of output,this routine would kill all
* backends and reset postmaster.
*/
void AbortBufferIO(void)
void
AbortBufferIO(void)
{
BufferDesc *buf = InProgressBuf;

if (buf)
{
Assert(buf->flags & BM_IO_IN_PROGRESS);
SpinAcquire(BufMgrLock);
if (IsForInput)
{
Assert(!(buf->flags & BM_DIRTY));
}
else
{
Assert((buf->flags & BM_DIRTY) != 0);

src/backend/storage/buffer/localbuf.c

@@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/buffer/localbuf.c,v 1.29 2000/01/26 05:56:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/buffer/localbuf.c,v 1.30 2000/04/12 17:15:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -111,7 +111,10 @@ LocalBufferAlloc(Relation reln, BlockNumber blockNum, bool *foundPtr)
(char *) MAKE_PTR(bufHdr->data));
LocalBufferFlushCount++;

/* drop relcache refcount incremented by RelationIdCacheGetRelation */
/*
* drop relcache refcount incremented by
* RelationIdCacheGetRelation
*/
RelationDecrementReferenceCount(bufrel);
}

src/backend/storage/file/buffile.c

@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/file/buffile.c,v 1.4 2000/01/26 05:56:55 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/file/buffile.c,v 1.5 2000/04/12 17:15:35 momjian Exp $
*
* NOTES:
*
@@ -23,11 +23,11 @@
* will go away automatically at transaction end. If the underlying
* virtual File is made with OpenTemporaryFile, then all resources for
* the file are certain to be cleaned up even if processing is aborted
* by elog(ERROR). To avoid confusion, the caller should take care that
* by elog(ERROR). To avoid confusion, the caller should take care that
* all calls for a single BufFile are made in the same palloc context.
*
* BufFile also supports temporary files that exceed the OS file size limit
* (by opening multiple fd.c temporary files). This is an essential feature
* (by opening multiple fd.c temporary files). This is an essential feature
* for sorts and hashjoins on large amounts of data.
*-------------------------------------------------------------------------
*/
@@ -56,15 +56,19 @@ struct BufFile
/* all files except the last have length exactly MAX_PHYSICAL_FILESIZE */
File *files; /* palloc'd array with numFiles entries */
long *offsets; /* palloc'd array with numFiles entries */
/* offsets[i] is the current seek position of files[i]. We use this

/*
* offsets[i] is the current seek position of files[i]. We use this
* to avoid making redundant FileSeek calls.
*/

bool isTemp; /* can only add files if this is TRUE */
bool dirty; /* does buffer need to be written? */

/*
* "current pos" is position of start of buffer within the logical file.
* Position as seen by user of BufFile is (curFile, curOffset + pos).
* "current pos" is position of start of buffer within the logical
* file. Position as seen by user of BufFile is (curFile, curOffset +
* pos).
*/
int curFile; /* file index (0..n) part of current pos */
int curOffset; /* offset part of current pos */
@@ -87,7 +91,7 @@ static int BufFileFlush(BufFile *file);
static BufFile *
makeBufFile(File firstfile)
{
BufFile *file = (BufFile *) palloc(sizeof(BufFile));
BufFile *file = (BufFile *) palloc(sizeof(BufFile));

file->numFiles = 1;
file->files = (File *) palloc(sizeof(File));
@@ -117,9 +121,9 @@ extendBufFile(BufFile *file)
Assert(pfile >= 0);

file->files = (File *) repalloc(file->files,
(file->numFiles+1) * sizeof(File));
(file->numFiles + 1) * sizeof(File));
file->offsets = (long *) repalloc(file->offsets,
(file->numFiles+1) * sizeof(long));
(file->numFiles + 1) * sizeof(long));
file->files[file->numFiles] = pfile;
file->offsets[file->numFiles] = 0L;
file->numFiles++;
@@ -130,7 +134,7 @@ extendBufFile(BufFile *file)
* multiple temporary files if more than MAX_PHYSICAL_FILESIZE bytes are
* written to it).
*/
BufFile *
BufFile *
BufFileCreateTemp(void)
{
BufFile *file;
@@ -152,7 +156,7 @@ BufFileCreateTemp(void)
* to attach a BufFile to a non-temporary file. Note that BufFiles created
* in this way CANNOT be expanded into multiple files.
*/
BufFile *
BufFile *
BufFileCreate(File file)
{
return makeBufFile(file);
@@ -166,7 +170,7 @@ BufFileCreate(File file)
void
BufFileClose(BufFile *file)
{
int i;
int i;

/* flush any unwritten data */
BufFileFlush(file);
@@ -189,21 +193,22 @@ BufFileClose(BufFile *file)
static void
BufFileLoadBuffer(BufFile *file)
{
File thisfile;
File thisfile;

/*
* Advance to next component file if necessary and possible.
*
* This path can only be taken if there is more than one component,
* so it won't interfere with reading a non-temp file that is over
* This path can only be taken if there is more than one component, so it
* won't interfere with reading a non-temp file that is over
* MAX_PHYSICAL_FILESIZE.
*/
if (file->curOffset >= MAX_PHYSICAL_FILESIZE &&
file->curFile+1 < file->numFiles)
file->curFile + 1 < file->numFiles)
{
file->curFile++;
file->curOffset = 0L;
}

/*
* May need to reposition physical file.
*/
@@ -214,6 +219,7 @@ BufFileLoadBuffer(BufFile *file)
return; /* seek failed, read nothing */
file->offsets[file->curFile] = file->curOffset;
}

/*
* Read whatever we can get, up to a full bufferload.
*/
@@ -239,21 +245,23 @@ BufFileDumpBuffer(BufFile *file)
File thisfile;

/*
* Unlike BufFileLoadBuffer, we must dump the whole buffer even if
* it crosses a component-file boundary; so we need a loop.
* Unlike BufFileLoadBuffer, we must dump the whole buffer even if it
* crosses a component-file boundary; so we need a loop.
*/
while (wpos < file->nbytes)
{

/*
* Advance to next component file if necessary and possible.
*/
if (file->curOffset >= MAX_PHYSICAL_FILESIZE && file->isTemp)
{
while (file->curFile+1 >= file->numFiles)
while (file->curFile + 1 >= file->numFiles)
extendBufFile(file);
file->curFile++;
file->curOffset = 0L;
}

/*
* Enforce per-file size limit only for temp files, else just try
* to write as much as asked...
@@ -261,11 +269,12 @@ BufFileDumpBuffer(BufFile *file)
bytestowrite = file->nbytes - wpos;
if (file->isTemp)
{
long availbytes = MAX_PHYSICAL_FILESIZE - file->curOffset;
long availbytes = MAX_PHYSICAL_FILESIZE - file->curOffset;

if ((long) bytestowrite > availbytes)
bytestowrite = (int) availbytes;
}

/*
* May need to reposition physical file.
*/
@@ -284,11 +293,13 @@ BufFileDumpBuffer(BufFile *file)
wpos += bytestowrite;
}
file->dirty = false;

/*
* At this point, curOffset has been advanced to the end of the buffer,
* ie, its original value + nbytes. We need to make it point to the
* logical file position, ie, original value + pos, in case that is less
* (as could happen due to a small backwards seek in a dirty buffer!)
* At this point, curOffset has been advanced to the end of the
* buffer, ie, its original value + nbytes. We need to make it point
* to the logical file position, ie, original value + pos, in case
* that is less (as could happen due to a small backwards seek in a
* dirty buffer!)
*/
file->curOffset -= (file->nbytes - file->pos);
if (file->curOffset < 0) /* handle possible segment crossing */
@@ -297,7 +308,11 @@ BufFileDumpBuffer(BufFile *file)
Assert(file->curFile >= 0);
file->curOffset += MAX_PHYSICAL_FILESIZE;
}
/* Now we can set the buffer empty without changing the logical position */

/*
* Now we can set the buffer empty without changing the logical
* position
*/
file->pos = 0;
file->nbytes = 0;
}
@@ -317,7 +332,7 @@ BufFileRead(BufFile *file, void *ptr, size_t size)
{
if (BufFileFlush(file) != 0)
return 0; /* could not flush... */
Assert(! file->dirty);
Assert(!file->dirty);
}

while (size > 0)
@@ -430,8 +445,9 @@ BufFileFlush(BufFile *file)
int
BufFileSeek(BufFile *file, int fileno, long offset, int whence)
{
int newFile;
long newOffset;
int newFile;
long newOffset;

switch (whence)
{
case SEEK_SET:
@@ -441,9 +457,11 @@ BufFileSeek(BufFile *file, int fileno, long offset, int whence)
newOffset = offset;
break;
case SEEK_CUR:

/*
* Relative seek considers only the signed offset, ignoring fileno.
* Note that large offsets (> 1 gig) risk overflow in this add...
* Relative seek considers only the signed offset, ignoring
* fileno. Note that large offsets (> 1 gig) risk overflow in
* this add...
*/
newFile = file->curFile;
newOffset = (file->curOffset + file->pos) + offset;
@@ -467,11 +485,12 @@ BufFileSeek(BufFile *file, int fileno, long offset, int whence)
newOffset >= file->curOffset &&
newOffset <= file->curOffset + file->nbytes)
{

/*
* Seek is to a point within existing buffer; we can just adjust
* pos-within-buffer, without flushing buffer. Note this is OK
* whether reading or writing, but buffer remains dirty if we
* were writing.
* pos-within-buffer, without flushing buffer. Note this is OK
* whether reading or writing, but buffer remains dirty if we were
* writing.
*/
file->pos = (int) (newOffset - file->curOffset);
return 0;
@@ -479,10 +498,11 @@ BufFileSeek(BufFile *file, int fileno, long offset, int whence)
/* Otherwise, must reposition buffer, so flush any dirty data */
if (BufFileFlush(file) != 0)
return EOF;

/*
* At this point and no sooner, check for seek past last segment.
* The above flush could have created a new segment, so
* checking sooner would not work (at least not with this code).
* At this point and no sooner, check for seek past last segment. The
* above flush could have created a new segment, so checking sooner
* would not work (at least not with this code).
*/
if (file->isTemp)
{
@@ -544,7 +564,7 @@ BufFileSeekBlock(BufFile *file, long blknum)
long
BufFileTellBlock(BufFile *file)
{
long blknum;
long blknum;

blknum = (file->curOffset + file->pos) / BLCKSZ;
blknum += file->curFile * RELSEG_SIZE;

src/backend/storage/file/fd.c

@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/file/fd.c,v 1.55 2000/04/09 04:43:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/file/fd.c,v 1.56 2000/04/12 17:15:35 momjian Exp $
*
* NOTES:
*
@@ -417,11 +417,11 @@ ReleaseDataFile()
DO_DB(elog(DEBUG, "ReleaseDataFile. Opened %d", nfile));

if (nfile <= 0)
return(false);
return (false);
Assert(VfdCache[0].lruMoreRecently != 0);
LruDelete(VfdCache[0].lruMoreRecently);

return(true);
return (true);
}

static File
@@ -442,12 +442,16 @@ AllocateVfd()

SizeVfdCache = 1;

/* register proc-exit call to ensure temp files are dropped at exit */
/*
* register proc-exit call to ensure temp files are dropped at
* exit
*/
on_proc_exit(AtEOXact_Files, NULL);
}

if (VfdCache[0].nextFree == 0)
{

/*
* The free list is empty so it is time to increase the size of
* the array. We choose to double it each time this happens.
@@ -511,7 +515,7 @@ FreeVfd(File file)
*
* (Generally, this isn't actually necessary, considering that we
* should be cd'd into the database directory. Presently it is only
* necessary to do it in "bootstrap" mode. Maybe we should change
* necessary to do it in "bootstrap" mode. Maybe we should change
* bootstrap mode to do the cd, and save a few cycles/bytes here.)
*/
static char *
@@ -562,6 +566,7 @@ FileAccess(File file)
}
else if (VfdCache[0].lruLessRecently != file)
{

/*
* We now know that the file is open and that it is not the last
* one accessed, so we need to move it to the head of the Lru
@@ -917,9 +922,11 @@ FileSync(File file)
returnCode = 0;
VfdCache[file].fdstate &= ~FD_DIRTY;
}
else
else
{
/* We don't use FileAccess() because we don't want to force the

/*
* We don't use FileAccess() because we don't want to force the
* file to the front of the LRU ring; we aren't expecting to
* access it again soon.
*/
@@ -941,7 +948,7 @@ FileSync(File file)
* FileMarkDirty --- mark a file as needing fsync at transaction commit.
*
* Since FileWrite marks the file dirty, this routine is not needed in
* normal use. It is called when the buffer manager detects that some other
* normal use. It is called when the buffer manager detects that some other
* backend has written out a shared buffer that this backend dirtied (but
* didn't write) in the current xact. In that scenario, we need to fsync
* the file before we can commit. We cannot assume that the other backend

src/backend/storage/ipc/ipc.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.45 2000/01/26 05:56:57 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.46 2000/04/12 17:15:36 momjian Exp $
*
* NOTES
*
@@ -49,7 +49,7 @@
* so that an elog() from an on_proc_exit routine cannot get us out
* of the exit procedure. We do NOT want to go back to the idle loop...
*/
bool proc_exit_inprogress = false;
bool proc_exit_inprogress = false;

static int UsePrivateMemory = 0;

@@ -78,7 +78,7 @@ typedef struct _PrivateMemStruct
char *memptr;
} PrivateMem;

static PrivateMem IpcPrivateMem[16];
static PrivateMem IpcPrivateMem[16];


static int
@@ -117,6 +117,7 @@ PrivateMemoryAttach(IpcMemoryId memid)
void
proc_exit(int code)
{

/*
* Once we set this flag, we are committed to exit. Any elog() will
* NOT send control back to the main loop, but right back here.
@@ -140,7 +141,7 @@ proc_exit(int code)
*/
while (--on_proc_exit_index >= 0)
(*on_proc_exit_list[on_proc_exit_index].function) (code,
on_proc_exit_list[on_proc_exit_index].arg);
on_proc_exit_list[on_proc_exit_index].arg);

TPRINTF(TRACE_VERBOSE, "exit(%d)", code);
exit(code);
@@ -166,7 +167,7 @@ shmem_exit(int code)
*/
while (--on_shmem_exit_index >= 0)
(*on_shmem_exit_list[on_shmem_exit_index].function) (code,
on_shmem_exit_list[on_shmem_exit_index].arg);
on_shmem_exit_list[on_shmem_exit_index].arg);

on_shmem_exit_index = 0;
}
@@ -179,7 +180,7 @@ shmem_exit(int code)
* ----------------------------------------------------------------
*/
int
on_proc_exit(void (*function) (), caddr_t arg)
on_proc_exit(void (*function) (), caddr_t arg)
{
if (on_proc_exit_index >= MAX_ON_EXITS)
return -1;
@@ -200,7 +201,7 @@ on_proc_exit(void (*function) (), caddr_t arg)
* ----------------------------------------------------------------
*/
int
on_shmem_exit(void (*function) (), caddr_t arg)
on_shmem_exit(void (*function) (), caddr_t arg)
{
if (on_shmem_exit_index >= MAX_ON_EXITS)
return -1;
@@ -288,7 +289,7 @@ IpcSemaphoreCreate(IpcSemaphoreKey semKey,

/* check arguments */
if (semNum > IPC_NMAXSEM || semNum <= 0)
return(-1);
return (-1);

semId = semget(semKey, 0, 0);

@@ -308,7 +309,7 @@ IpcSemaphoreCreate(IpcSemaphoreKey semKey,
"key=%d, num=%d, permission=%o",
strerror(errno), semKey, semNum, permission);
IpcConfigTip();
return(-1);
return (-1);
}
for (i = 0; i < semNum; i++)
array[i] = semStartValue;
@@ -320,7 +321,7 @@ IpcSemaphoreCreate(IpcSemaphoreKey semKey,
strerror(errno), semId);
semctl(semId, 0, IPC_RMID, semun);
IpcConfigTip();
return(-1);
return (-1);
}

if (removeOnExit)

src/backend/storage/ipc/ipci.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipci.c,v 1.32 2000/01/26 05:56:58 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipci.c,v 1.33 2000/04/12 17:15:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -56,14 +56,16 @@ CreateSharedMemoryAndSemaphores(IPCKey key, int maxBackends)
{
int size;
extern int XLOGShmemSize(void);
extern void XLOGShmemInit(void);
extern void XLOGShmemInit(void);

#ifdef HAS_TEST_AND_SET

/*
* Create shared memory for slocks
*/
CreateAndInitSLockMemory(IPCKeyGetSLockSharedMemoryKey(key));
#endif

/*
* Kill and create the buffer manager buffer pool (and semaphore)
*/

src/backend/storage/ipc/shmem.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/shmem.c,v 1.49 2000/02/26 05:25:55 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/shmem.c,v 1.50 2000/04/12 17:15:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -340,9 +340,9 @@ ShmemInitHash(char *name, /* table string name for shmem index */
long *location;

/*
* Hash tables allocated in shared memory have a fixed directory;
* it can't grow or other backends wouldn't be able to find it.
* So, make sure we make it big enough to start with.
* Hash tables allocated in shared memory have a fixed directory; it
* can't grow or other backends wouldn't be able to find it. So, make
* sure we make it big enough to start with.
*
* The segbase is for calculating pointer values. The shared memory
* allocator must be specified too.
@@ -354,7 +354,7 @@ ShmemInitHash(char *name, /* table string name for shmem index */

/* look it up in the shmem index */
location = ShmemInitStruct(name,
sizeof(HHDR) + infoP->dsize * sizeof(SEG_OFFSET),
sizeof(HHDR) + infoP->dsize * sizeof(SEG_OFFSET),
&found);

/*

src/backend/storage/ipc/sinval.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinval.c,v 1.20 2000/01/26 05:56:58 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinval.c,v 1.21 2000/04/12 17:15:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -96,8 +96,8 @@ RegisterSharedInvalid(int cacheId, /* XXX */
Index hashIndex,
ItemPointer pointer)
{
SharedInvalidData newInvalid;
bool insertOK;
SharedInvalidData newInvalid;
bool insertOK;

/*
* This code has been hacked to accept two types of messages. This
@@ -121,7 +121,7 @@ RegisterSharedInvalid(int cacheId, /* XXX */
SpinAcquire(SInvalLock);
insertOK = SIInsertDataEntry(shmInvalBuffer, &newInvalid);
SpinRelease(SInvalLock);
if (! insertOK)
if (!insertOK)
elog(NOTICE, "RegisterSharedInvalid: SI buffer overflow");
}

@@ -130,12 +130,12 @@ RegisterSharedInvalid(int cacheId, /* XXX */
* Process shared-cache-invalidation messages waiting for this backend
*/
void
InvalidateSharedInvalid(void (*invalFunction) (),
void (*resetFunction) ())
InvalidateSharedInvalid(void (*invalFunction) (),
void (*resetFunction) ())
{
SharedInvalidData data;
int getResult;
bool gotMessage = false;
SharedInvalidData data;
int getResult;
bool gotMessage = false;

for (;;)
{
@@ -171,13 +171,13 @@ InvalidateSharedInvalid(void (*invalFunction) (),


/****************************************************************************/
/* Functions that need to scan the PROC structures of all running backends. */
/* Functions that need to scan the PROC structures of all running backends. */
/* It's a bit strange to keep these in sinval.c, since they don't have any */
/* direct relationship to shared-cache invalidation. But the procState */
/* array in the SI segment is the only place in the system where we have */
/* an array of per-backend data, so it is the most convenient place to keep */
/* an array of per-backend data, so it is the most convenient place to keep */
/* pointers to the backends' PROC structures. We used to implement these */
/* functions with a slow, ugly search through the ShmemIndex hash table --- */
/* functions with a slow, ugly search through the ShmemIndex hash table --- */
/* now they are simple loops over the SI ProcState array. */
/****************************************************************************/

@@ -205,7 +205,7 @@ DatabaseHasActiveBackends(Oid databaseId)

for (index = 0; index < segP->maxBackends; index++)
{
SHMEM_OFFSET pOffset = stateP[index].procStruct;
SHMEM_OFFSET pOffset = stateP[index].procStruct;

if (pOffset != INVALID_OFFSET)
{
@@ -239,7 +239,7 @@ TransactionIdIsInProgress(TransactionId xid)

for (index = 0; index < segP->maxBackends; index++)
{
SHMEM_OFFSET pOffset = stateP[index].procStruct;
SHMEM_OFFSET pOffset = stateP[index].procStruct;

if (pOffset != INVALID_OFFSET)
{
@@ -277,14 +277,15 @@ GetXmaxRecent(TransactionId *XmaxRecent)

for (index = 0; index < segP->maxBackends; index++)
{
SHMEM_OFFSET pOffset = stateP[index].procStruct;
SHMEM_OFFSET pOffset = stateP[index].procStruct;

if (pOffset != INVALID_OFFSET)
{
PROC *proc = (PROC *) MAKE_PTR(pOffset);
TransactionId xmin;

xmin = proc->xmin; /* we don't use spin-locking in AbortTransaction() ! */
xmin = proc->xmin; /* we don't use spin-locking in
* AbortTransaction() ! */
if (proc == MyProc || xmin < FirstTransactionId)
continue;
if (xmin < *XmaxRecent)
@@ -307,8 +308,9 @@ GetSnapshotData(bool serializable)
int index;
int count = 0;

/* There can be no more than maxBackends active transactions,
* so this is enough space:
/*
* There can be no more than maxBackends active transactions, so this
* is enough space:
*/
snapshot->xip = (TransactionId *)
malloc(segP->maxBackends * sizeof(TransactionId));
@@ -317,8 +319,8 @@ GetSnapshotData(bool serializable)
SpinAcquire(SInvalLock);

/*
* Unfortunately, we have to call ReadNewTransactionId()
* after acquiring SInvalLock above. It's not good because
* Unfortunately, we have to call ReadNewTransactionId() after
* acquiring SInvalLock above. It's not good because
* ReadNewTransactionId() does SpinAcquire(OidGenLockId) but
* _necessary_.
*/
@@ -326,26 +328,27 @@ GetSnapshotData(bool serializable)

for (index = 0; index < segP->maxBackends; index++)
{
SHMEM_OFFSET pOffset = stateP[index].procStruct;
SHMEM_OFFSET pOffset = stateP[index].procStruct;

if (pOffset != INVALID_OFFSET)
{
PROC *proc = (PROC *) MAKE_PTR(pOffset);
TransactionId xid;

/*
* We don't use spin-locking when changing proc->xid
* in GetNewTransactionId() and in AbortTransaction() !..
/*
* We don't use spin-locking when changing proc->xid in
* GetNewTransactionId() and in AbortTransaction() !..
*/
xid = proc->xid;
if (proc == MyProc ||
if (proc == MyProc ||
xid < FirstTransactionId || xid >= snapshot->xmax)
{

/*
* Seems that there is no sense to store xid >= snapshot->xmax
* (what we got from ReadNewTransactionId above) in
* snapshot->xip - we just assume that all xacts with such
* xid-s are running and may be ignored.
* Seems that there is no sense to store xid >=
* snapshot->xmax (what we got from ReadNewTransactionId
* above) in snapshot->xip - we just assume that all xacts
* with such xid-s are running and may be ignored.
*/
continue;
}

@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.29 2000/03/17 02:36:21 tgl Exp $
 *	  $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.30 2000/04/12 17:15:37 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -53,8 +53,9 @@ SISegmentInit(bool createNewSegment, IPCKey key, int maxBackends)
		/* Kill existing segment, if any */
		IpcMemoryKill(key);

		/* Figure space needed.
		 * Note sizeof(SISeg) includes the first ProcState entry.
		/*
		 * Figure space needed. Note sizeof(SISeg) includes the first
		 * ProcState entry.
		 */
		segSize = sizeof(SISeg) + sizeof(ProcState) * (maxBackends - 1);

@@ -125,7 +126,7 @@ SISegInit(SISeg *segP, int maxBackends)
	/* Mark all backends inactive */
	for (i = 0; i < maxBackends; i++)
	{
		segP->procState[i].nextMsgNum = -1; /* inactive */
		segP->procState[i].nextMsgNum = -1;		/* inactive */
		segP->procState[i].resetState = false;
		segP->procState[i].tag = InvalidBackendTag;
		segP->procState[i].procStruct = INVALID_OFFSET;
@@ -143,8 +144,8 @@ SISegInit(SISeg *segP, int maxBackends)
 int
 SIBackendInit(SISeg *segP)
 {
	int		index;
	ProcState *stateP = NULL;
	int			index;
	ProcState  *stateP = NULL;

	Assert(MyBackendTag > 0);

@@ -165,7 +166,8 @@ SIBackendInit(SISeg *segP)
		}
	}

	/* elog() with spinlock held is probably not too cool, but this
	/*
	 * elog() with spinlock held is probably not too cool, but this
	 * condition should never happen anyway.
	 */
	if (stateP == NULL)
@@ -230,11 +232,12 @@ CleanupInvalidationState(int status,
 bool
 SIInsertDataEntry(SISeg *segP, SharedInvalidData *data)
 {
	int		numMsgs = segP->maxMsgNum - segP->minMsgNum;
	int			numMsgs = segP->maxMsgNum - segP->minMsgNum;

	/* Is the buffer full? */
	if (numMsgs >= MAXNUMMESSAGES)
	{

		/*
		 * Don't panic just yet: slowest backend might have consumed some
		 * messages but not yet have done SIDelExpiredDataEntries() to
@@ -254,8 +257,9 @@ SIInsertDataEntry(SISeg *segP, SharedInvalidData *data)
	 * Try to prevent table overflow. When the table is 70% full send a
	 * SIGUSR2 (ordinarily a NOTIFY signal) to the postmaster, which will
	 * send it back to all the backends. This will force idle backends to
	 * execute a transaction to look through pg_listener for NOTIFY messages,
	 * and as a byproduct of the transaction start they will read SI entries.
	 * execute a transaction to look through pg_listener for NOTIFY
	 * messages, and as a byproduct of the transaction start they will
	 * read SI entries.
	 *
	 * This should never happen if all the backends are actively executing
	 * queries, but if a backend is sitting idle then it won't be starting
@@ -267,7 +271,7 @@ SIInsertDataEntry(SISeg *segP, SharedInvalidData *data)
		IsUnderPostmaster)
	{
		TPRINTF(TRACE_VERBOSE,
			"SIInsertDataEntry: table is 70%% full, signaling postmaster");
				"SIInsertDataEntry: table is 70%% full, signaling postmaster");
		kill(getppid(), SIGUSR2);
	}

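The comments in the SIInsertDataEntry hunks describe a bounded ring of invalidation messages with two defenses against overflow: refuse inserts when full, and nudge idle backends when the table hits 70%. A minimal sketch of that insert path, under assumed names; MAX_MSGS and the signal_postmaster() callback are hypothetical, where the real code does kill(getppid(), SIGUSR2).

/*
 * Illustrative bounded invalidation ring.  The payload is simplified
 * to an int; message numbers grow monotonically and are mapped into
 * the array by modulo, exactly as the hunk above does.
 */
#define MAX_MSGS 4000

typedef struct
{
	int			minMsgNum;		/* oldest unconsumed message */
	int			maxMsgNum;		/* next message number to assign */
	int			buffer[MAX_MSGS];
} MsgRing;

static int
ring_insert(MsgRing *ring, int msg, void (*signal_postmaster)(void))
{
	int			numMsgs = ring->maxMsgNum - ring->minMsgNum;

	if (numMsgs >= MAX_MSGS)
		return 0;				/* full: caller must force cache resets */

	/* at 70% full, nudge idle consumers so they catch up */
	if (numMsgs == (MAX_MSGS * 70 / 100) && signal_postmaster != NULL)
		signal_postmaster();

	ring->buffer[ring->maxMsgNum % MAX_MSGS] = msg;
	ring->maxMsgNum++;
	return 1;
}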
@@ -296,7 +300,7 @@ SISetProcStateInvalid(SISeg *segP)

	for (i = 0; i < segP->maxBackends; i++)
	{
		if (segP->procState[i].nextMsgNum >= 0) /* active backend? */
		if (segP->procState[i].nextMsgNum >= 0)		/* active backend? */
		{
			segP->procState[i].resetState = true;
			segP->procState[i].nextMsgNum = 0;
@@ -318,13 +322,15 @@ int
 SIGetDataEntry(SISeg *segP, int backendId,
			   SharedInvalidData *data)
 {
	ProcState *stateP = & segP->procState[backendId - 1];
	ProcState  *stateP = &segP->procState[backendId - 1];

	Assert(stateP->tag == MyBackendTag);

	if (stateP->resetState)
	{
		/* Force reset. We can say we have dealt with any messages added

		/*
		 * Force reset. We can say we have dealt with any messages added
		 * since the reset, as well...
		 */
		stateP->resetState = false;
@@ -341,9 +347,10 @@ SIGetDataEntry(SISeg *segP, int backendId,
	*data = segP->buffer[stateP->nextMsgNum % MAXNUMMESSAGES];
	stateP->nextMsgNum++;

	/* There may be other backends that haven't read the message,
	 * so we cannot delete it here.
	 * SIDelExpiredDataEntries() should be called to remove dead messages.
	/*
	 * There may be other backends that haven't read the message, so we
	 * cannot delete it here. SIDelExpiredDataEntries() should be called
	 * to remove dead messages.
	 */
	return 1;					/* got a message */
 }
@@ -376,8 +383,9 @@ SIDelExpiredDataEntries(SISeg *segP)
	}
	segP->minMsgNum = min;

	/* When minMsgNum gets really large, decrement all message counters
	 * so as to forestall overflow of the counters.
	/*
	 * When minMsgNum gets really large, decrement all message counters so
	 * as to forestall overflow of the counters.
	 */
	if (min >= MSGNUMWRAPAROUND)
	{

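The wraparound comment just above is the whole trick: message numbers only ever grow, and consumers compare them relatively, so once the minimum passes a threshold every counter can be shifted down in unison. Continuing the hypothetical MsgRing sketch from earlier, with an assumed WRAP_THRESHOLD in place of MSGNUMWRAPAROUND:

/*
 * Sketch of the counter renormalization.  WRAP_THRESHOLD must be a
 * multiple of MAX_MSGS so that buffer[n % MAX_MSGS] still lands on
 * the same slot after the shift; subtracting the same constant from
 * every counter preserves all orderings.
 */
#define WRAP_THRESHOLD (MAX_MSGS * 4096)

static void
ring_renormalize(MsgRing *ring, int *nextMsgNum, int nbackends)
{
	int			i;

	if (ring->minMsgNum < WRAP_THRESHOLD)
		return;					/* nothing to do yet */

	ring->minMsgNum -= WRAP_THRESHOLD;
	ring->maxMsgNum -= WRAP_THRESHOLD;
	for (i = 0; i < nbackends; i++)
		nextMsgNum[i] -= WRAP_THRESHOLD;	/* readers shift in step */
}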
@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/storage/ipc/Attic/spin.c,v 1.23 2000/04/12 04:58:09 momjian Exp $
 *	  $Header: /cvsroot/pgsql/src/backend/storage/ipc/Attic/spin.c,v 1.24 2000/04/12 17:15:37 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -59,7 +59,8 @@ InitSpinLocks(void)
	extern SPINLOCK SInvalLock;
	extern SPINLOCK OidGenLockId;
	extern SPINLOCK XidGenLockId;
	extern SPINLOCK ControlFileLockId;
	extern SPINLOCK ControlFileLockId;

 #ifdef STABLE_MEMORY_STORAGE
	extern SPINLOCK MMCacheLock;

@@ -233,7 +234,7 @@ CreateSpinlocks(IPCKey key)
	{

		SpinLockId = IpcSemaphoreCreate(key, MAX_SPINS, IPCProtection,
										IpcSemaphoreDefaultStartValue, 1);
										IpcSemaphoreDefaultStartValue, 1);

		if (SpinLockId <= 0)
			elog(STOP, "CreateSpinlocks: cannot create spin locks");
@@ -264,7 +265,7 @@ InitSpinLocks(void)
	extern SPINLOCK SInvalLock;
	extern SPINLOCK OidGenLockId;
	extern SPINLOCK XidGenLockId;
	extern SPINLOCK ControlFileLockId;
	extern SPINLOCK ControlFileLockId;

 #ifdef STABLE_MEMORY_STORAGE
	extern SPINLOCK MMCacheLock;

@@ -9,7 +9,7 @@
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/storage/large_object/inv_api.c,v 1.66 2000/04/08 04:37:07 tgl Exp $
 *	  $Header: /cvsroot/pgsql/src/backend/storage/large_object/inv_api.c,v 1.67 2000/04/12 17:15:37 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -289,7 +289,8 @@ inv_drop(Oid lobjId)
		return -1;
	}

	/* Since heap_drop_with_catalog will destroy the relcache entry,
	/*
	 * Since heap_drop_with_catalog will destroy the relcache entry,
	 * there's no need to drop the refcount in this path.
	 */
	heap_drop_with_catalog(RelationGetRelationName(r));

@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lmgr.c,v 1.38 2000/01/26 05:57:00 momjian Exp $
 *	  $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lmgr.c,v 1.39 2000/04/12 17:15:38 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -145,14 +145,14 @@ LockRelation(Relation relation, LOCKMODE lockmode)
	tag.dbId = relation->rd_lockInfo.lockRelId.dbId;
	tag.objId.blkno = InvalidBlockNumber;

	if (! LockAcquire(LockTableId, &tag, lockmode))
	if (!LockAcquire(LockTableId, &tag, lockmode))
		elog(ERROR, "LockRelation: LockAcquire failed");

	/*
	 * Check to see if the relcache entry has been invalidated
	 * while we were waiting to lock it. If so, rebuild it,
	 * or elog() trying. Increment the refcount to ensure that
	 * RelationFlushRelation will rebuild it and not just delete it.
	 * Check to see if the relcache entry has been invalidated while we
	 * were waiting to lock it. If so, rebuild it, or elog() trying.
	 * Increment the refcount to ensure that RelationFlushRelation will
	 * rebuild it and not just delete it.
	 */
	RelationIncrementReferenceCount(relation);
	DiscardInvalid();
@@ -194,7 +194,7 @@ LockPage(Relation relation, BlockNumber blkno, LOCKMODE lockmode)
	tag.dbId = relation->rd_lockInfo.lockRelId.dbId;
	tag.objId.blkno = blkno;

	if (! LockAcquire(LockTableId, &tag, lockmode))
	if (!LockAcquire(LockTableId, &tag, lockmode))
		elog(ERROR, "LockPage: LockAcquire failed");
 }

@@ -230,7 +230,7 @@ XactLockTableInsert(TransactionId xid)
	tag.dbId = InvalidOid;
	tag.objId.xid = xid;

	if (! LockAcquire(LockTableId, &tag, ExclusiveLock))
	if (!LockAcquire(LockTableId, &tag, ExclusiveLock))
		elog(ERROR, "XactLockTableInsert: LockAcquire failed");
 }

@@ -263,7 +263,7 @@ XactLockTableWait(TransactionId xid)
	tag.dbId = InvalidOid;
	tag.objId.xid = xid;

	if (! LockAcquire(LockTableId, &tag, ShareLock))
	if (!LockAcquire(LockTableId, &tag, ShareLock))
		elog(ERROR, "XactLockTableWait: LockAcquire failed");

	LockRelease(LockTableId, &tag, ShareLock);

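The XactLockTableInsert/XactLockTableWait hunks show the idiom this file is built on: a transaction holds an ExclusiveLock on its own xid, so waiting for it to finish is just acquiring and immediately releasing a ShareLock on the same tag. A sketch of that pattern with a pthread rwlock standing in for the lock manager; every name here is invented for illustration.

/*
 * Sketch of xid-based waiting.  The owner write-locks at transaction
 * start and unlocks at commit/abort; a waiter takes and drops the
 * read lock, which blocks exactly until the owner is done.
 */
#include <pthread.h>

typedef struct
{
	pthread_rwlock_t lock;		/* one per transaction id */
} XactLock;

static void
xact_lock_insert(XactLock *xl)	/* owner, at transaction start */
{
	pthread_rwlock_init(&xl->lock, NULL);
	pthread_rwlock_wrlock(&xl->lock);
}

static void
xact_lock_release(XactLock *xl)	/* owner, at commit/abort */
{
	pthread_rwlock_unlock(&xl->lock);
}

static void
xact_lock_wait(XactLock *xl)	/* waiter: blocks until owner is done */
{
	pthread_rwlock_rdlock(&xl->lock);
	pthread_rwlock_unlock(&xl->lock);	/* the lock itself is not kept */
}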
@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.71 2000/04/10 00:45:42 inoue Exp $
 *	  $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.72 2000/04/12 17:15:38 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -47,7 +47,7 @@
 * This is so that we can support more backends. (system-wide semaphore
 * sets run out pretty fast.) -ay 4/95
 *
 * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.71 2000/04/10 00:45:42 inoue Exp $
 * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.72 2000/04/12 17:15:38 momjian Exp $
 */
 #include <sys/time.h>
 #include <unistd.h>
@@ -72,7 +72,7 @@
 #include "storage/proc.h"
 #include "utils/trace.h"

 void		HandleDeadLock(SIGNAL_ARGS);
 void		HandleDeadLock(SIGNAL_ARGS);
 static void ProcFreeAllSemaphores(void);
 static bool GetOffWaitqueue(PROC *);

@@ -320,11 +320,13 @@ InitProcess(IPCKey key)
 static bool
 GetOffWaitqueue(PROC *proc)
 {
	bool	getoffed = false;
	bool		getoffed = false;

	LockLockTable();
	if (proc->links.next != INVALID_OFFSET)
	{
		int		lockmode = proc->token;
		int			lockmode = proc->token;

		Assert(proc->waitLock->waitProcs.size > 0);
		SHMQueueDelete(&(proc->links));
		--proc->waitLock->waitProcs.size;
@@ -343,6 +345,7 @@ GetOffWaitqueue(PROC *proc)

	return getoffed;
 }

 /*
 * ProcReleaseLocks() -- release all locks associated with this process
 *
@@ -485,8 +488,9 @@ ProcQueueInit(PROC_QUEUE *queue)
 * Handling cancel request while waiting for lock
 *
 */
 static bool lockWaiting = false;
 void SetWaitingForLock(bool waiting)
 static bool lockWaiting = false;
 void
 SetWaitingForLock(bool waiting)
 {
	if (waiting == lockWaiting)
		return;
@@ -499,7 +503,7 @@ void SetWaitingForLock(bool waiting)
		lockWaiting = false;
		return;
	}
	if (QueryCancel) /* cancel request pending */
	if (QueryCancel)			/* cancel request pending */
	{
		if (GetOffWaitqueue(MyProc))
		{
@@ -509,11 +513,14 @@ void SetWaitingForLock(bool waiting)
		}
	}
 }
 void LockWaitCancel(void)
 void
 LockWaitCancel(void)
 {
	struct itimerval timeval, dummy;
	struct itimerval timeval,
				dummy;

	if (!lockWaiting) return;
	if (!lockWaiting)
		return;
	lockWaiting = false;
	/* Deadlock timer off */
	MemSet(&timeval, 0, sizeof(struct itimerval));

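LockWaitCancel above disarms the deadlock timer by handing setitimer() an all-zero itimerval before taking the process off the wait queue. A standalone sketch of that setitimer idiom; DEADLOCK_TIMEOUT_MS is a hypothetical constant, not the backend's configurable value.

/*
 * Arming and disarming a deadlock-detection timer with setitimer().
 * An all-zero it_value cancels any pending timer, which is exactly
 * what the "Deadlock timer off" step in LockWaitCancel relies on.
 */
#include <string.h>
#include <sys/time.h>

#define DEADLOCK_TIMEOUT_MS 1000

static int
deadlock_timer_arm(void)
{
	struct itimerval timeval;

	memset(&timeval, 0, sizeof(struct itimerval));
	timeval.it_value.tv_sec = DEADLOCK_TIMEOUT_MS / 1000;
	timeval.it_value.tv_usec = (DEADLOCK_TIMEOUT_MS % 1000) * 1000;
	/* SIGALRM fires a HandleDeadLock-style handler if we block too long */
	return setitimer(ITIMER_REAL, &timeval, NULL);
}

static int
deadlock_timer_disarm(void)
{
	struct itimerval timeval,
				dummy;

	memset(&timeval, 0, sizeof(struct itimerval));	/* zero cancels */
	return setitimer(ITIMER_REAL, &timeval, &dummy);
}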
@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/storage/page/bufpage.c,v 1.28 2000/01/26 05:57:04 momjian Exp $
 *	  $Header: /cvsroot/pgsql/src/backend/storage/page/bufpage.c,v 1.29 2000/04/12 17:15:40 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -40,7 +40,7 @@ PageInit(Page page, Size pageSize, Size specialSize)

	Assert(pageSize == BLCKSZ);
	Assert(pageSize >
		specialSize + sizeof(PageHeaderData) - sizeof(ItemIdData));
		   specialSize + sizeof(PageHeaderData) - sizeof(ItemIdData));

	specialSize = MAXALIGN(specialSize);


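The PageInit assertion above encodes the page layout invariant: a page must fit the header plus the aligned special area with room left over, and sizeof(PageHeaderData) already counts one line pointer, hence the subtraction. The same arithmetic as a standalone check; the sizes and the ALIGN8 macro are illustrative stand-ins for BLCKSZ, the struct sizes, and MAXALIGN.

/*
 * Sketch of the PageInit space check with stand-in sizes.
 */
#include <stddef.h>

#define FAKE_BLCKSZ      8192	/* stand-in for BLCKSZ */
#define FAKE_HEADER_SIZE 24		/* stand-in for sizeof(PageHeaderData) */
#define FAKE_ITEMID_SIZE 4		/* stand-in for sizeof(ItemIdData) */
#define ALIGN8(x)        (((x) + 7) & ~((size_t) 7))	/* MAXALIGN-ish */

static int
page_layout_ok(size_t pageSize, size_t specialSize)
{
	if (pageSize != FAKE_BLCKSZ)
		return 0;
	specialSize = ALIGN8(specialSize);	/* special area stays aligned */
	/* header size already counts one line pointer, so subtract it back */
	return pageSize > specialSize + FAKE_HEADER_SIZE - FAKE_ITEMID_SIZE;
}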
@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/storage/smgr/md.c,v 1.66 2000/04/10 23:41:51 tgl Exp $
 *	  $Header: /cvsroot/pgsql/src/backend/storage/smgr/md.c,v 1.67 2000/04/12 17:15:41 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -21,7 +21,8 @@
 #include "catalog/catalog.h"
 #include "miscadmin.h"
 #include "storage/smgr.h"
 #include "utils/inval.h"		/* ImmediateSharedRelationCacheInvalidate() */
 #include "utils/inval.h"		/* ImmediateSharedRelationCacheInvalidate()
								 * */

 #undef DIAGNOSTIC

@@ -62,17 +63,18 @@ typedef struct _MdfdVec

 static int	Nfds = 100;			/* initial/current size of Md_fdvec array */
 static MdfdVec *Md_fdvec = (MdfdVec *) NULL;
 static int	Md_Free = -1;		/* head of freelist of unused fdvec entries */
 static int	Md_Free = -1;		/* head of freelist of unused fdvec
								 * entries */
 static int	CurFd = 0;			/* first never-used fdvec index */
 static MemoryContext MdCxt;		/* context for all my allocations */

 /* routines declared here */
 static void mdclose_fd(int fd);
 static int	_mdfd_getrelnfd(Relation reln);
 static int	_mdfd_getrelnfd(Relation reln);
 static MdfdVec *_mdfd_openseg(Relation reln, int segno, int oflags);
 static MdfdVec *_mdfd_getseg(Relation reln, int blkno);
 static int	_mdfd_blind_getseg(char *dbname, char *relname,
					Oid dbid, Oid relid, int blkno);
								Oid dbid, Oid relid, int blkno);
 static int	_fdvec_alloc(void);
 static void _fdvec_free(int);
 static BlockNumber _mdnblocks(File file, Size blcksz);
@@ -140,11 +142,10 @@ mdcreate(Relation reln)
	 * are processed.
	 *
	 * For cataloged relations,pg_class is guaranteed to have an unique
	 * record with the same relname by the unique index.
	 * So we are able to reuse existent files for new catloged relations.
	 * Currently we reuse them in the following cases.
	 * 1. they are empty.
	 * 2. they are used for Index relations and their size == BLCKSZ * 2.
	 * record with the same relname by the unique index. So we are able to
	 * reuse existent files for new catloged relations. Currently we reuse
	 * them in the following cases. 1. they are empty. 2. they are used
	 * for Index relations and their size == BLCKSZ * 2.
	 */

	if (fd < 0)
@@ -162,13 +163,13 @@ mdcreate(Relation reln)
		return -1;
	if (!IsBootstrapProcessingMode())
	{
		bool	reuse = false;
		int		len = FileSeek(fd, 0L, SEEK_END);
		bool		reuse = false;
		int			len = FileSeek(fd, 0L, SEEK_END);

		if (len == 0)
			reuse = true;
		else if (reln->rd_rel->relkind == RELKIND_INDEX &&
				 len == BLCKSZ * 2)
				 len == BLCKSZ * 2)
			reuse = true;
		if (!reuse)
		{
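The mdcreate hunk spells out when a leftover file may be reused for a new cataloged relation: when it is empty, or when it belongs to an index and is exactly two blocks long. That test, sketched with plain POSIX calls; BLOCK_SIZE and the is_index flag are stand-ins for BLCKSZ and the relkind check.

/*
 * Sketch of the file-reuse test from mdcreate, using lseek() where
 * the backend uses its FileSeek() wrapper.
 */
#include <sys/types.h>
#include <unistd.h>

#define BLOCK_SIZE 8192

static int
may_reuse_existing_file(int fd, int is_index)
{
	off_t		len = lseek(fd, 0, SEEK_END);

	if (len == 0)
		return 1;				/* empty file: always safe to reuse */
	if (is_index && len == BLOCK_SIZE * 2)
		return 1;				/* size of a freshly initialized index */
	return 0;					/* anything else is not reusable */
}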
@@ -206,14 +207,15 @@ mdunlink(Relation reln)
	MdfdVec    *v;
	MemoryContext oldcxt;

	/* If the relation is already unlinked,we have nothing to do
	 * any more.
	/*
	 * If the relation is already unlinked,we have nothing to do any more.
	 */
	if (reln->rd_unlinked && reln->rd_fd < 0)
		return SM_SUCCESS;

	/*
	 * Force all segments of the relation to be opened, so that we
	 * won't miss deleting any of them.
	 * Force all segments of the relation to be opened, so that we won't
	 * miss deleting any of them.
	 */
	nblocks = mdnblocks(reln);

@@ -222,9 +224,10 @@ mdunlink(Relation reln)
	 *
	 * NOTE: We truncate the file(s) before deleting 'em, because if other
	 * backends are holding the files open, the unlink will fail on some
	 * platforms (think Microsoft). Better a zero-size file gets left around
	 * than a big file. Those other backends will be forced to close the
	 * relation by cache invalidation, but that probably hasn't happened yet.
	 * platforms (think Microsoft). Better a zero-size file gets left
	 * around than a big file. Those other backends will be forced to
	 * close the relation by cache invalidation, but that probably hasn't
	 * happened yet.
	 */
	fd = RelationGetFile(reln);
	if (fd < 0)					/* should not happen */
@@ -237,6 +240,7 @@ mdunlink(Relation reln)
	for (v = &Md_fdvec[fd]; v != (MdfdVec *) NULL;)
	{
		MdfdVec    *ov = v;

		FileTruncate(v->mdfd_vfd, 0);
		FileUnlink(v->mdfd_vfd);
		v = v->mdfd_chain;
@@ -269,7 +273,8 @@ mdunlink(Relation reln)
 int
 mdextend(Relation reln, char *buffer)
 {
	long		pos, nbytes;
	long		pos,
				nbytes;
	int			nblocks;
	MdfdVec    *v;

@@ -279,7 +284,7 @@ mdextend(Relation reln, char *buffer)
	if ((pos = FileSeek(v->mdfd_vfd, 0L, SEEK_END)) < 0)
		return SM_FAIL;

	if (pos % BLCKSZ != 0) /* the last block is incomplete */
	if (pos % BLCKSZ != 0)		/* the last block is incomplete */
	{
		pos -= pos % BLCKSZ;
		if (FileSeek(v->mdfd_vfd, pos, SEEK_SET) < 0)
@@ -414,6 +419,7 @@ mdclose_fd(int fd)
		/* if not closed already */
		if (v->mdfd_vfd >= 0)
		{

			/*
			 * We sync the file descriptor so that we don't need to reopen
			 * it at transaction commit to force changes to disk. (This
@@ -436,6 +442,7 @@ mdclose_fd(int fd)
	{
		if (v->mdfd_vfd >= 0)
		{

			/*
			 * We sync the file descriptor so that we don't need to reopen
			 * it at transaction commit to force changes to disk. (This
@@ -689,9 +696,11 @@ mdnblocks(Relation reln)
 {
	int			fd;
	MdfdVec    *v;

 #ifndef LET_OS_MANAGE_FILESIZE
	int			nblocks;
	int			segno;

 #endif

	fd = _mdfd_getrelnfd(reln);
@@ -738,13 +747,16 @@ mdtruncate(Relation reln, int nblocks)
	int			curnblk;
	int			fd;
	MdfdVec    *v;

 #ifndef LET_OS_MANAGE_FILESIZE
	MemoryContext oldcxt;
	int			priorblocks;

 #endif

	/* NOTE: mdnblocks makes sure we have opened all existing segments,
	 * so that truncate/delete loop will get them all!
	/*
	 * NOTE: mdnblocks makes sure we have opened all existing segments, so
	 * that truncate/delete loop will get them all!
	 */
	curnblk = mdnblocks(reln);
	if (nblocks < 0 || nblocks > curnblk)
@@ -764,29 +776,34 @@ mdtruncate(Relation reln, int nblocks)

		if (priorblocks > nblocks)
		{
			/* This segment is no longer wanted at all (and has already been
			 * unlinked from the mdfd_chain).
			 * We truncate the file before deleting it because if other
			 * backends are holding the file open, the unlink will fail on
			 * some platforms. Better a zero-size file gets left around than
			 * a big file...

			/*
			 * This segment is no longer wanted at all (and has already
			 * been unlinked from the mdfd_chain). We truncate the file
			 * before deleting it because if other backends are holding
			 * the file open, the unlink will fail on some platforms.
			 * Better a zero-size file gets left around than a big file...
			 */
			FileTruncate(v->mdfd_vfd, 0);
			FileUnlink(v->mdfd_vfd);
			v = v->mdfd_chain;
			Assert(ov != &Md_fdvec[fd]); /* we never drop the 1st segment */
			Assert(ov != &Md_fdvec[fd]);		/* we never drop the 1st
												 * segment */
			pfree(ov);
		}
		else if (priorblocks + RELSEG_SIZE > nblocks)
		{
			/* This is the last segment we want to keep.
			 * Truncate the file to the right length, and clear chain link
			 * that points to any remaining segments (which we shall zap).
			 * NOTE: if nblocks is exactly a multiple K of RELSEG_SIZE,
			 * we will truncate the K+1st segment to 0 length but keep it.
			 * This is mainly so that the right thing happens if nblocks=0.

			/*
			 * This is the last segment we want to keep. Truncate the file
			 * to the right length, and clear chain link that points to
			 * any remaining segments (which we shall zap). NOTE: if
			 * nblocks is exactly a multiple K of RELSEG_SIZE, we will
			 * truncate the K+1st segment to 0 length but keep it. This is
			 * mainly so that the right thing happens if nblocks=0.
			 */
			int		lastsegblocks = nblocks - priorblocks;
			int			lastsegblocks = nblocks - priorblocks;

			if (FileTruncate(v->mdfd_vfd, lastsegblocks * BLCKSZ) < 0)
				return -1;
			v->mdfd_lstbcnt = lastsegblocks;
@@ -795,7 +812,9 @@ mdtruncate(Relation reln, int nblocks)
		}
		else
		{
			/* We still need this segment and 0 or more blocks beyond it,

			/*
			 * We still need this segment and 0 or more blocks beyond it,
			 * so nothing to do here.
			 */
			v = v->mdfd_chain;
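The three-way split in the mdtruncate hunk is pure segment arithmetic: a segment that starts at or past the target length is dropped, the segment containing the boundary is shortened (possibly to zero blocks, per the K+1st-segment note), and earlier segments are untouched. Just that classification as a sketch, with SEG_SIZE standing in for RELSEG_SIZE:

/*
 * Sketch of mdtruncate's per-segment decision.  Segment i covers
 * blocks [i*SEG_SIZE, (i+1)*SEG_SIZE).
 */
#define SEG_SIZE 131072			/* blocks per segment file (stand-in) */

enum seg_action { SEG_DROP, SEG_SHORTEN, SEG_KEEP };

static enum seg_action
classify_segment(int segno, int nblocks)
{
	int			priorblocks = segno * SEG_SIZE;	/* blocks before this seg */

	if (priorblocks > nblocks)
		return SEG_DROP;		/* wholly beyond the new length */
	if (priorblocks + SEG_SIZE > nblocks)
		return SEG_SHORTEN;		/* truncate to nblocks - priorblocks,
								 * which is 0 when nblocks == priorblocks */
	return SEG_KEEP;			/* still entirely needed */
}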
@@ -842,7 +861,7 @@ mdcommit()
			continue;
		/* Sync the file entry */
 #ifndef LET_OS_MANAGE_FILESIZE
		for ( ; v != (MdfdVec *) NULL; v = v->mdfd_chain)
		for (; v != (MdfdVec *) NULL; v = v->mdfd_chain)
 #else
		if (v != (MdfdVec *) NULL)
 #endif
@@ -853,7 +872,7 @@ mdcommit()
	}

	return SM_SUCCESS;
 #endif	/* XLOG */
 #endif	 /* XLOG */
 }

 /*
@@ -865,7 +884,9 @@ mdcommit()
 int
 mdabort()
 {
	/* We don't actually have to do anything here. fd.c will discard

	/*
	 * We don't actually have to do anything here. fd.c will discard
	 * fsync-needed bits in its AtEOXact_Files() routine.
	 */
	return SM_SUCCESS;
@@ -1073,8 +1094,10 @@ _mdfd_blind_getseg(char *dbname, char *relname, Oid dbid, Oid relid,
 {
	char	   *path;
	int			fd;

 #ifndef LET_OS_MANAGE_FILESIZE
	int			segno;

 #endif

	/* construct the path to the relation */
@@ -1085,7 +1108,7 @@ _mdfd_blind_getseg(char *dbname, char *relname, Oid dbid, Oid relid,
	segno = blkno / RELSEG_SIZE;
	if (segno > 0)
	{
		char	   *segpath = (char *) palloc(strlen(path) + 12);
		char	   *segpath = (char *) palloc(strlen(path) + 12);

		sprintf(segpath, "%s.%d", path, segno);
		pfree(path);
@@ -1110,6 +1133,7 @@ _mdnblocks(File file, Size blcksz)
	long		len;

	len = FileSeek(file, 0L, SEEK_END);
	if (len < 0) return 0; /* on failure, assume file is empty */
	if (len < 0)
		return 0;				/* on failure, assume file is empty */
	return (BlockNumber) (len / blcksz);
 }

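Two small idioms close out md.c above: segment files beyond the first are named path.N with N = blkno / RELSEG_SIZE, and a segment's block count is just its byte length (seek to end) divided by the block size. In plain POSIX terms, with BLOCK_SZ as a stand-in for BLCKSZ and the function names invented:

/*
 * Sketch of _mdnblocks and the segment-name convention, using POSIX
 * calls instead of the fd.c wrappers.
 */
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

#define BLOCK_SZ 8192

/* Block count of one segment file: byte length / block size. */
static long
segment_nblocks(int fd)
{
	off_t		len = lseek(fd, 0, SEEK_END);

	if (len < 0)
		return 0;				/* on failure, assume file is empty */
	return (long) (len / BLOCK_SZ);
}

/* Segments after the first are named "<path>.<segno>". */
static void
segment_path(char *buf, size_t buflen, const char *path, int segno)
{
	if (segno > 0)
		snprintf(buf, buflen, "%s.%d", path, segno);
	else
		snprintf(buf, buflen, "%s", path);
}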
@@ -11,7 +11,7 @@
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/storage/smgr/smgr.c,v 1.34 2000/04/10 23:41:52 tgl Exp $
 *	  $Header: /cvsroot/pgsql/src/backend/storage/smgr/smgr.c,v 1.35 2000/04/12 17:15:42 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -23,31 +23,31 @@ static void smgrshutdown(int dummy);

 typedef struct f_smgr
 {
	int			(*smgr_init) (void);	/* may be NULL */
	int			(*smgr_shutdown) (void);		/* may be NULL */
	int			(*smgr_init) (void);	/* may be NULL */
	int			(*smgr_shutdown) (void);		/* may be NULL */
	int			(*smgr_create) (Relation reln);
	int			(*smgr_unlink) (Relation reln);
	int			(*smgr_extend) (Relation reln, char *buffer);
	int			(*smgr_open) (Relation reln);
	int			(*smgr_close) (Relation reln);
	int			(*smgr_read) (Relation reln, BlockNumber blocknum,
							  char *buffer);
										  char *buffer);
	int			(*smgr_write) (Relation reln, BlockNumber blocknum,
							   char *buffer);
										   char *buffer);
	int			(*smgr_flush) (Relation reln, BlockNumber blocknum,
							   char *buffer);
										   char *buffer);
	int			(*smgr_blindwrt) (char *dbname, char *relname,
								  Oid dbid, Oid relid,
								  BlockNumber blkno, char *buffer,
								  bool dofsync);
											  Oid dbid, Oid relid,
											  BlockNumber blkno, char *buffer,
											  bool dofsync);
	int			(*smgr_markdirty) (Relation reln, BlockNumber blkno);
	int			(*smgr_blindmarkdirty) (char *dbname, char *relname,
										Oid dbid, Oid relid,
										BlockNumber blkno);
											Oid dbid, Oid relid,
											BlockNumber blkno);
	int			(*smgr_nblocks) (Relation reln);
	int			(*smgr_truncate) (Relation reln, int nblocks);
	int			(*smgr_commit) (void);	/* may be NULL */
	int			(*smgr_abort) (void);	/* may be NULL */
	int			(*smgr_commit) (void);	/* may be NULL */
	int			(*smgr_abort) (void);	/* may be NULL */
 } f_smgr;

 /*
@@ -59,14 +59,14 @@ static f_smgr smgrsw[] = {

	/* magnetic disk */
	{mdinit, NULL, mdcreate, mdunlink, mdextend, mdopen, mdclose,
	 mdread, mdwrite, mdflush, mdblindwrt, mdmarkdirty, mdblindmarkdirty,
	 mdnblocks, mdtruncate, mdcommit, mdabort},
		mdread, mdwrite, mdflush, mdblindwrt, mdmarkdirty, mdblindmarkdirty,
	mdnblocks, mdtruncate, mdcommit, mdabort},

 #ifdef STABLE_MEMORY_STORAGE
	/* main memory */
	{mminit, mmshutdown, mmcreate, mmunlink, mmextend, mmopen, mmclose,
	 mmread, mmwrite, mmflush, mmblindwrt, mmmarkdirty, mmblindmarkdirty,
	 mmnblocks, NULL, mmcommit, mmabort},
		mmread, mmwrite, mmflush, mmblindwrt, mmmarkdirty, mmblindmarkdirty,
	mmnblocks, NULL, mmcommit, mmabort},

 #endif
 };
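The f_smgr table above is a classic C dispatch pattern: one struct of function pointers per storage manager, selected by an integer index, with NULL allowed for optional hooks. A cut-down sketch of how a caller like smgrnblocks routes through such a table; the two toy managers and every name here are invented for illustration.

/*
 * Cut-down sketch of function-pointer dispatch over storage managers.
 */
#include <stdio.h>

typedef struct toy_smgr
{
	int			(*smgr_init) (void);	/* may be NULL */
	int			(*smgr_nblocks) (int relfd);
} toy_smgr;

static int	disk_init(void)		{ return 0; }
static int	disk_nblocks(int fd) { (void) fd; return 42; }
static int	mem_nblocks(int fd)  { (void) fd; return 7; }

static toy_smgr smgrsw_toy[] = {
	{disk_init, disk_nblocks},	/* which == 0: magnetic disk */
	{NULL, mem_nblocks},		/* which == 1: main memory */
};

/* Route a request through the table, as smgrnblocks does. */
static int
toy_smgrnblocks(int which, int relfd)
{
	return (*(smgrsw_toy[which].smgr_nblocks)) (relfd);
}

int
main(void)
{
	printf("%d\n", toy_smgrnblocks(0, -1));		/* prints 42 */
	return 0;
}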
@@ -178,7 +178,7 @@ smgrextend(int16 which, Relation reln, char *buffer)

	if (status == SM_FAIL)
		elog(ERROR, "%s: cannot extend. Check free disk space.",
			RelationGetRelationName(reln));
			 RelationGetRelationName(reln));

	return status;
 }
@@ -209,7 +209,7 @@ smgropen(int16 which, Relation reln)
 * This is currently called only from RelationFlushRelation() when
 * the relation cache entry is about to be dropped; could be doing
 * simple relation cache clear, or finishing up DROP TABLE.
 *
 *
 * Returns SM_SUCCESS on success, aborts on failure.
 */
 int
@@ -294,7 +294,7 @@ smgrflush(int16 which, Relation reln, BlockNumber blocknum, char *buffer)
 * this case, the buffer manager will call smgrblindwrt() with
 * the name and OID of the database and the relation to which the
 * buffer belongs. Every storage manager must be able to force
 * this page down to stable storage in this circumstance. The
 * this page down to stable storage in this circumstance.	The
 * write should be synchronous if dofsync is true.
 */
 int
@@ -403,7 +403,7 @@ smgrnblocks(int16 which, Relation reln)

	if ((nblocks = (*(smgrsw[which].smgr_nblocks)) (reln)) < 0)
		elog(ERROR, "cannot count blocks for %s",
			RelationGetRelationName(reln));
			 RelationGetRelationName(reln));

	return nblocks;
 }