
Standard pgindent run for 8.1.

Bruce Momjian
2005-10-15 02:49:52 +00:00
parent 790c01d280
commit 1dc3498251
770 changed files with 34334 additions and 32507 deletions


@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/freespace/freespace.c,v 1.48 2005/08/20 23:26:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/freespace/freespace.c,v 1.49 2005/10/15 02:49:25 momjian Exp $
*
*
* NOTES:
@@ -222,7 +222,7 @@ static HTAB *FreeSpaceMapRelHash; /* points to (what used to be)
static void CheckFreeSpaceMapStatistics(int elevel, int numRels,
- double needed);
+ double needed);
static FSMRelation *lookup_fsm_rel(RelFileNode *rel);
static FSMRelation *create_fsm_rel(RelFileNode *rel);
static void delete_fsm_rel(FSMRelation *fsmrel);
@@ -295,7 +295,7 @@ InitFreeSpaceMap(void)
if (!FreeSpaceMapRelHash)
ereport(FATAL,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("insufficient shared memory for free space map")));
errmsg("insufficient shared memory for free space map")));
if (found)
return;
@@ -307,14 +307,14 @@ InitFreeSpaceMap(void)
if (nchunks <= MaxFSMRelations)
ereport(FATAL,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("max_fsm_pages must exceed max_fsm_relations * %d",
CHUNKPAGES)));
errmsg("max_fsm_pages must exceed max_fsm_relations * %d",
CHUNKPAGES)));
FreeSpaceMap->arena = (char *) ShmemAlloc((Size) nchunks * CHUNKBYTES);
if (FreeSpaceMap->arena == NULL)
ereport(FATAL,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("insufficient shared memory for free space map")));
errmsg("insufficient shared memory for free space map")));
FreeSpaceMap->totalChunks = nchunks;
FreeSpaceMap->usedChunks = 0;
@@ -371,10 +371,10 @@ GetPageWithFreeSpace(RelFileNode *rel, Size spaceNeeded)
fsmrel = create_fsm_rel(rel);
/*
- * Update the moving average of space requests. This code implements
- * an exponential moving average with an equivalent period of about 63
- * requests. Ignore silly requests, however, to ensure that the
- * average stays sane.
+ * Update the moving average of space requests. This code implements an
+ * exponential moving average with an equivalent period of about 63
+ * requests. Ignore silly requests, however, to ensure that the average
+ * stays sane.
*/
if (spaceNeeded > 0 && spaceNeeded < BLCKSZ)
{
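
The "equivalent period of about 63 requests" falls out of an exponential moving average with smoothing factor 1/32, since the equivalent period of such an average is roughly 2/alpha - 1. A minimal standalone sketch of that update rule, with illustrative names rather than the function's actual locals:

#include <stddef.h>

#define BLCKSZ 8192				/* standard PostgreSQL page size */

/*
 * Exponential moving average with alpha = 1/32; the equivalent period
 * is about 2/alpha - 1 = 63 samples.  Requests of zero or >= BLCKSZ
 * bytes are ignored so the average stays sane.
 */
static size_t
update_avg_request(size_t avg, size_t spaceNeeded)
{
	if (spaceNeeded > 0 && spaceNeeded < BLCKSZ)
	{
		int		cur = (int) avg;

		cur += ((int) spaceNeeded - cur) / 32;	/* move 1/32 of the gap */
		avg = (size_t) cur;
	}
	return avg;
}
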
@@ -478,10 +478,10 @@ RecordRelationFreeSpace(RelFileNode *rel,
LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
/*
- * Note we don't record info about a relation unless there's already
- * an FSM entry for it, implying someone has done GetPageWithFreeSpace
- * for it. Inactive rels thus will not clutter the map simply by
- * being vacuumed.
+ * Note we don't record info about a relation unless there's already an
+ * FSM entry for it, implying someone has done GetPageWithFreeSpace for
+ * it. Inactive rels thus will not clutter the map simply by being
+ * vacuumed.
*/
fsmrel = lookup_fsm_rel(rel);
if (fsmrel)
@@ -494,8 +494,8 @@ RecordRelationFreeSpace(RelFileNode *rel,
curAllocPages = curAlloc * CHUNKPAGES;
/*
- * If the data fits in our current allocation, just copy it;
- * otherwise must compress.
+ * If the data fits in our current allocation, just copy it; otherwise
+ * must compress.
*/
newLocation = (FSMPageData *)
(FreeSpaceMap->arena + fsmrel->firstChunk * CHUNKBYTES);
@@ -567,10 +567,9 @@ RecordIndexFreeSpace(RelFileNode *rel,
LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
/*
- * Note we don't record info about a relation unless there's already
- * an FSM entry for it, implying someone has done GetFreeIndexPage for
- * it. Inactive rels thus will not clutter the map simply by being
- * vacuumed.
+ * Note we don't record info about a relation unless there's already an
+ * FSM entry for it, implying someone has done GetFreeIndexPage for it.
+ * Inactive rels thus will not clutter the map simply by being vacuumed.
*/
fsmrel = lookup_fsm_rel(rel);
if (fsmrel)
@@ -584,9 +583,9 @@ RecordIndexFreeSpace(RelFileNode *rel,
curAllocPages = curAlloc * INDEXCHUNKPAGES;
/*
- * If the data fits in our current allocation, just copy it;
- * otherwise must compress. But compression is easy: we merely
- * forget extra pages.
+ * If the data fits in our current allocation, just copy it; otherwise
+ * must compress. But compression is easy: we merely forget extra
+ * pages.
*/
newLocation = (IndexFSMPageData *)
(FreeSpaceMap->arena + fsmrel->firstChunk * CHUNKBYTES);
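
Because index pages are interchangeable, "compression" of an index entry reduces to truncation. A toy sketch of that idea, assuming a flat array of page slots; IndexPageSlot is a stand-in type, not the real IndexFSMPageData layout in the chunked arena:

#include <string.h>
#include <stdint.h>

typedef uint16_t IndexPageSlot;	/* stand-in for IndexFSMPageData */

/* Copy at most capacity slots; any excess pages are simply forgotten. */
static int
store_index_pages(IndexPageSlot *dst, int capacity,
				  const IndexPageSlot *src, int nPages)
{
	int		keep = (nPages <= capacity) ? nPages : capacity;

	memcpy(dst, src, keep * sizeof(IndexPageSlot));
	return keep;				/* caller's new storedPages */
}
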
@@ -708,34 +707,34 @@ PrintFreeSpaceMapStatistics(int elevel)
ereport(elevel,
(errmsg("free space map contains %d pages in %d relations",
storedPages, numRels),
errdetail("A total of %.0f page slots are in use (including overhead).\n"
"%.0f page slots are required to track all free space.\n"
"Current limits are: %d page slots, %d relations, using %.0f KB.",
Min(needed, MaxFSMPages),
needed, MaxFSMPages, MaxFSMRelations,
(double) FreeSpaceShmemSize() / 1024.0)));
errdetail("A total of %.0f page slots are in use (including overhead).\n"
"%.0f page slots are required to track all free space.\n"
"Current limits are: %d page slots, %d relations, using %.0f KB.",
Min(needed, MaxFSMPages),
needed, MaxFSMPages, MaxFSMRelations,
(double) FreeSpaceShmemSize() / 1024.0)));
CheckFreeSpaceMapStatistics(NOTICE, numRels, needed);
/* Print to server logs too because it deals with a config variable. */
CheckFreeSpaceMapStatistics(LOG, numRels, needed);
}
static void
CheckFreeSpaceMapStatistics(int elevel, int numRels, double needed)
{
- if (numRels == MaxFSMRelations)
+ if (numRels == MaxFSMRelations)
ereport(elevel,
(errmsg("max_fsm_relations(%d) equals the number of relations checked",
MaxFSMRelations),
errhint("You have >= %d relations.\n"
"Consider increasing the configuration parameter \"max_fsm_relations\".",
numRels)));
(errmsg("max_fsm_relations(%d) equals the number of relations checked",
MaxFSMRelations),
errhint("You have >= %d relations.\n"
"Consider increasing the configuration parameter \"max_fsm_relations\".",
numRels)));
else if (needed > MaxFSMPages)
ereport(elevel,
(errmsg("the number of page slots needed (%.0f) exceeds max_fsm_pages (%d)",
needed, MaxFSMPages),
errhint("Consider increasing the configuration parameter \"max_fsm_pages\"\n"
"to a value over %.0f.", needed)));
(errmsg("the number of page slots needed (%.0f) exceeds max_fsm_pages (%d)",
needed, MaxFSMPages),
errhint("Consider increasing the configuration parameter \"max_fsm_pages\"\n"
"to a value over %.0f.", needed)));
}
/*
@@ -753,7 +752,7 @@ DumpFreeSpaceMap(int code, Datum arg)
FSMRelation *fsmrel;
/* Try to create file */
- unlink(FSM_CACHE_FILENAME); /* in case it exists w/wrong permissions */
+ unlink(FSM_CACHE_FILENAME); /* in case it exists w/wrong permissions */
fp = AllocateFile(FSM_CACHE_FILENAME, PG_BINARY_W);
if (fp == NULL)
@@ -917,11 +916,11 @@ LoadFreeSpaceMap(void)
}
/*
- * Okay, create the FSM entry and insert data into it. Since the
- * rels were stored in reverse usage order, at the end of the loop
- * they will be correctly usage-ordered in memory; and if
- * MaxFSMRelations is less than it used to be, we will correctly
- * drop the least recently used ones.
+ * Okay, create the FSM entry and insert data into it. Since the rels
+ * were stored in reverse usage order, at the end of the loop they
+ * will be correctly usage-ordered in memory; and if MaxFSMRelations
+ * is less than it used to be, we will correctly drop the least
+ * recently used ones.
*/
fsmrel = create_fsm_rel(&relheader.key);
fsmrel->avgRequest = relheader.avgRequest;
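
The reverse-usage-order trick works because each loaded rel is inserted at the most-recently-used end of the usage list, so the last one read ends up at the front; once the list is full, further inserts push the least recently used entries out. A toy model of that invariant, with hypothetical types rather than the FSM's hash-plus-list structures:

#include <stddef.h>

typedef struct Rel
{
	struct Rel *prev;
	struct Rel *next;
	/* ... per-relation FSM data would live here ... */
} Rel;

typedef struct
{
	Rel		   *head;			/* most recently used */
	Rel		   *tail;			/* least recently used */
	int			count;
	int			max;
} UsageList;

/*
 * Insert at the MRU end, evicting from the LRU end when full.  Feeding
 * rels in reverse usage order rebuilds the original order, and drops
 * the least recently used rels if max shrank since the dump was made.
 */
static void
usage_list_insert(UsageList *list, Rel *rel)
{
	if (list->count >= list->max && list->tail != NULL)
	{
		Rel		   *victim = list->tail;

		list->tail = victim->prev;
		if (list->tail)
			list->tail->next = NULL;
		else
			list->head = NULL;
		list->count--;
	}
	rel->prev = NULL;
	rel->next = list->head;
	if (list->head)
		list->head->prev = rel;
	else
		list->tail = rel;
	list->head = rel;
	list->count++;
}
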
@@ -936,8 +935,8 @@ LoadFreeSpaceMap(void)
/*
* If the data fits in our current allocation, just copy it;
- * otherwise must compress. But compression is easy: we
- * merely forget extra pages.
+ * otherwise must compress. But compression is easy: we merely
+ * forget extra pages.
*/
newLocation = (IndexFSMPageData *)
(FreeSpaceMap->arena + fsmrel->firstChunk * CHUNKBYTES);
@@ -1105,10 +1104,10 @@ realloc_fsm_rel(FSMRelation *fsmrel, int nPages, bool isIndex)
myAlloc = fsm_calc_target_allocation(myRequest);
/*
- * Need to reallocate space if (a) my target allocation is more than
- * my current allocation, AND (b) my actual immediate need
- * (myRequest+1 chunks) is more than my current allocation. Otherwise
- * just store the new data in-place.
+ * Need to reallocate space if (a) my target allocation is more than my
+ * current allocation, AND (b) my actual immediate need (myRequest+1
+ * chunks) is more than my current allocation. Otherwise just store the
+ * new data in-place.
*/
curAlloc = fsm_current_allocation(fsmrel);
if (myAlloc > curAlloc && (myRequest + 1) > curAlloc && nPages > 0)
@@ -1241,8 +1240,7 @@ find_free_space(FSMRelation *fsmrel, Size spaceNeeded)
if (spaceAvail >= spaceNeeded)
{
/*
- * Found what we want --- adjust the entry, and update
- * nextPage.
+ * Found what we want --- adjust the entry, and update nextPage.
*/
FSMPageSetSpace(page, spaceAvail - spaceNeeded);
fsmrel->nextPage = pageIndex + 1;
@@ -1266,10 +1264,10 @@ find_index_free_space(FSMRelation *fsmrel)
BlockNumber result;
/*
- * If isIndex isn't set, it could be that RecordIndexFreeSpace() has
- * never yet been called on this relation, and we're still looking at
- * the default setting from create_fsm_rel(). If so, just act as
- * though there's no space.
+ * If isIndex isn't set, it could be that RecordIndexFreeSpace() has never
+ * yet been called on this relation, and we're still looking at the
+ * default setting from create_fsm_rel(). If so, just act as though
+ * there's no space.
*/
if (!fsmrel->isIndex)
{
@@ -1279,10 +1277,10 @@ find_index_free_space(FSMRelation *fsmrel)
}
/*
- * For indexes, there's no need for the nextPage state variable; we
- * just remove and return the first available page. (We could save
- * cycles here by returning the last page, but it seems better to
- * encourage re-use of lower-numbered pages.)
+ * For indexes, there's no need for the nextPage state variable; we just
+ * remove and return the first available page. (We could save cycles here
+ * by returning the last page, but it seems better to encourage re-use of
+ * lower-numbered pages.)
*/
if (fsmrel->storedPages <= 0)
return InvalidBlockNumber; /* no pages available */
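
Taking the first stored page, rather than the last, is what biases allocation toward low block numbers. Seen in isolation, with the slots held as a simple sorted array instead of the chunked arena:

#include <string.h>
#include <stdint.h>

typedef uint32_t BlockNumber;
#define InvalidBlockNumber ((BlockNumber) 0xFFFFFFFF)

/*
 * Pages are kept sorted by block number, so slot 0 is the lowest-numbered
 * free page; removing it encourages re-use near the start of the relation.
 */
static BlockNumber
take_first_index_page(BlockNumber *pages, int *storedPages)
{
	BlockNumber result;

	if (*storedPages <= 0)
		return InvalidBlockNumber;	/* no pages available */
	result = pages[0];
	memmove(pages, pages + 1, (*storedPages - 1) * sizeof(BlockNumber));
	(*storedPages)--;
	return result;
}
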
@@ -1318,10 +1316,10 @@ fsm_record_free_space(FSMRelation *fsmrel, BlockNumber page, Size spaceAvail)
else
{
/*
- * No existing entry; ignore the call. We used to add the page to
- * the FSM --- but in practice, if the page hasn't got enough
- * space to satisfy the caller who's kicking it back to us, then
- * it's probably uninteresting to everyone else as well.
+ * No existing entry; ignore the call. We used to add the page to the
+ * FSM --- but in practice, if the page hasn't got enough space to
+ * satisfy the caller who's kicking it back to us, then it's probably
+ * uninteresting to everyone else as well.
*/
}
}
@@ -1454,25 +1452,23 @@ compact_fsm_storage(void)
/*
* It's possible that we have to move data down, not up, if the
- * allocations of previous rels expanded. This normally means
- * that our allocation expanded too (or at least got no worse),
- * and ditto for later rels. So there should be room to move all
- * our data down without dropping any --- but we might have to
- * push down following rels to acquire the room. We don't want to
- * do the push more than once, so pack everything against the end
- * of the arena if so.
+ * allocations of previous rels expanded. This normally means that
+ * our allocation expanded too (or at least got no worse), and ditto
+ * for later rels. So there should be room to move all our data down
+ * without dropping any --- but we might have to push down following
+ * rels to acquire the room. We don't want to do the push more than
+ * once, so pack everything against the end of the arena if so.
*
* In corner cases where we are on the short end of a roundoff choice
* that we were formerly on the long end of, it's possible that we
- * have to move down and compress our data too. In fact, even
- * after pushing down the following rels, there might not be as
- * much space as we computed for this rel above --- that would
- * imply that some following rel(s) are also on the losing end of
- * roundoff choices. We could handle this fairly by doing the
- * per-rel compactions out-of-order, but that seems like way too
- * much complexity to deal with a very infrequent corner case.
- * Instead, we simply drop pages from the end of the current rel's
- * data until it fits.
+ * have to move down and compress our data too. In fact, even after
+ * pushing down the following rels, there might not be as much space
+ * as we computed for this rel above --- that would imply that some
+ * following rel(s) are also on the losing end of roundoff choices. We
+ * could handle this fairly by doing the per-rel compactions
+ * out-of-order, but that seems like way too much complexity to deal
+ * with a very infrequent corner case. Instead, we simply drop pages
+ * from the end of the current rel's data until it fits.
*/
if (newChunkIndex > oldChunkIndex)
{
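
The pack-against-the-end maneuver is easier to see on a toy arena of byte segments: sweeping from the last rel to the first and shoving each as far right as possible leaves all the slack at the front, so each rel then needs to be moved down at most once. A sketch under those assumptions (segments sorted by offset, total size within the arena); the names are illustrative, not the FSM's chunk bookkeeping:

#include <string.h>
#include <stddef.h>

typedef struct
{
	size_t		off;			/* current offset within the arena */
	size_t		len;			/* segment length in bytes */
} Seg;

/*
 * Move segments right-to-left so they end up flush against the arena's
 * end; memmove copes with overlap, and no segment ever moves left here,
 * so not-yet-moved earlier segments are never overwritten.
 */
static void
pack_to_end(char *arena, size_t arenaSize, Seg *segs, int nSegs)
{
	size_t		limit = arenaSize;
	int			i;

	for (i = nSegs - 1; i >= 0; i--)
	{
		size_t		newOff = limit - segs[i].len;

		if (newOff != segs[i].off)
			memmove(arena + newOff, arena + segs[i].off, segs[i].len);
		segs[i].off = newOff;
		limit = newOff;
	}
}
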
@@ -1508,12 +1504,11 @@ compact_fsm_storage(void)
newAlloc = limitChunkIndex - newChunkIndex;
/*
- * If newAlloc < 0 at this point, we are moving the
- * rel's firstChunk into territory currently assigned
- * to a later rel. This is okay so long as we do not
- * copy any data. The rels will be back in
- * nondecreasing firstChunk order at completion of the
- * compaction pass.
+ * If newAlloc < 0 at this point, we are moving the rel's
+ * firstChunk into territory currently assigned to a later
+ * rel. This is okay so long as we do not copy any data.
+ * The rels will be back in nondecreasing firstChunk order
+ * at completion of the compaction pass.
*/
if (newAlloc < 0)
newAlloc = 0;
@@ -1530,9 +1525,9 @@ compact_fsm_storage(void)
else if (newAllocPages < fsmrel->storedPages)
{
/*
- * Need to compress the page data. For an index,
- * "compression" just means dropping excess pages; otherwise
- * we try to keep the ones with the most space.
+ * Need to compress the page data. For an index, "compression"
+ * just means dropping excess pages; otherwise we try to keep the
+ * ones with the most space.
*/
if (fsmrel->isIndex)
{
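
For a heap rel the compression policy is the interesting part: keep the slots advertising the most free space, then restore block-number order so the entry can still be searched in page order. A qsort-based toy with hypothetical slot types; the committed code uses its own selection logic over the arena, not qsort:

#include <stdlib.h>
#include <stdint.h>

typedef struct
{
	uint32_t	blkno;			/* block number */
	uint16_t	avail;			/* advertised free bytes */
} PageSlot;

static int
by_avail_desc(const void *a, const void *b)
{
	const PageSlot *pa = a, *pb = b;

	return (pb->avail > pa->avail) - (pb->avail < pa->avail);
}

static int
by_blkno_asc(const void *a, const void *b)
{
	const PageSlot *pa = a, *pb = b;

	return (pa->blkno > pb->blkno) - (pa->blkno < pb->blkno);
}

/* Keep the 'limit' slots with the most free space, then re-sort by
 * block number so lookups can still scan in block order. */
static int
compress_page_slots(PageSlot *slots, int n, int limit)
{
	if (n > limit)
	{
		qsort(slots, n, sizeof(PageSlot), by_avail_desc);
		n = limit;
	}
	qsort(slots, n, sizeof(PageSlot), by_blkno_asc);
	return n;
}
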
@@ -1863,7 +1858,7 @@ DumpFreeSpace(void)
relNum++;
fprintf(stderr, "Map %d: rel %u/%u/%u isIndex %d avgRequest %u lastPageCount %d nextPage %d\nMap= ",
relNum,
- fsmrel->key.spcNode, fsmrel->key.dbNode, fsmrel->key.relNode,
+ fsmrel->key.spcNode, fsmrel->key.dbNode, fsmrel->key.relNode,
(int) fsmrel->isIndex, fsmrel->avgRequest,
fsmrel->lastPageCount, fsmrel->nextPage);
if (fsmrel->isIndex)