
Remove old-style VACUUM FULL (which was known for a little while as VACUUM FULL
INPLACE), along with a boatload of subsidiary code and complexity.
Per discussion, the use case for this method of vacuuming is no longer large
enough to justify maintaining it; not to mention that we don't wish to invest
the work that would be needed to make it play nicely with Hot Standby.

Aside from the code directly related to old-style VACUUM FULL, this commit
removes support for certain WAL record types that could only be generated
within VACUUM FULL, redirect-pointer removal in heap_page_prune, and
nontransactional generation of cache invalidation sinval messages (the last
being the sticking point for Hot Standby).

We still have to retain all code that copes with finding HEAP_MOVED_OFF and
HEAP_MOVED_IN flag bits on existing tuples.  This can't be removed as long
as we want to support in-place update from pre-9.0 databases.
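For reference, the standalone sketch below condenses what interpreting those flag bits involves. It is not PostgreSQL source: FakeTupleHeader, xid_did_commit(), and moved_bits_say_visible() are invented stand-ins, and only the bit values and their intended meaning are taken from htup.h (a tuple marked HEAP_MOVED_OFF is dead once the moving transaction committed; one marked HEAP_MOVED_IN is live only if it committed).

/*
 * Simplified sketch (NOT PostgreSQL source) of honoring the HEAP_MOVED_OFF /
 * HEAP_MOVED_IN infomask bits left behind by old-style VACUUM FULL.
 * The bit values match src/include/access/htup.h; everything else here
 * (FakeTupleHeader, xid_did_commit) is an invented stand-in.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;

#define HEAP_MOVED_OFF  0x4000      /* moved elsewhere by old-style VACUUM FULL */
#define HEAP_MOVED_IN   0x8000      /* moved here by old-style VACUUM FULL */

typedef struct
{
    uint16_t        t_infomask;     /* tuple status flag bits */
    TransactionId   t_xvac;         /* xid of the moving (VACUUM FULL) transaction */
} FakeTupleHeader;

/* Hypothetical stand-in for a real commit-log lookup. */
static bool
xid_did_commit(TransactionId xid)
{
    (void) xid;
    return true;                    /* pretend the mover committed */
}

/*
 * Visibility as far as the MOVED bits are concerned: the old copy (MOVED_OFF)
 * dies when the mover commits; the new copy (MOVED_IN) becomes valid only if
 * the mover committed.
 */
static bool
moved_bits_say_visible(const FakeTupleHeader *tup)
{
    if (tup->t_infomask & HEAP_MOVED_OFF)
        return !xid_did_commit(tup->t_xvac);
    if (tup->t_infomask & HEAP_MOVED_IN)
        return xid_did_commit(tup->t_xvac);
    return true;                    /* neither bit set: normal MVCC rules apply */
}

int
main(void)
{
    FakeTupleHeader moved_off = {HEAP_MOVED_OFF, 1234};
    FakeTupleHeader moved_in  = {HEAP_MOVED_IN, 1234};

    printf("MOVED_OFF copy visible? %d\n", moved_bits_say_visible(&moved_off));
    printf("MOVED_IN copy visible?  %d\n", moved_bits_say_visible(&moved_in));
    return 0;
}

The real checks in tqual.c additionally consult hint bits and whether the moving transaction is still in progress, which this sketch glosses over.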
commit 0a469c8769
parent 1ddc2703a9
Author: Tom Lane
Date:   2010-02-08 04:33:55 +00:00

41 changed files with 247 additions and 3737 deletions

--- a/src/backend/access/gist/gistvacuum.c
+++ b/src/backend/access/gist/gistvacuum.c

@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.46 2010/01/02 16:57:34 momjian Exp $
+ *    $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.47 2010/02/08 04:33:52 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -29,7 +29,7 @@
 typedef struct GistBulkDeleteResult
 {
     IndexBulkDeleteResult std;      /* common state */
-    bool        needFullVacuum;
+    bool        needReindex;
 } GistBulkDeleteResult;
 
 typedef struct
@@ -496,12 +496,8 @@ gistVacuumUpdate(GistVacuum *gv, BlockNumber blkno, bool needunion)
 }
 
 /*
- * For usual vacuum just update FSM, for full vacuum
- * reforms parent tuples if some of childs was deleted or changed,
- * update invalid tuples (they can exist from last crash recovery only),
- * tries to get smaller index
+ * VACUUM cleanup: update FSM
  */
 Datum
 gistvacuumcleanup(PG_FUNCTION_ARGS)
 {
@@ -533,47 +529,15 @@ gistvacuumcleanup(PG_FUNCTION_ARGS)
         */
    }
 
-   /* gistVacuumUpdate may cause hard work */
-   if (info->vacuum_full)
-   {
-       GistVacuum  gv;
-       ArrayTuple  res;
-
-       /* note: vacuum.c already acquired AccessExclusiveLock on index */
-
-       gv.index = rel;
-       initGISTstate(&(gv.giststate), rel);
-       gv.opCtx = createTempGistContext();
-       gv.result = stats;
-       gv.strategy = info->strategy;
-
-       /* walk through the entire index for update tuples */
-       res = gistVacuumUpdate(&gv, GIST_ROOT_BLKNO, false);
-       /* cleanup */
-       if (res.itup)
-       {
-           int         i;
-
-           for (i = 0; i < res.ituplen; i++)
-               pfree(res.itup[i]);
-           pfree(res.itup);
-       }
-
-       freeGISTstate(&(gv.giststate));
-       MemoryContextDelete(gv.opCtx);
-   }
-   else if (stats->needFullVacuum)
+   if (stats->needReindex)
        ereport(NOTICE,
                (errmsg("index \"%s\" needs VACUUM FULL or REINDEX to finish crash recovery",
                        RelationGetRelationName(rel))));
 
    /*
-    * If vacuum full, we already have exclusive lock on the index. Otherwise,
-    * need lock unless it's local to this backend.
+    * Need lock unless it's local to this backend.
     */
-   if (info->vacuum_full)
-       needLock = false;
-   else
-       needLock = !RELATION_IS_LOCAL(rel);
+   needLock = !RELATION_IS_LOCAL(rel);
 
    /* try to find deleted pages */
    if (needLock)
@@ -606,14 +570,6 @@ gistvacuumcleanup(PG_FUNCTION_ARGS)
    }
    lastBlock = npages - 1;
 
-   if (info->vacuum_full && lastFilledBlock < lastBlock)
-   {                           /* try to truncate index */
-       RelationTruncate(rel, lastFilledBlock + 1);
-
-       stats->std.pages_removed = lastBlock - lastFilledBlock;
-       totFreePages = totFreePages - stats->std.pages_removed;
-   }
-
    /* Finally, vacuum the FSM */
    IndexFreeSpaceMapVacuum(info->index);
@@ -799,7 +755,7 @@ gistbulkdelete(PG_FUNCTION_ARGS)
            stack->next = ptr;
 
            if (GistTupleIsInvalid(idxtuple))
-               stats->needFullVacuum = true;
+               stats->needReindex = true;
        }
    }