
Remove old-style VACUUM FULL (which was known for a little while as
"VACUUM FULL INPLACE"), along with a boatload of subsidiary code and complexity.
Per discussion, the use case for this method of vacuuming is no longer large
enough to justify maintaining it; not to mention that we don't wish to invest
the work that would be needed to make it play nicely with Hot Standby.

Aside from the code directly related to old-style VACUUM FULL, this commit
removes support for certain WAL record types that could only be generated
within VACUUM FULL, redirect-pointer removal in heap_page_prune, and
nontransactional generation of cache invalidation sinval messages (the last
being the sticking point for Hot Standby).

We still have to retain all code that copes with finding HEAP_MOVED_OFF and
HEAP_MOVED_IN flag bits on existing tuples.  This can't be removed as long
as we want to support in-place update from pre-9.0 databases.
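
Background on that last point: HEAP_MOVED_OFF and HEAP_MOVED_IN are t_infomask
bits that old-style VACUUM FULL set on the tuple copies it relocated, with the
mover's transaction ID recorded in the tuple's xvac field.  The sketch below
shows the basic rule that visibility code must keep applying.  The bit values
match src/include/access/htup.h, but the struct and the xid_committed() helper
are simplified stand-ins for illustration, not server code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;

/* t_infomask bits, values as in src/include/access/htup.h */
#define HEAP_MOVED_OFF 0x4000   /* moved elsewhere by pre-9.0 VACUUM FULL */
#define HEAP_MOVED_IN  0x8000   /* moved here by pre-9.0 VACUUM FULL */

/* simplified stand-in for HeapTupleHeaderData (real xvac storage differs) */
typedef struct
{
    uint16_t      t_infomask;
    TransactionId t_xvac;       /* XID of the moving VACUUM FULL */
} MovedTupleSketch;

/* placeholder for TransactionIdDidCommit(); here we assume the mover committed */
static bool
xid_committed(TransactionId xid)
{
    (void) xid;
    return true;
}

/*
 * A tuple marked MOVED_OFF is dead at this location if the mover committed
 * (the live copy is elsewhere); a tuple marked MOVED_IN is live here only if
 * the mover committed.  Real code must also handle an in-progress mover.
 */
static bool
moved_tuple_is_valid(const MovedTupleSketch *tup)
{
    if (tup->t_infomask & HEAP_MOVED_OFF)
        return !xid_committed(tup->t_xvac);
    if (tup->t_infomask & HEAP_MOVED_IN)
        return xid_committed(tup->t_xvac);
    return true;                /* no MOVED bits: ordinary visibility rules */
}

int
main(void)
{
    MovedTupleSketch old_copy = {HEAP_MOVED_OFF, 1234};
    MovedTupleSketch new_copy = {HEAP_MOVED_IN, 1234};

    printf("old copy valid: %d, new copy valid: %d\n",
           moved_tuple_is_valid(&old_copy),
           moved_tuple_is_valid(&new_copy));
    return 0;
}
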
Tom Lane
2010-02-08 04:33:55 +00:00
parent 1ddc2703a9
commit 0a469c8769
41 changed files with 247 additions and 3737 deletions

src/backend/access/nbtree/README

@@ -1,4 +1,4 @@
-$PostgreSQL: pgsql/src/backend/access/nbtree/README,v 1.21 2009/12/19 01:32:32 sriggs Exp $
+$PostgreSQL: pgsql/src/backend/access/nbtree/README,v 1.22 2010/02/08 04:33:53 tgl Exp $
 
 Btree Indexing
 ==============
@@ -171,9 +171,9 @@ We consider deleting an entire page from the btree only when it's become
 completely empty of items. (Merging partly-full pages would allow better
 space reuse, but it seems impractical to move existing data items left or
 right to make this happen --- a scan moving in the opposite direction
-might miss the items if so. We could do it during VACUUM FULL, though.)
-Also, we *never* delete the rightmost page on a tree level (this
-restriction simplifies the traversal algorithms, as explained below).
+might miss the items if so.) Also, we *never* delete the rightmost page
+on a tree level (this restriction simplifies the traversal algorithms, as
+explained below).
 
 To delete an empty page, we acquire write lock on its left sibling (if
 any), the target page itself, the right sibling (there must be one), and
@@ -266,8 +266,7 @@ transactions that were running at the time of deletion are dead; which is
 overly strong, but is simple to implement within Postgres. When marked
 dead, a deleted page is labeled with the next-transaction counter value.
 VACUUM can reclaim the page for re-use when this transaction number is
-older than the oldest open transaction. (NOTE: VACUUM FULL can reclaim
-such pages immediately.)
+older than the oldest open transaction.
 
 Reclaiming a page doesn't actually change its state on disk --- we simply
 record it in the shared-memory free space map, from which it will be
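
The README change above captures the new invariant: a deleted btree page is
always stamped with a fresh next-transaction ID, and may be recycled only once
that XID is older than any transaction that could still be scanning the index.
Below is a minimal sketch of the safety test, modeled loosely on
_bt_page_recyclable() in nbtpage.c; the struct and the naive <= comparison are
simplifications (the server compares against RecentGlobalXmin with a
wraparound-aware TransactionIdPrecedesOrEquals()).

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t TransactionId;

/* simplified stand-in for the btree page's special space */
typedef struct
{
    bool          deleted;      /* BTP_DELETED in the real btpo_flags */
    TransactionId btpo_xact;    /* next-XID stamped when the page was deleted */
} BTOpaqueSketch;

/*
 * A deleted page is recyclable once no transaction that was running at
 * deletion time can still be in flight, i.e. once the stamped XID does not
 * exceed the oldest transaction ID still of interest.
 */
static bool
page_recyclable(const BTOpaqueSketch *opaque, TransactionId oldest_xmin)
{
    return opaque->deleted && opaque->btpo_xact <= oldest_xmin;
}

With old-style VACUUM FULL gone, the FrozenTransactionId shortcut that allowed
immediate recycling disappears as well, which is exactly what the nbtpage.c
hunk below removes.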

src/backend/access/nbtree/nbtpage.c

@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.117 2010/02/01 13:40:28 sriggs Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.118 2010/02/08 04:33:53 tgl Exp $
  *
  * NOTES
  * Postgres btree pages look like ordinary relation pages. The opaque
@@ -877,7 +877,7 @@ _bt_parent_deletion_safe(Relation rel, BlockNumber target, BTStack stack)
  * frequently.
  */
 int
-_bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
+_bt_pagedel(Relation rel, Buffer buf, BTStack stack)
 {
     int result;
     BlockNumber target,
@@ -1207,14 +1207,13 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
     /*
      * Mark the page itself deleted. It can be recycled when all current
-     * transactions are gone; or immediately if we're doing VACUUM FULL.
+     * transactions are gone.
      */
     page = BufferGetPage(buf);
     opaque = (BTPageOpaque) PageGetSpecialPointer(page);
     opaque->btpo_flags &= ~BTP_HALF_DEAD;
     opaque->btpo_flags |= BTP_DELETED;
-    opaque->btpo.xact =
-        vacuum_full ? FrozenTransactionId : ReadNewTransactionId();
+    opaque->btpo.xact = ReadNewTransactionId();
 
     /* And update the metapage, if needed */
     if (BufferIsValid(metabuf))
@@ -1350,7 +1349,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
     {
         /* recursive call will release pbuf */
         _bt_relbuf(rel, rbuf);
-        result = _bt_pagedel(rel, pbuf, stack->bts_parent, vacuum_full) + 1;
+        result = _bt_pagedel(rel, pbuf, stack->bts_parent) + 1;
         _bt_relbuf(rel, buf);
     }
     else if (parent_one_child && rightsib_empty)
@@ -1358,7 +1357,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
     {
         _bt_relbuf(rel, pbuf);
         _bt_relbuf(rel, buf);
         /* recursive call will release rbuf */
-        result = _bt_pagedel(rel, rbuf, stack, vacuum_full) + 1;
+        result = _bt_pagedel(rel, rbuf, stack) + 1;
     }
     else
     {

src/backend/access/nbtree/nbtree.c

@@ -12,7 +12,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.174 2010/01/02 16:57:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.175 2010/02/08 04:33:53 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -579,12 +579,12 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
     IndexFreeSpaceMapVacuum(info->index);
 
     /*
-     * During a non-FULL vacuum it's quite possible for us to be fooled by
-     * concurrent page splits into double-counting some index tuples, so
-     * disbelieve any total that exceeds the underlying heap's count ... if we
-     * know that accurately. Otherwise this might just make matters worse.
+     * It's quite possible for us to be fooled by concurrent page splits into
+     * double-counting some index tuples, so disbelieve any total that exceeds
+     * the underlying heap's count ... if we know that accurately. Otherwise
+     * this might just make matters worse.
      */
-    if (!info->vacuum_full && !info->estimated_count)
+    if (!info->estimated_count)
     {
         if (stats->num_index_tuples > info->num_heap_tuples)
             stats->num_index_tuples = info->num_heap_tuples;
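
The hunk above reduces to one rule: a concurrent page split can only make the
index scan count a tuple twice, never miss it, so an index total that exceeds
an accurately known heap count is disbelieved. A self-contained illustration
of that rule (names are hypothetical, not server code):

#include <stdbool.h>

/*
 * Clamp a possibly inflated index tuple count to the heap's count, but only
 * when the heap count is itself trustworthy.
 */
static double
clamp_index_tuples(double num_index_tuples, double num_heap_tuples,
                   bool heap_count_estimated)
{
    if (!heap_count_estimated && num_index_tuples > num_heap_tuples)
        return num_heap_tuples; /* splits inflate counts; they never deflate */
    return num_index_tuples;
}
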
@@ -686,27 +686,6 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
         }
     }
 
-    /*
-     * During VACUUM FULL, we truncate off any recyclable pages at the end of
-     * the index. In a normal vacuum it'd be unsafe to do this except by
-     * acquiring exclusive lock on the index and then rechecking all the
-     * pages; doesn't seem worth it.
-     */
-    if (info->vacuum_full && vstate.lastUsedPage < num_pages - 1)
-    {
-        BlockNumber new_pages = vstate.lastUsedPage + 1;
-
-        /*
-         * Okay to truncate.
-         */
-        RelationTruncate(rel, new_pages);
-
-        /* update statistics */
-        stats->pages_removed += num_pages - new_pages;
-        vstate.totFreePages -= (num_pages - new_pages);
-        num_pages = new_pages;
-    }
-
     /*
      * InHotStandby we need to scan right up to the end of the index for
      * correct locking, so we may need to write a WAL record for the final
@@ -963,26 +942,12 @@ restart:
         MemoryContextReset(vstate->pagedelcontext);
         oldcontext = MemoryContextSwitchTo(vstate->pagedelcontext);
 
-        ndel = _bt_pagedel(rel, buf, NULL, info->vacuum_full);
+        ndel = _bt_pagedel(rel, buf, NULL);
 
         /* count only this page, else may double-count parent */
         if (ndel)
             stats->pages_deleted++;
 
-        /*
-         * During VACUUM FULL it's okay to recycle deleted pages immediately,
-         * since there can be no other transactions scanning the index. Note
-         * that we will only recycle the current page and not any parent pages
-         * that _bt_pagedel might have recursed to; this seems reasonable in
-         * the name of simplicity. (Trying to do otherwise would mean we'd
-         * have to sort the list of recyclable pages we're building.)
-         */
-        if (ndel && info->vacuum_full)
-        {
-            RecordFreeIndexPage(rel, blkno);
-            vstate->totFreePages++;
-        }
-
         MemoryContextSwitchTo(oldcontext);
         /* pagedel released buffer, so we shouldn't */
     }

src/backend/access/nbtree/nbtxlog.c

@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.59 2010/01/29 17:10:05 sriggs Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.60 2010/02/08 04:33:53 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -1079,8 +1079,8 @@ btree_xlog_cleanup(void)
             Relation reln;
 
             reln = CreateFakeRelcacheEntry(action->node);
-            if (_bt_pagedel(reln, buf, NULL, true) == 0)
-                elog(PANIC, "btree_xlog_cleanup: _bt_pagdel failed");
+            if (_bt_pagedel(reln, buf, NULL) == 0)
+                elog(PANIC, "btree_xlog_cleanup: _bt_pagedel failed");
             FreeFakeRelcacheEntry(reln);
         }
     }