mirror of
https://github.com/postgres/postgres.git
synced 2025-07-14 08:21:07 +03:00
Remove old-style VACUUM FULL (which was known for a little while as
VACUUM FULL INPLACE), along with a boatload of subsidiary code and complexity. Per discussion, the use case for this method of vacuuming is no longer large enough to justify maintaining it; not to mention that we don't wish to invest the work that would be needed to make it play nicely with Hot Standby. Aside from the code directly related to old-style VACUUM FULL, this commit removes support for certain WAL record types that could only be generated within VACUUM FULL, redirect-pointer removal in heap_page_prune, and nontransactional generation of cache invalidation sinval messages (the last being the sticking point for Hot Standby). We still have to retain all code that copes with finding HEAP_MOVED_OFF and HEAP_MOVED_IN flag bits on existing tuples. This can't be removed as long as we want to support in-place update from pre-9.0 databases.
This commit is contained in:
99
src/backend/utils/cache/inval.c (vendored)
@ -80,7 +80,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.94 2010/02/07 20:48:10 tgl Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.95 2010/02/08 04:33:54 tgl Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -1112,103 +1112,6 @@ CommandEndInvalidationMessages(void)
|
||||
}
|
||||
|
||||
|
||||
/*
 * BeginNonTransactionalInvalidation
 *		Prepare for invalidation messages for nontransactional updates.
 *
 * A nontransactional invalidation is one that must be sent whether or not
 * the current transaction eventually commits.  We arrange for all invals
 * queued between this call and EndNonTransactionalInvalidation() to be sent
 * immediately when the latter is called.
 *
 * Currently, this is only used by heap_page_prune(), and only when it is
 * invoked during VACUUM FULL's first pass over a table.  We expect therefore
 * that we are not inside a subtransaction and there are no already-pending
 * invalidations.  This could be relaxed by setting up a new nesting level of
 * invalidation data, but for now there's no need.  Note that heap_page_prune
 * knows that this function does not change any state, and therefore there's
 * no need to worry about cleaning up if there's an elog(ERROR) before
 * reaching EndNonTransactionalInvalidation (the invals will just be thrown
 * away if that happens).
 *
 * Note that these are not replayed in standby mode.
 */
void
BeginNonTransactionalInvalidation(void)
{
	/* Must be at top of stack */
	Assert(transInvalInfo != NULL && transInvalInfo->parent == NULL);

	/* Must not have any previously-queued activity */
	Assert(transInvalInfo->PriorCmdInvalidMsgs.cclist == NULL);
	Assert(transInvalInfo->PriorCmdInvalidMsgs.rclist == NULL);
	Assert(transInvalInfo->CurrentCmdInvalidMsgs.cclist == NULL);
	Assert(transInvalInfo->CurrentCmdInvalidMsgs.rclist == NULL);
	Assert(transInvalInfo->RelcacheInitFileInval == false);

	/*
	 * Reset the accumulation state used by SendSharedInvalidMessages, so
	 * that EndNonTransactionalInvalidation sends only the invals queued
	 * after this point.
	 */
	SharedInvalidMessagesArray = NULL;
	numSharedInvalidMessagesArray = 0;
}
|
||||
|
||||
/*
|
||||
* EndNonTransactionalInvalidation
|
||||
* Process queued-up invalidation messages for nontransactional updates.
|
||||
*
|
||||
* We expect to find messages in CurrentCmdInvalidMsgs only (else there
|
||||
* was a CommandCounterIncrement within the "nontransactional" update).
|
||||
* We must process them locally and send them out to the shared invalidation
|
||||
* message queue.
|
||||
*
|
||||
* We must also reset the lists to empty and explicitly free memory (we can't
|
||||
* rely on end-of-transaction cleanup for that).
|
||||
*/
|
||||
void
|
||||
EndNonTransactionalInvalidation(void)
|
||||
{
|
||||
InvalidationChunk *chunk;
|
||||
InvalidationChunk *next;
|
||||
|
||||
/* Must be at top of stack */
|
||||
Assert(transInvalInfo != NULL && transInvalInfo->parent == NULL);
|
||||
|
||||
/* Must not have any prior-command messages */
|
||||
Assert(transInvalInfo->PriorCmdInvalidMsgs.cclist == NULL);
|
||||
Assert(transInvalInfo->PriorCmdInvalidMsgs.rclist == NULL);
|
||||
|
||||
/*
|
||||
* At present, this function is only used for CTID-changing updates; since
|
||||
* the relcache init file doesn't store any tuple CTIDs, we don't have to
|
||||
* invalidate it. That might not be true forever though, in which case
|
||||
* we'd need code similar to AtEOXact_Inval.
|
||||
*/
|
||||
|
||||
/* Send out the invals */
|
||||
ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs,
|
||||
LocalExecuteInvalidationMessage);
|
||||
ProcessInvalidationMessagesMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
|
||||
SendSharedInvalidMessages);
|
||||
|
||||
/* Clean up and release memory */
|
||||
for (chunk = transInvalInfo->CurrentCmdInvalidMsgs.cclist;
|
||||
chunk != NULL;
|
||||
chunk = next)
|
||||
{
|
||||
next = chunk->next;
|
||||
pfree(chunk);
|
||||
}
|
||||
for (chunk = transInvalInfo->CurrentCmdInvalidMsgs.rclist;
|
||||
chunk != NULL;
|
||||
chunk = next)
|
||||
{
|
||||
next = chunk->next;
|
||||
pfree(chunk);
|
||||
}
|
||||
transInvalInfo->CurrentCmdInvalidMsgs.cclist = NULL;
|
||||
transInvalInfo->CurrentCmdInvalidMsgs.rclist = NULL;
|
||||
transInvalInfo->RelcacheInitFileInval = false;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* CacheInvalidateHeapTuple
|
||||
* Register the given tuple for invalidation at end of command
|
||||
|
@ -50,7 +50,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.115 2010/01/02 16:57:58 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.116 2010/02/08 04:33:54 tgl Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -91,9 +91,12 @@ static bool XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot);
|
||||
* code in heapam.c relies on that!)
|
||||
*
|
||||
* Also, if we are cleaning up HEAP_MOVED_IN or HEAP_MOVED_OFF entries, then
|
||||
* we can always set the hint bits, since VACUUM FULL always uses synchronous
|
||||
* commits and doesn't move tuples that weren't previously hinted. (This is
|
||||
* not known by this subroutine, but is applied by its callers.)
|
||||
* we can always set the hint bits, since old-style VACUUM FULL always used
|
||||
* synchronous commits and didn't move tuples that weren't previously
|
||||
* hinted. (This is not known by this subroutine, but is applied by its
|
||||
* callers.) Note: old-style VACUUM FULL is gone, but we have to keep this
|
||||
* module's support for MOVED_OFF/MOVED_IN flag bits for as long as we
|
||||
* support in-place update from pre-9.0 databases.
|
||||
*
|
||||
* Normal commits may be asynchronous, so for those we need to get the LSN
|
||||
* of the transaction and then check whether this is flushed.
|
||||
|
Reference in New Issue
Block a user