Mirror of https://github.com/postgres/postgres.git (synced 2025-07-15 19:21:59 +03:00)

Support disabling index bypassing by VACUUM.

Generalize the INDEX_CLEANUP VACUUM parameter (and the corresponding
reloption): make it into a ternary style boolean parameter.  It now
exposes a third option, "auto".  The "auto" option (which is now the
default) enables the "bypass index vacuuming" optimization added by
commit 1e55e7d1.

"VACUUM (INDEX_CLEANUP TRUE)" is redefined to once again make VACUUM
simply do any required index vacuuming, regardless of how few dead
tuples are encountered during the first scan of the target heap relation
(unless there are exactly zero).  This gives users a way of opting out
of the "bypass index vacuuming" optimization, if for whatever reason
that proves necessary.  It is also expected to be used by PostgreSQL
developers as a testing option from time to time.

"VACUUM (INDEX_CLEANUP FALSE)" does the same thing as it always has: it
forcibly disables both index vacuuming and index cleanup.  It's not
expected to be used much in PostgreSQL 14.  The failsafe mechanism added
by commit 1e55e7d1 addresses the same problem in a simpler way.
INDEX_CLEANUP can now be thought of as a testing and compatibility
option.

Author: Peter Geoghegan <pg@bowt.ie>
Reviewed-By: Masahiko Sawada <sawada.mshk@gmail.com>
Reviewed-By: Justin Pryzby <pryzby@telsasoft.com>
Discussion: https://postgr.es/m/CAH2-WznrBoCST4_Gxh_G9hA8NzGUbeBGnOUC8FcXcrhqsv6OHQ@mail.gmail.com
This commit is contained in:
Peter Geoghegan
2021-06-18 20:04:07 -07:00
parent 09126984a2
commit 3499df0dee
13 changed files with 313 additions and 127 deletions

View File

@ -140,15 +140,6 @@ static relopt_bool boolRelOpts[] =
},
false
},
{
{
"vacuum_index_cleanup",
"Enables index vacuuming and index cleanup",
RELOPT_KIND_HEAP | RELOPT_KIND_TOAST,
ShareUpdateExclusiveLock
},
true
},
{
{
"vacuum_truncate",
@ -474,6 +465,21 @@ static relopt_real realRelOpts[] =
{{NULL}}
};
/* values from StdRdOptIndexCleanup */
relopt_enum_elt_def StdRdOptIndexCleanupValues[] =
{
{"auto", STDRD_OPTION_VACUUM_INDEX_CLEANUP_AUTO},
{"on", STDRD_OPTION_VACUUM_INDEX_CLEANUP_ON},
{"off", STDRD_OPTION_VACUUM_INDEX_CLEANUP_OFF},
{"true", STDRD_OPTION_VACUUM_INDEX_CLEANUP_ON},
{"false", STDRD_OPTION_VACUUM_INDEX_CLEANUP_OFF},
{"yes", STDRD_OPTION_VACUUM_INDEX_CLEANUP_ON},
{"no", STDRD_OPTION_VACUUM_INDEX_CLEANUP_OFF},
{"1", STDRD_OPTION_VACUUM_INDEX_CLEANUP_ON},
{"0", STDRD_OPTION_VACUUM_INDEX_CLEANUP_OFF},
{(const char *) NULL} /* list terminator */
};
/* values from GistOptBufferingMode */
relopt_enum_elt_def gistBufferingOptValues[] =
{
@ -494,6 +500,17 @@ relopt_enum_elt_def viewCheckOptValues[] =
static relopt_enum enumRelOpts[] =
{
{
{
"vacuum_index_cleanup",
"Controls index vacuuming and index cleanup",
RELOPT_KIND_HEAP | RELOPT_KIND_TOAST,
ShareUpdateExclusiveLock
},
StdRdOptIndexCleanupValues,
STDRD_OPTION_VACUUM_INDEX_CLEANUP_AUTO,
gettext_noop("Valid values are \"on\", \"off\", and \"auto\".")
},
{
{
"buffering",

View File

@ -308,11 +308,16 @@ typedef struct LVRelState
Relation rel;
Relation *indrels;
int nindexes;
/* Do index vacuuming/cleanup? */
/* Wraparound failsafe has been triggered? */
bool failsafe_active;
/* Consider index vacuuming bypass optimization? */
bool consider_bypass_optimization;
/* Doing index vacuuming, index cleanup, rel truncation? */
bool do_index_vacuuming;
bool do_index_cleanup;
/* Wraparound failsafe in effect? (implies !do_index_vacuuming) */
bool do_failsafe;
bool do_rel_truncate;
/* Buffer access strategy and parallel state */
BufferAccessStrategy bstrategy;
@ -405,7 +410,7 @@ static void lazy_scan_prune(LVRelState *vacrel, Buffer buf,
BlockNumber blkno, Page page,
GlobalVisState *vistest,
LVPagePruneState *prunestate);
static void lazy_vacuum(LVRelState *vacrel, bool onecall);
static void lazy_vacuum(LVRelState *vacrel);
static bool lazy_vacuum_all_indexes(LVRelState *vacrel);
static void lazy_vacuum_heap_rel(LVRelState *vacrel);
static int lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno,
@ -435,8 +440,7 @@ static IndexBulkDeleteResult *lazy_cleanup_one_index(Relation indrel,
double reltuples,
bool estimated_count,
LVRelState *vacrel);
static bool should_attempt_truncation(LVRelState *vacrel,
VacuumParams *params);
static bool should_attempt_truncation(LVRelState *vacrel);
static void lazy_truncate_heap(LVRelState *vacrel);
static BlockNumber count_nondeletable_pages(LVRelState *vacrel,
bool *lock_waiter_detected);
@ -506,10 +510,6 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
TransactionId FreezeLimit;
MultiXactId MultiXactCutoff;
Assert(params != NULL);
Assert(params->index_cleanup != VACOPT_TERNARY_DEFAULT);
Assert(params->truncate != VACOPT_TERNARY_DEFAULT);
/* measure elapsed time iff autovacuum logging requires it */
if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
{
@ -557,14 +557,41 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
vacrel->rel = rel;
vac_open_indexes(vacrel->rel, RowExclusiveLock, &vacrel->nindexes,
&vacrel->indrels);
vacrel->failsafe_active = false;
vacrel->consider_bypass_optimization = true;
/*
* The index_cleanup param either disables index vacuuming and cleanup or
* forces it to go ahead when we would otherwise apply the index bypass
* optimization. The default is 'auto', which leaves the final decision
* up to lazy_vacuum().
*
* The truncate param allows user to avoid attempting relation truncation,
* though it can't force truncation to happen.
*/
Assert(params->index_cleanup != VACOPTVALUE_UNSPECIFIED);
Assert(params->truncate != VACOPTVALUE_UNSPECIFIED &&
params->truncate != VACOPTVALUE_AUTO);
vacrel->do_index_vacuuming = true;
vacrel->do_index_cleanup = true;
vacrel->do_failsafe = false;
if (params->index_cleanup == VACOPT_TERNARY_DISABLED)
vacrel->do_rel_truncate = (params->truncate != VACOPTVALUE_DISABLED);
if (params->index_cleanup == VACOPTVALUE_DISABLED)
{
/* Force disable index vacuuming up-front */
vacrel->do_index_vacuuming = false;
vacrel->do_index_cleanup = false;
}
else if (params->index_cleanup == VACOPTVALUE_ENABLED)
{
/* Force index vacuuming. Note that failsafe can still bypass. */
vacrel->consider_bypass_optimization = false;
}
else
{
/* Default/auto, make all decisions dynamically */
Assert(params->index_cleanup == VACOPTVALUE_AUTO);
}
vacrel->bstrategy = bstrategy;
vacrel->old_rel_pages = rel->rd_rel->relpages;
vacrel->old_live_tuples = rel->rd_rel->reltuples;
@ -632,7 +659,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
/*
* Optionally truncate the relation.
*/
if (should_attempt_truncation(vacrel, params))
if (should_attempt_truncation(vacrel))
{
/*
* Update error traceback information. This is the last phase during
@ -791,7 +818,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
{
msgfmt = _(" %u pages from table (%.2f%% of total) have %lld dead item identifiers\n");
if (!vacrel->do_failsafe)
if (!vacrel->failsafe_active)
appendStringInfoString(&buf, _("index scan bypassed:"));
else
appendStringInfoString(&buf, _("index scan bypassed by failsafe:"));
@ -893,8 +920,7 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
next_fsm_block_to_vacuum;
PGRUsage ru0;
Buffer vmbuffer = InvalidBuffer;
bool skipping_blocks,
have_vacuumed_indexes = false;
bool skipping_blocks;
StringInfoData buf;
const int initprog_index[] = {
PROGRESS_VACUUM_PHASE,
@ -1048,7 +1074,7 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
* scanning of last page.
*/
#define FORCE_CHECK_PAGE() \
(blkno == nblocks - 1 && should_attempt_truncation(vacrel, params))
(blkno == nblocks - 1 && should_attempt_truncation(vacrel))
pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
@ -1166,8 +1192,8 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
}
/* Remove the collected garbage tuples from table and indexes */
lazy_vacuum(vacrel, false);
have_vacuumed_indexes = true;
vacrel->consider_bypass_optimization = false;
lazy_vacuum(vacrel);
/*
* Vacuum the Free Space Map to make newly-freed space visible on
@ -1579,7 +1605,7 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
/* If any tuples need to be deleted, perform final vacuum cycle */
if (dead_tuples->num_tuples > 0)
lazy_vacuum(vacrel, !have_vacuumed_indexes);
lazy_vacuum(vacrel);
/*
* Vacuum the remainder of the Free Space Map. We must do this whether or
@ -2064,9 +2090,9 @@ retry:
* wraparound.
*/
static void
lazy_vacuum(LVRelState *vacrel, bool onecall)
lazy_vacuum(LVRelState *vacrel)
{
bool do_bypass_optimization;
bool bypass;
/* Should not end up here with no indexes */
Assert(vacrel->nindexes > 0);
@ -2099,8 +2125,8 @@ lazy_vacuum(LVRelState *vacrel, bool onecall)
* It's far easier to ensure that 99%+ of all UPDATEs against a table use
* HOT through careful tuning.
*/
do_bypass_optimization = false;
if (onecall && vacrel->rel_pages > 0)
bypass = false;
if (vacrel->consider_bypass_optimization && vacrel->rel_pages > 0)
{
BlockNumber threshold;
@ -2132,12 +2158,11 @@ lazy_vacuum(LVRelState *vacrel, bool onecall)
* expanded to cover more cases then this may need to be reconsidered.
*/
threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
do_bypass_optimization =
(vacrel->lpdead_item_pages < threshold &&
vacrel->lpdead_items < MAXDEADTUPLES(32L * 1024L * 1024L));
bypass = (vacrel->lpdead_item_pages < threshold &&
vacrel->lpdead_items < MAXDEADTUPLES(32L * 1024L * 1024L));
}
if (do_bypass_optimization)
if (bypass)
{
/*
* There are almost zero TIDs. Behave as if there were precisely
@ -2177,7 +2202,7 @@ lazy_vacuum(LVRelState *vacrel, bool onecall)
* vacuuming or heap vacuuming. This VACUUM operation won't end up
* back here again.
*/
Assert(vacrel->do_failsafe);
Assert(vacrel->failsafe_active);
}
/*
@ -2259,7 +2284,7 @@ lazy_vacuum_all_indexes(LVRelState *vacrel)
*/
Assert(vacrel->num_index_scans > 0 ||
vacrel->dead_tuples->num_tuples == vacrel->lpdead_items);
Assert(allindexes || vacrel->do_failsafe);
Assert(allindexes || vacrel->failsafe_active);
/*
* Increase and report the number of index scans.
@ -2580,7 +2605,7 @@ static bool
lazy_check_wraparound_failsafe(LVRelState *vacrel)
{
/* Don't warn more than once per VACUUM */
if (vacrel->do_failsafe)
if (vacrel->failsafe_active)
return true;
if (unlikely(vacuum_xid_failsafe_check(vacrel->relfrozenxid,
@ -2589,9 +2614,12 @@ lazy_check_wraparound_failsafe(LVRelState *vacrel)
Assert(vacrel->do_index_vacuuming);
Assert(vacrel->do_index_cleanup);
vacrel->failsafe_active = true;
/* Disable index vacuuming, index cleanup, and heap rel truncation */
vacrel->do_index_vacuuming = false;
vacrel->do_index_cleanup = false;
vacrel->do_failsafe = true;
vacrel->do_rel_truncate = false;
ereport(WARNING,
(errmsg("bypassing nonessential maintenance of table \"%s.%s.%s\" as a failsafe after %d index scans",
@ -3136,14 +3164,11 @@ lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat,
* careful to depend only on fields that lazy_scan_heap updates on-the-fly.
*/
static bool
should_attempt_truncation(LVRelState *vacrel, VacuumParams *params)
should_attempt_truncation(LVRelState *vacrel)
{
BlockNumber possibly_freeable;
if (params->truncate == VACOPT_TERNARY_DISABLED)
return false;
if (vacrel->do_failsafe)
if (!vacrel->do_rel_truncate || vacrel->failsafe_active)
return false;
possibly_freeable = vacrel->rel_pages - vacrel->nonempty_pages;
@ -3207,7 +3232,6 @@ lazy_truncate_heap(LVRelState *vacrel)
* We failed to establish the lock in the specified number of
* retries. This means we give up truncating.
*/
lock_waiter_detected = true;
ereport(elevel,
(errmsg("\"%s\": stopping truncate due to conflicting lock request",
vacrel->relname)));
@ -3399,9 +3423,8 @@ count_nondeletable_pages(LVRelState *vacrel, bool *lock_waiter_detected)
/*
* Note: any non-unused item should be taken as a reason to keep
* this page. We formerly thought that DEAD tuples could be
* thrown away, but that's not so, because we'd not have cleaned
* out their index entries.
* this page. Even an LP_DEAD item makes truncation unsafe, since
* we must not have cleaned out its index entries.
*/
if (ItemIdIsUsed(itemid))
{

View File

@ -88,7 +88,7 @@ static void vac_truncate_clog(TransactionId frozenXID,
MultiXactId lastSaneMinMulti);
static bool vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params);
static double compute_parallel_delay(void);
static VacOptTernaryValue get_vacopt_ternary_value(DefElem *def);
static VacOptValue get_vacoptval_from_boolean(DefElem *def);
/*
* Primary entry point for manual VACUUM and ANALYZE commands
@ -109,9 +109,9 @@ ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel)
bool process_toast = true;
ListCell *lc;
/* Set default value */
params.index_cleanup = VACOPT_TERNARY_DEFAULT;
params.truncate = VACOPT_TERNARY_DEFAULT;
/* index_cleanup and truncate values unspecified for now */
params.index_cleanup = VACOPTVALUE_UNSPECIFIED;
params.truncate = VACOPTVALUE_UNSPECIFIED;
/* By default parallel vacuum is enabled */
params.nworkers = 0;
@ -142,11 +142,25 @@ ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel)
else if (strcmp(opt->defname, "disable_page_skipping") == 0)
disable_page_skipping = defGetBoolean(opt);
else if (strcmp(opt->defname, "index_cleanup") == 0)
params.index_cleanup = get_vacopt_ternary_value(opt);
{
/* Interpret no string as the default, which is 'auto' */
if (!opt->arg)
params.index_cleanup = VACOPTVALUE_AUTO;
else
{
char *sval = defGetString(opt);
/* Try matching on 'auto' string, or fall back on boolean */
if (pg_strcasecmp(sval, "auto") == 0)
params.index_cleanup = VACOPTVALUE_AUTO;
else
params.index_cleanup = get_vacoptval_from_boolean(opt);
}
}
else if (strcmp(opt->defname, "process_toast") == 0)
process_toast = defGetBoolean(opt);
else if (strcmp(opt->defname, "truncate") == 0)
params.truncate = get_vacopt_ternary_value(opt);
params.truncate = get_vacoptval_from_boolean(opt);
else if (strcmp(opt->defname, "parallel") == 0)
{
if (opt->arg == NULL)
@ -1938,24 +1952,43 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params)
lockrelid = rel->rd_lockInfo.lockRelId;
LockRelationIdForSession(&lockrelid, lmode);
/* Set index cleanup option based on reloptions if not yet */
if (params->index_cleanup == VACOPT_TERNARY_DEFAULT)
/*
* Set index_cleanup option based on index_cleanup reloption if it wasn't
* specified in VACUUM command, or when running in an autovacuum worker
*/
if (params->index_cleanup == VACOPTVALUE_UNSPECIFIED)
{
if (rel->rd_options == NULL ||
((StdRdOptions *) rel->rd_options)->vacuum_index_cleanup)
params->index_cleanup = VACOPT_TERNARY_ENABLED;
StdRdOptIndexCleanup vacuum_index_cleanup;
if (rel->rd_options == NULL)
vacuum_index_cleanup = STDRD_OPTION_VACUUM_INDEX_CLEANUP_AUTO;
else
params->index_cleanup = VACOPT_TERNARY_DISABLED;
vacuum_index_cleanup =
((StdRdOptions *) rel->rd_options)->vacuum_index_cleanup;
if (vacuum_index_cleanup == STDRD_OPTION_VACUUM_INDEX_CLEANUP_AUTO)
params->index_cleanup = VACOPTVALUE_AUTO;
else if (vacuum_index_cleanup == STDRD_OPTION_VACUUM_INDEX_CLEANUP_ON)
params->index_cleanup = VACOPTVALUE_ENABLED;
else
{
Assert(vacuum_index_cleanup ==
STDRD_OPTION_VACUUM_INDEX_CLEANUP_OFF);
params->index_cleanup = VACOPTVALUE_DISABLED;
}
}
/* Set truncate option based on reloptions if not yet */
if (params->truncate == VACOPT_TERNARY_DEFAULT)
/*
* Set truncate option based on truncate reloption if it wasn't specified
* in VACUUM command, or when running in an autovacuum worker
*/
if (params->truncate == VACOPTVALUE_UNSPECIFIED)
{
if (rel->rd_options == NULL ||
((StdRdOptions *) rel->rd_options)->vacuum_truncate)
params->truncate = VACOPT_TERNARY_ENABLED;
params->truncate = VACOPTVALUE_ENABLED;
else
params->truncate = VACOPT_TERNARY_DISABLED;
params->truncate = VACOPTVALUE_DISABLED;
}
/*
@ -2217,11 +2250,11 @@ compute_parallel_delay(void)
/*
* A wrapper function of defGetBoolean().
*
* This function returns VACOPT_TERNARY_ENABLED and VACOPT_TERNARY_DISABLED
* instead of true and false.
* This function returns VACOPTVALUE_ENABLED and VACOPTVALUE_DISABLED instead
* of true and false.
*/
static VacOptTernaryValue
get_vacopt_ternary_value(DefElem *def)
static VacOptValue
get_vacoptval_from_boolean(DefElem *def)
{
return defGetBoolean(def) ? VACOPT_TERNARY_ENABLED : VACOPT_TERNARY_DISABLED;
return defGetBoolean(def) ? VACOPTVALUE_ENABLED : VACOPTVALUE_DISABLED;
}

View File

@ -2976,8 +2976,14 @@ table_recheck_autovac(Oid relid, HTAB *table_toast_map,
tab->at_params.options = (dovacuum ? VACOPT_VACUUM : 0) |
(doanalyze ? VACOPT_ANALYZE : 0) |
(!wraparound ? VACOPT_SKIP_LOCKED : 0);
tab->at_params.index_cleanup = VACOPT_TERNARY_DEFAULT;
tab->at_params.truncate = VACOPT_TERNARY_DEFAULT;
/*
* index_cleanup and truncate are unspecified at first in autovacuum.
* They will be filled in with usable values using their reloptions
* (or reloption defaults) later.
*/
tab->at_params.index_cleanup = VACOPTVALUE_UNSPECIFIED;
tab->at_params.truncate = VACOPTVALUE_UNSPECIFIED;
/* As of now, we don't support parallel vacuum for autovacuum */
tab->at_params.nworkers = -1;
tab->at_params.freeze_min_age = freeze_min_age;