Mirror of https://github.com/postgres/postgres.git (synced 2025-11-09 06:21:09 +03:00)
Clean up API for ambulkdelete/amvacuumcleanup as per today's discussion.
This formulation requires every AM to provide amvacuumcleanup, unlike before, but it's surely a whole lot cleaner. Also, add an 'amstorage' column to pg_am so that we can get rid of hardwired knowledge in DefineOpClass().
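The essence of the new contract, before the per-AM diffs below: ambulkdelete now receives an IndexVacuumInfo plus an optional stats struct that it allocates on the first pass and accumulates into on later passes, and amvacuumcleanup is always called exactly once afterwards with whatever stats pointer the delete passes produced (possibly NULL if no deletions were needed). The following is a minimal, self-contained C model of that hand-off, not backend code: the toy_* names, the 100-tuple "index", and the reduced field set are illustrative assumptions; the real structs live in access/genam.h and carry more fields.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    /* Simplified stand-ins for the backend structs (abridged field sets). */
    typedef struct IndexVacuumInfo
    {
        const char *index;           /* stands in for Relation index */
        bool        vacuum_full;
        int         message_level;
        double      num_heap_tuples;
    } IndexVacuumInfo;

    typedef struct IndexBulkDeleteResult
    {
        unsigned    num_pages;
        double      num_index_tuples;
        double      tuples_removed;
    } IndexBulkDeleteResult;

    typedef bool (*IndexBulkDeleteCallback) (int tid, void *state);

    /* ambulkdelete: may run several times per VACUUM; allocates the stats
     * struct on the first pass and accumulates into it on later passes. */
    static IndexBulkDeleteResult *
    toy_ambulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
                     IndexBulkDeleteCallback callback, void *callback_state)
    {
        (void) info;
        if (stats == NULL)              /* first time through? */
            stats = calloc(1, sizeof(IndexBulkDeleteResult));

        stats->num_index_tuples = 0;    /* re-counted on every pass */
        for (int tid = 0; tid < 100; tid++)
        {
            if (callback(tid, callback_state))
                stats->tuples_removed += 1;
            else
                stats->num_index_tuples += 1;
        }
        return stats;
    }

    /* amvacuumcleanup: always called once at the end, even if ambulkdelete
     * never ran, in which case it must cope with stats == NULL. */
    static IndexBulkDeleteResult *
    toy_amvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
    {
        if (stats == NULL)
            stats = calloc(1, sizeof(IndexBulkDeleteResult));
        stats->num_pages = 128;         /* cleanup fills in the page count */
        if (stats->num_index_tuples == 0)
            stats->num_index_tuples = info->num_heap_tuples;
        return stats;
    }

    static bool
    delete_odd_tids(int tid, void *state)
    {
        (void) state;
        return tid % 2 == 1;            /* pretend the odd TIDs are dead */
    }

    int
    main(void)
    {
        IndexVacuumInfo info = {"toy_index", false, 0, 100.0};
        IndexBulkDeleteResult *stats = NULL;

        /* two bulk-delete passes, threading the same stats struct through */
        stats = toy_ambulkdelete(&info, stats, delete_odd_tids, NULL);
        stats = toy_ambulkdelete(&info, stats, delete_odd_tids, NULL);
        stats = toy_amvacuumcleanup(&info, stats);

        printf("pages=%u tuples=%.0f removed=%.0f\n",
               stats->num_pages, stats->num_index_tuples, stats->tuples_removed);
        free(stats);
        return 0;
    }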
src/backend/access/gin/ginvacuum.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/access/gin/ginvacuum.c,v 1.1 2006/05/02 11:28:54 teodor Exp $
+ *    $PostgreSQL: pgsql/src/backend/access/gin/ginvacuum.c,v 1.2 2006/05/02 22:25:10 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */

@@ -474,17 +474,25 @@ ginVacuumEntryPage(GinVacuumState *gvs, Buffer buffer, BlockNumber *roots, uint3

 Datum
 ginbulkdelete(PG_FUNCTION_ARGS) {
-    Relation    index = (Relation) PG_GETARG_POINTER(0);
-    IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(1);
-    void       *callback_state = (void *) PG_GETARG_POINTER(2);
+    IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
+    IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+    IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(2);
+    void       *callback_state = (void *) PG_GETARG_POINTER(3);
+    Relation    index = info->index;
     BlockNumber blkno = GIN_ROOT_BLKNO;
     GinVacuumState gvs;
     Buffer      buffer;
     BlockNumber rootOfPostingTree[ BLCKSZ/ (sizeof(IndexTupleData)+sizeof(ItemId)) ];
     uint32      nRoot;

+    /* first time through? */
+    if (stats == NULL)
+        stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
+    /* we'll re-count the tuples each time */
+    stats->num_index_tuples = 0;
+
     gvs.index = index;
-    gvs.result = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
+    gvs.result = stats;
     gvs.callback = callback;
     gvs.callback_state = callback_state;
     initGinState(&gvs.ginstate, index);

@@ -564,9 +572,9 @@ ginbulkdelete(PG_FUNCTION_ARGS) {

 Datum
 ginvacuumcleanup(PG_FUNCTION_ARGS) {
-    Relation    index = (Relation) PG_GETARG_POINTER(0);
-    IndexVacuumCleanupInfo *info = (IndexVacuumCleanupInfo *) PG_GETARG_POINTER(1);
-    IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(2);
+    IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
+    IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+    Relation    index = info->index;
     bool        needLock = !RELATION_IS_LOCAL(index);
     BlockNumber npages,
                 blkno;

@@ -576,6 +584,15 @@ ginvacuumcleanup(PG_FUNCTION_ARGS) {
     BlockNumber lastBlock = GIN_ROOT_BLKNO,
                 lastFilledBlock = GIN_ROOT_BLKNO;

+    /* Set up all-zero stats if ginbulkdelete wasn't called */
+    if (stats == NULL)
+        stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
+    /*
+     * XXX we always report the heap tuple count as the number of index
+     * entries.  This is bogus if the index is partial, but it's real hard
+     * to tell how many distinct heap entries are referenced by a GIN index.
+     */
+    stats->num_index_tuples = info->num_heap_tuples;
+
     if (info->vacuum_full) {
         LockRelation(index, AccessExclusiveLock);

src/backend/access/gist/gistvacuum.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.18 2006/03/31 23:32:05 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.19 2006/05/02 22:25:10 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */

@@ -343,9 +343,9 @@ gistVacuumUpdate(GistVacuum *gv, BlockNumber blkno, bool needunion)
 Datum
 gistvacuumcleanup(PG_FUNCTION_ARGS)
 {
-    Relation    rel = (Relation) PG_GETARG_POINTER(0);
-    IndexVacuumCleanupInfo *info = (IndexVacuumCleanupInfo *) PG_GETARG_POINTER(1);
-    GistBulkDeleteResult *stats = (GistBulkDeleteResult *) PG_GETARG_POINTER(2);
+    IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
+    GistBulkDeleteResult *stats = (GistBulkDeleteResult *) PG_GETARG_POINTER(1);
+    Relation    rel = info->index;
     BlockNumber npages,
                 blkno;
     BlockNumber nFreePages,

@@ -355,6 +355,19 @@ gistvacuumcleanup(PG_FUNCTION_ARGS)
                 lastFilledBlock = GIST_ROOT_BLKNO;
     bool        needLock;

+    /* Set up all-zero stats if gistbulkdelete wasn't called */
+    if (stats == NULL)
+    {
+        stats = (GistBulkDeleteResult *) palloc0(sizeof(GistBulkDeleteResult));
+        /* use heap's tuple count */
+        Assert(info->num_heap_tuples >= 0);
+        stats->std.num_index_tuples = info->num_heap_tuples;
+        /*
+         * XXX the above is wrong if index is partial.  Would it be OK to
+         * just return NULL, or is there work we must do below?
+         */
+    }
+
     /* gistVacuumUpdate may cause hard work */
     if (info->vacuum_full)
     {

@@ -460,13 +473,6 @@ gistvacuumcleanup(PG_FUNCTION_ARGS)
     if (info->vacuum_full)
         UnlockRelation(rel, AccessExclusiveLock);

-    /* if gistbulkdelete skipped the scan, use heap's tuple count */
-    if (stats->std.num_index_tuples < 0)
-    {
-        Assert(info->num_heap_tuples >= 0);
-        stats->std.num_index_tuples = info->num_heap_tuples;
-    }
-
     PG_RETURN_POINTER(stats);
 }

@@ -509,36 +515,22 @@ pushStackIfSplited(Page page, GistBDItem *stack)
 Datum
 gistbulkdelete(PG_FUNCTION_ARGS)
 {
-    Relation    rel = (Relation) PG_GETARG_POINTER(0);
-    IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(1);
-    void       *callback_state = (void *) PG_GETARG_POINTER(2);
-    GistBulkDeleteResult *result;
+    IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
+    GistBulkDeleteResult *stats = (GistBulkDeleteResult *) PG_GETARG_POINTER(1);
+    IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(2);
+    void       *callback_state = (void *) PG_GETARG_POINTER(3);
+    Relation    rel = info->index;
     GistBDItem *stack,
                *ptr;
     bool        needLock;

-    result = (GistBulkDeleteResult *) palloc0(sizeof(GistBulkDeleteResult));
+    /* first time through? */
+    if (stats == NULL)
+        stats = (GistBulkDeleteResult *) palloc0(sizeof(GistBulkDeleteResult));
+    /* we'll re-count the tuples each time */
+    stats->std.num_index_tuples = 0;

-    /*
-     * We can skip the scan entirely if there's nothing to delete (indicated
-     * by callback_state == NULL) and the index isn't partial.  For a partial
-     * index we must scan in order to derive a trustworthy tuple count.
-     *
-     * XXX as of PG 8.2 this is dead code because GIST indexes are always
-     * effectively partial ... but keep it anyway in case our null-handling
-     * gets fixed.
-     */
-    if (callback_state || vac_is_partial_index(rel))
-    {
-        stack = (GistBDItem *) palloc0(sizeof(GistBDItem));
-        stack->blkno = GIST_ROOT_BLKNO;
-    }
-    else
-    {
-        /* skip scan and set flag for gistvacuumcleanup */
-        stack = NULL;
-        result->std.num_index_tuples = -1;
-    }
+    stack = (GistBDItem *) palloc0(sizeof(GistBDItem));
+    stack->blkno = GIST_ROOT_BLKNO;

     while (stack)
     {

@@ -601,11 +593,11 @@ gistbulkdelete(PG_FUNCTION_ARGS)
                     i--;
                     maxoff--;
                     ntodelete++;
-                    result->std.tuples_removed += 1;
+                    stats->std.tuples_removed += 1;
                     Assert(maxoff == PageGetMaxOffsetNumber(page));
                 }
                 else
-                    result->std.num_index_tuples += 1;
+                    stats->std.num_index_tuples += 1;
             }

             if (ntodelete)

@@ -658,7 +650,7 @@ gistbulkdelete(PG_FUNCTION_ARGS)
                 stack->next = ptr;

                 if (GistTupleIsInvalid(idxtuple))
-                    result->needFullVacuum = true;
+                    stats->needFullVacuum = true;
             }
         }

@@ -671,13 +663,5 @@ gistbulkdelete(PG_FUNCTION_ARGS)
         vacuum_delay_point();
     }

-    needLock = !RELATION_IS_LOCAL(rel);
-
-    if (needLock)
-        LockRelationForExtension(rel, ExclusiveLock);
-    result->std.num_pages = RelationGetNumberOfBlocks(rel);
-    if (needLock)
-        UnlockRelationForExtension(rel, ExclusiveLock);
-
-    PG_RETURN_POINTER(result);
+    PG_RETURN_POINTER(stats);
 }

src/backend/access/hash/hash.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.88 2006/03/24 04:32:12 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.89 2006/05/02 22:25:10 tgl Exp $
  *
  * NOTES
  *    This file contains only the public interface routines.

@@ -478,11 +478,11 @@ hashrestrpos(PG_FUNCTION_ARGS)
 Datum
 hashbulkdelete(PG_FUNCTION_ARGS)
 {
-    Relation    rel = (Relation) PG_GETARG_POINTER(0);
-    IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(1);
-    void       *callback_state = (void *) PG_GETARG_POINTER(2);
-    IndexBulkDeleteResult *result;
-    BlockNumber num_pages;
+    IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
+    IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+    IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(2);
+    void       *callback_state = (void *) PG_GETARG_POINTER(3);
+    Relation    rel = info->index;
     double      tuples_removed;
     double      num_index_tuples;
     double      orig_ntuples;

@@ -517,18 +517,6 @@ hashbulkdelete(PG_FUNCTION_ARGS)
     cur_maxbucket = orig_maxbucket;

 loop_top:
-
-    /*
-     * If we don't have anything to delete, skip the scan, and report the
-     * number of tuples shown in the metapage.  (Unlike btree and gist,
-     * we can trust this number even for a partial index.)
-     */
-    if (!callback_state)
-    {
-        cur_bucket = cur_maxbucket + 1;
-        num_index_tuples = local_metapage.hashm_ntuples;
-    }
-
     while (cur_bucket <= cur_maxbucket)
     {
         BlockNumber bucket_blkno;

@@ -657,14 +645,37 @@ loop_top:
     _hash_wrtbuf(rel, metabuf);

     /* return statistics */
-    num_pages = RelationGetNumberOfBlocks(rel);
-
-    result = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
-    result->num_pages = num_pages;
-    result->num_index_tuples = num_index_tuples;
-    result->tuples_removed = tuples_removed;
-
-    PG_RETURN_POINTER(result);
+    if (stats == NULL)
+        stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
+    stats->num_index_tuples = num_index_tuples;
+    stats->tuples_removed += tuples_removed;
+    /* hashvacuumcleanup will fill in num_pages */
+
+    PG_RETURN_POINTER(stats);
+}
+
+/*
+ * Post-VACUUM cleanup.
+ *
+ * Result: a palloc'd struct containing statistical info for VACUUM displays.
+ */
+Datum
+hashvacuumcleanup(PG_FUNCTION_ARGS)
+{
+    IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
+    IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+    Relation    rel = info->index;
+    BlockNumber num_pages;
+
+    /* If hashbulkdelete wasn't called, return NULL signifying no change */
+    if (stats == NULL)
+        PG_RETURN_POINTER(NULL);
+
+    /* update statistics */
+    num_pages = RelationGetNumberOfBlocks(rel);
+    stats->num_pages = num_pages;
+
+    PG_RETURN_POINTER(stats);
 }

src/backend/access/index/indexam.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.91 2006/03/05 15:58:21 momjian Exp $
+ *    $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.92 2006/05/02 22:25:10 tgl Exp $
  *
  * INTERFACE ROUTINES
  *        index_open      - open an index relation by relation OID

@@ -684,19 +684,17 @@ index_getmulti(IndexScanDesc scan,
  *
  *        callback routine tells whether a given main-heap tuple is
  *        to be deleted
  *
- *        if callback_state is NULL then there are no tuples to be deleted;
- *        index AM can choose to avoid work in this case, but must still
- *        follow the protocol of returning statistical info.
- *
  *        return value is an optional palloc'd struct of statistics
  * ----------------
  */
 IndexBulkDeleteResult *
-index_bulk_delete(Relation indexRelation,
+index_bulk_delete(IndexVacuumInfo *info,
+                  IndexBulkDeleteResult *stats,
                   IndexBulkDeleteCallback callback,
                   void *callback_state)
 {
+    Relation    indexRelation = info->index;
     FmgrInfo   *procedure;
     IndexBulkDeleteResult *result;

@@ -704,8 +702,9 @@ index_bulk_delete(Relation indexRelation,
     GET_REL_PROCEDURE(ambulkdelete);

     result = (IndexBulkDeleteResult *)
-        DatumGetPointer(FunctionCall3(procedure,
-                                      PointerGetDatum(indexRelation),
+        DatumGetPointer(FunctionCall4(procedure,
+                                      PointerGetDatum(info),
+                                      PointerGetDatum(stats),
                                       PointerGetDatum((Pointer) callback),
                                       PointerGetDatum(callback_state)));

@@ -719,26 +718,20 @@ index_bulk_delete(Relation indexRelation,
  * ----------------
  */
 IndexBulkDeleteResult *
-index_vacuum_cleanup(Relation indexRelation,
-                     IndexVacuumCleanupInfo *info,
+index_vacuum_cleanup(IndexVacuumInfo *info,
                      IndexBulkDeleteResult *stats)
 {
+    Relation    indexRelation = info->index;
     FmgrInfo   *procedure;
     IndexBulkDeleteResult *result;

     RELATION_CHECKS;

-    /* It's okay for an index AM not to have a vacuumcleanup procedure */
-    if (!RegProcedureIsValid(indexRelation->rd_am->amvacuumcleanup))
-        return stats;
-
     GET_REL_PROCEDURE(amvacuumcleanup);

     result = (IndexBulkDeleteResult *)
-        DatumGetPointer(FunctionCall3(procedure,
-                                      PointerGetDatum(indexRelation),
-                                      PointerGetDatum((Pointer) info),
-                                      PointerGetDatum((Pointer) stats)));
+        DatumGetPointer(FunctionCall2(procedure,
+                                      PointerGetDatum(info),
+                                      PointerGetDatum(stats)));

     return result;
 }

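For contrast with the dispatch just above: every index AM now has to expose a pair of functions with exactly this argument layout, amvacuumcleanup included. A sketch of what a hypothetical "foo" AM would provide is below; the foo* names and the elided scan body are inventions for illustration, and the code assumes the usual backend headers, so it compiles only inside the server tree or an extension, not standalone.

    #include "postgres.h"
    #include "access/genam.h"
    #include "fmgr.h"
    #include "storage/bufmgr.h"

    Datum
    foobulkdelete(PG_FUNCTION_ARGS)
    {
        IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
        IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
        IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(2);
        void       *callback_state = (void *) PG_GETARG_POINTER(3);

        /* first of possibly several passes: allocate the shared stats struct */
        if (stats == NULL)
            stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));

        /* ... scan info->index, calling callback(&itup->t_tid, callback_state)
         * for each entry, accumulating into stats->tuples_removed and
         * stats->num_index_tuples ... */

        PG_RETURN_POINTER(stats);
    }

    Datum
    foovacuumcleanup(PG_FUNCTION_ARGS)
    {
        IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
        IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);

        /* cleanup is mandatory now, so cope with "bulkdelete never ran" here */
        if (stats == NULL)
            stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));

        stats->num_pages = RelationGetNumberOfBlocks(info->index);
        PG_RETURN_POINTER(stats);
    }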
src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.145 2006/04/25 22:46:05 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.146 2006/05/02 22:25:10 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */

@@ -518,15 +518,15 @@ btrestrpos(PG_FUNCTION_ARGS)
 Datum
 btbulkdelete(PG_FUNCTION_ARGS)
 {
-    Relation    rel = (Relation) PG_GETARG_POINTER(0);
-    IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(1);
-    void       *callback_state = (void *) PG_GETARG_POINTER(2);
-    IndexBulkDeleteResult *result;
+    IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
+    IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+    IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(2);
+    void       *callback_state = (void *) PG_GETARG_POINTER(3);
+    Relation    rel = info->index;
     double      tuples_removed = 0;
     OffsetNumber deletable[MaxOffsetNumber];
     int         ndeletable;
     Buffer      buf;
     BlockNumber num_pages;

     /*
      * The outer loop iterates over index leaf pages, the inner over items on

@@ -543,14 +543,8 @@ btbulkdelete(PG_FUNCTION_ARGS)
      * further to its right, which the indexscan will have no pin on.) We can
      * skip obtaining exclusive lock on empty pages though, since no indexscan
      * could be stopped on those.
-     *
-     * We can skip the scan entirely if there's nothing to delete (indicated
-     * by callback_state == NULL).
      */
-    if (callback_state)
-        buf = _bt_get_endpoint(rel, 0, false);
-    else
-        buf = InvalidBuffer;
+    buf = _bt_get_endpoint(rel, 0, false);

     if (BufferIsValid(buf))     /* check for empty index */
     {

@@ -626,14 +620,12 @@ btbulkdelete(PG_FUNCTION_ARGS)
     }

     /* return statistics */
-    num_pages = RelationGetNumberOfBlocks(rel);
-
-    result = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
-    result->num_pages = num_pages;
-    /* btvacuumcleanup will fill in num_index_tuples */
-    result->tuples_removed = tuples_removed;
+    if (stats == NULL)
+        stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
+    stats->tuples_removed += tuples_removed;
+    /* btvacuumcleanup will fill in num_pages and num_index_tuples */

-    PG_RETURN_POINTER(result);
+    PG_RETURN_POINTER(stats);
 }

 /*

@@ -646,9 +638,9 @@ btbulkdelete(PG_FUNCTION_ARGS)
 Datum
 btvacuumcleanup(PG_FUNCTION_ARGS)
 {
-    Relation    rel = (Relation) PG_GETARG_POINTER(0);
-    IndexVacuumCleanupInfo *info = (IndexVacuumCleanupInfo *) PG_GETARG_POINTER(1);
-    IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(2);
+    IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
+    IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+    Relation    rel = info->index;
     BlockNumber num_pages;
     BlockNumber blkno;
     BlockNumber *freePages;

@@ -660,7 +652,9 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
     MemoryContext oldcontext;
     bool        needLock;

-    Assert(stats != NULL);
+    /* Set up all-zero stats if btbulkdelete wasn't called */
+    if (stats == NULL)
+        stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));

     /*
      * First find out the number of pages in the index. We must acquire the

src/backend/commands/cluster.c
@@ -11,7 +11,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.146 2006/05/02 15:45:37 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.147 2006/05/02 22:25:10 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */

@@ -339,6 +339,12 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck)
                  errmsg("cannot cluster on partial index \"%s\"",
                         RelationGetRelationName(OldIndex))));

+    if (!OldIndex->rd_am->amclusterable)
+        ereport(ERROR,
+                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                 errmsg("cannot cluster on index \"%s\" because access method does not support clustering",
+                        RelationGetRelationName(OldIndex))));
+
     if (!OldIndex->rd_am->amindexnulls)
     {
         AttrNumber  colno;

@@ -376,12 +382,6 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck)
                         RelationGetRelationName(OldIndex))));
     }

-    if (!OldIndex->rd_am->amclusterable)
-        ereport(ERROR,
-                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                 errmsg("cannot cluster on index \"%s\" because access method does not support clustering",
-                        RelationGetRelationName(OldIndex))));
-
     /*
      * Disallow clustering system relations.  This will definitely NOT work
      * for shared relations (we have no way to update pg_class rows in other

src/backend/commands/opclasscmds.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.44 2006/05/02 11:28:54 teodor Exp $
+ *    $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.45 2006/05/02 22:25:10 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */

@@ -77,11 +77,13 @@ DefineOpClass(CreateOpClassStmt *stmt)
                 opclassoid;     /* oid of opclass we create */
     int         numOperators,  /* amstrategies value */
                 numProcs;      /* amsupport value */
+    bool        amstorage;     /* amstorage flag */
     List       *operators;     /* OpClassMember list for operators */
     List       *procedures;    /* OpClassMember list for support procs */
     ListCell   *l;
     Relation    rel;
     HeapTuple   tup;
+    Form_pg_am  pg_am;
     Datum       values[Natts_pg_opclass];
     char        nulls[Natts_pg_opclass];
     AclResult   aclresult;

@@ -111,8 +113,10 @@ DefineOpClass(CreateOpClassStmt *stmt)
                         stmt->amname)));

     amoid = HeapTupleGetOid(tup);
-    numOperators = ((Form_pg_am) GETSTRUCT(tup))->amstrategies;
-    numProcs = ((Form_pg_am) GETSTRUCT(tup))->amsupport;
+    pg_am = (Form_pg_am) GETSTRUCT(tup);
+    numOperators = pg_am->amstrategies;
+    numProcs = pg_am->amsupport;
+    amstorage = pg_am->amstorage;

     /* XXX Should we make any privilege check against the AM? */

@@ -270,19 +274,11 @@ DefineOpClass(CreateOpClassStmt *stmt)
         /* Just drop the spec if same as column datatype */
         if (storageoid == typeoid)
             storageoid = InvalidOid;
-        else
-        {
-            /*
-             * Currently, only GiST and GIN allows storagetype different from
-             * datatype.  This hardcoded test should be eliminated in favor of
-             * adding another boolean column to pg_am ...
-             */
-            if (!(amoid == GIST_AM_OID || amoid == GIN_AM_OID))
-                ereport(ERROR,
-                        (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
-                         errmsg("storage type may not be different from data type for access method \"%s\"",
-                                stmt->amname)));
-        }
+        else if (!amstorage)
+            ereport(ERROR,
+                    (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+                     errmsg("storage type may not be different from data type for access method \"%s\"",
+                            stmt->amname)));
     }

     rel = heap_open(OperatorClassRelationId, RowExclusiveLock);

src/backend/commands/vacuum.c
@@ -13,7 +13,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.327 2006/05/02 11:28:54 teodor Exp $
+ *    $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.328 2006/05/02 22:25:10 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */

@@ -230,7 +230,6 @@ static void vacuum_index(VacPageList vacpagelist, Relation indrel,
              double num_tuples, int keep_tuples);
 static void scan_index(Relation indrel, double num_tuples);
 static bool tid_reaped(ItemPointer itemptr, void *state);
-static bool dummy_tid_reaped(ItemPointer itemptr, void *state);
 static void vac_update_fsm(Relation onerel, VacPageList fraged_pages,
                BlockNumber rel_pages);
 static VacPage copy_vac_page(VacPage vacpage);

@@ -2933,7 +2932,7 @@ vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage)
 }

 /*
- *  scan_index() -- scan one index relation to update statistic.
+ *  scan_index() -- scan one index relation to update pg_class statistics.
  *
  *  We use this when we have no deletions to do.
  */

@@ -2941,25 +2940,17 @@ static void
 scan_index(Relation indrel, double num_tuples)
 {
     IndexBulkDeleteResult *stats;
-    IndexVacuumCleanupInfo vcinfo;
+    IndexVacuumInfo ivinfo;
     PGRUsage    ru0;

     pg_rusage_init(&ru0);

-    /*
-     * Even though we're not planning to delete anything, we use the
-     * ambulkdelete call, because (a) the scan happens within the index AM for
-     * more speed, and (b) it may want to pass private statistics to the
-     * amvacuumcleanup call.
-     */
-    stats = index_bulk_delete(indrel, dummy_tid_reaped, NULL);
+    ivinfo.index = indrel;
+    ivinfo.vacuum_full = true;
+    ivinfo.message_level = elevel;
+    ivinfo.num_heap_tuples = num_tuples;

     /* Do post-VACUUM cleanup, even though we deleted nothing */
-    vcinfo.vacuum_full = true;
-    vcinfo.message_level = elevel;
-    vcinfo.num_heap_tuples = num_tuples;
-
-    stats = index_vacuum_cleanup(indrel, &vcinfo, stats);
+    stats = index_vacuum_cleanup(&ivinfo, NULL);

     if (!stats)
         return;

@@ -2982,16 +2973,7 @@ scan_index(Relation indrel, double num_tuples)
     /*
      * Check for tuple count mismatch.  If the index is partial, then it's OK
      * for it to have fewer tuples than the heap; else we got trouble.
-     *
-     * XXX Hack. Since GIN stores every pointer to heap several times and
-     * counting num_index_tuples during vacuum is very comlpex and slow
-     * we just copy num_tuples to num_index_tuples as upper limit to avoid
-     * WARNING and optimizer mistakes.
      */
-    if ( indrel->rd_rel->relam == GIN_AM_OID )
-    {
-        stats->num_index_tuples = num_tuples;
-    } else
     if (stats->num_index_tuples != num_tuples)
     {
         if (stats->num_index_tuples > num_tuples ||

@@ -3023,20 +3005,21 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
              double num_tuples, int keep_tuples)
 {
     IndexBulkDeleteResult *stats;
-    IndexVacuumCleanupInfo vcinfo;
+    IndexVacuumInfo ivinfo;
     PGRUsage    ru0;

     pg_rusage_init(&ru0);

+    ivinfo.index = indrel;
+    ivinfo.vacuum_full = true;
+    ivinfo.message_level = elevel;
+    ivinfo.num_heap_tuples = num_tuples + keep_tuples;
+
     /* Do bulk deletion */
-    stats = index_bulk_delete(indrel, tid_reaped, (void *) vacpagelist);
+    stats = index_bulk_delete(&ivinfo, NULL, tid_reaped, (void *) vacpagelist);

     /* Do post-VACUUM cleanup */
-    vcinfo.vacuum_full = true;
-    vcinfo.message_level = elevel;
-    vcinfo.num_heap_tuples = num_tuples + keep_tuples;
-
-    stats = index_vacuum_cleanup(indrel, &vcinfo, stats);
+    stats = index_vacuum_cleanup(&ivinfo, stats);

     if (!stats)
         return;

@@ -3061,16 +3044,7 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
     /*
      * Check for tuple count mismatch.  If the index is partial, then it's OK
      * for it to have fewer tuples than the heap; else we got trouble.
-     *
-     * XXX Hack. Since GIN stores every pointer to heap several times and
-     * counting num_index_tuples during vacuum is very comlpex and slow
-     * we just copy num_tuples to num_index_tuples as upper limit to avoid
-     * WARNING and optimizer mistakes.
     */
-    if ( indrel->rd_rel->relam == GIN_AM_OID )
-    {
-        stats->num_index_tuples = num_tuples;
-    } else
     if (stats->num_index_tuples != num_tuples + keep_tuples)
     {
         if (stats->num_index_tuples > num_tuples + keep_tuples ||

@@ -3137,15 +3111,6 @@ tid_reaped(ItemPointer itemptr, void *state)
     return true;
 }

-/*
- * Dummy version for scan_index.
- */
-static bool
-dummy_tid_reaped(ItemPointer itemptr, void *state)
-{
-    return false;
-}
-
 /*
  * Update the shared Free Space Map with the info we now have about
  * free space in the relation, discarding any old info the map may have.

src/backend/commands/vacuumlazy.c
@@ -31,7 +31,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.69 2006/03/31 23:32:06 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.70 2006/05/02 22:25:10 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */

@@ -96,11 +96,12 @@ static TransactionId FreezeLimit;
 static void lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
                Relation *Irel, int nindexes);
 static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
-static void lazy_scan_index(Relation indrel, LVRelStats *vacrelstats);
 static void lazy_vacuum_index(Relation indrel,
-                  double *index_tups_vacuumed,
-                  BlockNumber *index_pages_removed,
-                  LVRelStats *vacrelstats);
+                  IndexBulkDeleteResult **stats,
+                  LVRelStats *vacrelstats);
+static void lazy_cleanup_index(Relation indrel,
+                   IndexBulkDeleteResult *stats,
+                   LVRelStats *vacrelstats);
 static int  lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
                  int tupindex, LVRelStats *vacrelstats);
 static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);

@@ -112,7 +113,6 @@ static void lazy_record_dead_tuple(LVRelStats *vacrelstats,
 static void lazy_record_free_space(LVRelStats *vacrelstats,
                BlockNumber page, Size avail);
 static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
-static bool dummy_tid_reaped(ItemPointer itemptr, void *state);
 static void lazy_update_fsm(Relation onerel, LVRelStats *vacrelstats);
 static int  vac_cmp_itemptr(const void *left, const void *right);
 static int  vac_cmp_page_spaces(const void *left, const void *right);

@@ -207,9 +207,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
                 tups_vacuumed,
                 nkeep,
                 nunused;
-    double     *index_tups_vacuumed;
-    BlockNumber *index_pages_removed;
-    bool        did_vacuum_index = false;
+    IndexBulkDeleteResult **indstats;
     int         i;
     PGRUsage    ru0;

@@ -224,15 +222,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
     empty_pages = 0;
     num_tuples = tups_vacuumed = nkeep = nunused = 0;

-    /*
-     * Because index vacuuming is done in multiple passes, we have to keep
-     * track of the total number of rows and pages removed from each index.
-     * index_tups_vacuumed[i] is the number removed so far from the i'th
-     * index.  (For partial indexes this could well be different from
-     * tups_vacuumed.)  Likewise for index_pages_removed[i].
-     */
-    index_tups_vacuumed = (double *) palloc0(nindexes * sizeof(double));
-    index_pages_removed = (BlockNumber *) palloc0(nindexes * sizeof(BlockNumber));
+    indstats = (IndexBulkDeleteResult **)
+        palloc0(nindexes * sizeof(IndexBulkDeleteResult *));

     nblocks = RelationGetNumberOfBlocks(onerel);
     vacrelstats->rel_pages = nblocks;

@@ -263,10 +254,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
             /* Remove index entries */
             for (i = 0; i < nindexes; i++)
                 lazy_vacuum_index(Irel[i],
-                                  &index_tups_vacuumed[i],
-                                  &index_pages_removed[i],
+                                  &indstats[i],
                                   vacrelstats);
-            did_vacuum_index = true;
             /* Remove tuples from heap */
             lazy_vacuum_heap(onerel, vacrelstats);
             /* Forget the now-vacuumed tuples, and press on */

@@ -454,18 +443,15 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
         /* Remove index entries */
         for (i = 0; i < nindexes; i++)
             lazy_vacuum_index(Irel[i],
-                              &index_tups_vacuumed[i],
-                              &index_pages_removed[i],
+                              &indstats[i],
                               vacrelstats);
         /* Remove tuples from heap */
         lazy_vacuum_heap(onerel, vacrelstats);
     }
-    else if (!did_vacuum_index)
-    {
-        /* Must do post-vacuum cleanup and statistics update anyway */
-        for (i = 0; i < nindexes; i++)
-            lazy_scan_index(Irel[i], vacrelstats);
-    }
+
+    /* Do post-vacuum cleanup and statistics update for each index */
+    for (i = 0; i < nindexes; i++)
+        lazy_cleanup_index(Irel[i], indstats[i], vacrelstats);

     ereport(elevel,
             (errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u pages",

@@ -590,94 +576,18 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
     return tupindex;
 }

-/*
- *  lazy_scan_index() -- scan one index relation to update pg_class statistic.
- *
- *  We use this when we have no deletions to do.
- */
-static void
-lazy_scan_index(Relation indrel, LVRelStats *vacrelstats)
-{
-    IndexBulkDeleteResult *stats;
-    IndexVacuumCleanupInfo vcinfo;
-    PGRUsage    ru0;
-
-    pg_rusage_init(&ru0);
-
-    /*
-     * Acquire appropriate type of lock on index: must be exclusive if index
-     * AM isn't concurrent-safe.
-     */
-    if (indrel->rd_am->amconcurrent)
-        LockRelation(indrel, RowExclusiveLock);
-    else
-        LockRelation(indrel, AccessExclusiveLock);
-
-    /*
-     * Even though we're not planning to delete anything, we use the
-     * ambulkdelete call, because (a) the scan happens within the index AM for
-     * more speed, and (b) it may want to pass private statistics to the
-     * amvacuumcleanup call.
-     */
-    stats = index_bulk_delete(indrel, dummy_tid_reaped, NULL);
-
-    /* Do post-VACUUM cleanup, even though we deleted nothing */
-    vcinfo.vacuum_full = false;
-    vcinfo.message_level = elevel;
-    vcinfo.num_heap_tuples = vacrelstats->rel_tuples;
-
-    stats = index_vacuum_cleanup(indrel, &vcinfo, stats);
-
-    /*
-     * Release lock acquired above.
-     */
-    if (indrel->rd_am->amconcurrent)
-        UnlockRelation(indrel, RowExclusiveLock);
-    else
-        UnlockRelation(indrel, AccessExclusiveLock);
-
-    if (!stats)
-        return;
-
-    /* now update statistics in pg_class */
-    vac_update_relstats(RelationGetRelid(indrel),
-                        stats->num_pages,
-                        stats->num_index_tuples,
-                        false);
-
-    ereport(elevel,
-            (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
-                    RelationGetRelationName(indrel),
-                    stats->num_index_tuples,
-                    stats->num_pages),
-             errdetail("%u index pages have been deleted, %u are currently reusable.\n"
-                       "%s.",
-                       stats->pages_deleted, stats->pages_free,
-                       pg_rusage_show(&ru0))));
-
-    pfree(stats);
-}
-
 /*
  *  lazy_vacuum_index() -- vacuum one index relation.
  *
  *      Delete all the index entries pointing to tuples listed in
- *      vacrelstats->dead_tuples.
- *
- *      Increment *index_tups_vacuumed by the number of index entries
- *      removed, and *index_pages_removed by the number of pages removed.
- *
- *      Finally, we arrange to update the index relation's statistics in
- *      pg_class.
+ *      vacrelstats->dead_tuples, and update running statistics.
  */
 static void
 lazy_vacuum_index(Relation indrel,
-                  double *index_tups_vacuumed,
-                  BlockNumber *index_pages_removed,
+                  IndexBulkDeleteResult **stats,
                   LVRelStats *vacrelstats)
 {
-    IndexBulkDeleteResult *stats;
-    IndexVacuumCleanupInfo vcinfo;
+    IndexVacuumInfo ivinfo;
     PGRUsage    ru0;

     pg_rusage_init(&ru0);

@@ -691,17 +601,59 @@ lazy_vacuum_index(Relation indrel,
     else
         LockRelation(indrel, AccessExclusiveLock);

-    /* Do bulk deletion */
-    stats = index_bulk_delete(indrel, lazy_tid_reaped, (void *) vacrelstats);
-
-    /* Do post-VACUUM cleanup */
-    vcinfo.vacuum_full = false;
-    vcinfo.message_level = elevel;
-    /* We don't yet know rel_tuples, so pass -1 */
-    /* index_bulk_delete can't have skipped scan anyway ... */
-    vcinfo.num_heap_tuples = -1;
-
-    stats = index_vacuum_cleanup(indrel, &vcinfo, stats);
+    ivinfo.index = indrel;
+    ivinfo.vacuum_full = false;
+    ivinfo.message_level = elevel;
+    /* We don't yet know rel_tuples, so pass -1 */
+    ivinfo.num_heap_tuples = -1;
+
+    /* Do bulk deletion */
+    *stats = index_bulk_delete(&ivinfo, *stats,
+                               lazy_tid_reaped, (void *) vacrelstats);
+
+    /*
+     * Release lock acquired above.
+     */
+    if (indrel->rd_am->amconcurrent)
+        UnlockRelation(indrel, RowExclusiveLock);
+    else
+        UnlockRelation(indrel, AccessExclusiveLock);
+
+    ereport(elevel,
+            (errmsg("scanned index \"%s\" to remove %d row versions",
+                    RelationGetRelationName(indrel),
+                    vacrelstats->num_dead_tuples),
+             errdetail("%s.", pg_rusage_show(&ru0))));
+}
+
+/*
+ *  lazy_cleanup_index() -- do post-vacuum cleanup for one index relation.
+ */
+static void
+lazy_cleanup_index(Relation indrel,
+                   IndexBulkDeleteResult *stats,
+                   LVRelStats *vacrelstats)
+{
+    IndexVacuumInfo ivinfo;
+    PGRUsage    ru0;
+
+    pg_rusage_init(&ru0);
+
+    /*
+     * Acquire appropriate type of lock on index: must be exclusive if index
+     * AM isn't concurrent-safe.
+     */
+    if (indrel->rd_am->amconcurrent)
+        LockRelation(indrel, RowExclusiveLock);
+    else
+        LockRelation(indrel, AccessExclusiveLock);
+
+    ivinfo.index = indrel;
+    ivinfo.vacuum_full = false;
+    ivinfo.message_level = elevel;
+    ivinfo.num_heap_tuples = vacrelstats->rel_tuples;
+
+    stats = index_vacuum_cleanup(&ivinfo, stats);

     /*
      * Release lock acquired above.

@@ -714,10 +666,6 @@ lazy_vacuum_index(Relation indrel,
     if (!stats)
         return;

-    /* accumulate total removed over multiple index-cleaning cycles */
-    *index_tups_vacuumed += stats->tuples_removed;
-    *index_pages_removed += stats->pages_removed;
-
     /* now update statistics in pg_class */
     vac_update_relstats(RelationGetRelid(indrel),
                         stats->num_pages,

@@ -1134,15 +1082,6 @@ lazy_tid_reaped(ItemPointer itemptr, void *state)
     return (res != NULL);
 }

-/*
- * Dummy version for lazy_scan_index.
- */
-static bool
-dummy_tid_reaped(ItemPointer itemptr, void *state)
-{
-    return false;
-}
-
 /*
  * Update the shared Free Space Map with the info we now have about
  * free space in the relation, discarding any old info the map may have.
