diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index a2999b3bf77..005440eb392 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -98,6 +98,12 @@
  */
 #define SKIP_PAGES_THRESHOLD	((BlockNumber) 32)
 
+/*
+ * Size of the prefetch window for lazy vacuum backwards truncation scan.
+ * Needs to be a power of 2.
+ */
+#define PREFETCH_SIZE			((BlockNumber) 32)
+
 typedef struct LVRelStats
 {
 	/* hasindex = true means two-pass strategy; false means one-pass */
@@ -1826,13 +1832,22 @@ static BlockNumber
 count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
 {
 	BlockNumber blkno;
+	BlockNumber prefetchedUntil;
 	instr_time	starttime;
 
 	/* Initialize the starttime if we check for conflicting lock requests */
 	INSTR_TIME_SET_CURRENT(starttime);
 
-	/* Strange coding of loop control is needed because blkno is unsigned */
+	/*
+	 * Start checking blocks at what we believe relation end to be and move
+	 * backwards.  (Strange coding of loop control is needed because blkno is
+	 * unsigned.)  To make the scan faster, we prefetch a few blocks at a time
+	 * in forward direction, so that OS-level readahead can kick in.
+	 */
 	blkno = vacrelstats->rel_pages;
+	StaticAssertStmt((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
+					 "prefetch size must be power of 2");
+	prefetchedUntil = InvalidBlockNumber;
 	while (blkno > vacrelstats->nonempty_pages)
 	{
 		Buffer		buf;
@@ -1882,6 +1897,21 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
 
 		blkno--;
 
+		/* If we haven't prefetched this lot yet, do so now. */
+		if (prefetchedUntil > blkno)
+		{
+			BlockNumber prefetchStart;
+			BlockNumber pblkno;
+
+			prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
+			for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
+			{
+				PrefetchBuffer(onerel, MAIN_FORKNUM, pblkno);
+				CHECK_FOR_INTERRUPTS();
+			}
+			prefetchedUntil = prefetchStart;
+		}
+
 		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
 								 RBM_NORMAL, vac_strategy);
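
The power-of-2 window arithmetic in the patch can be illustrated outside the server. Below is a minimal standalone C sketch, not PostgreSQL code: the BlockNumber typedef, InvalidBlockNumber, PREFETCH_SIZE, and the prefetch_block() stand-in are simplified substitutes for the real definitions, and the rel_pages/nonempty_pages values are made up for demonstration.

/*
 * Standalone sketch of the prefetch-window logic used by the patch above.
 * All names here are simplified stand-ins, not the server's definitions.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t BlockNumber;
#define InvalidBlockNumber	((BlockNumber) 0xFFFFFFFF)
#define PREFETCH_SIZE		((BlockNumber) 32)

/* Stand-in for PrefetchBuffer(): just report which block would be hinted. */
static void
prefetch_block(BlockNumber blkno)
{
	printf("prefetch block %u\n", (unsigned) blkno);
}

int
main(void)
{
	BlockNumber rel_pages = 70;			/* pretend relation end */
	BlockNumber nonempty_pages = 0;		/* pretend truncation floor */
	BlockNumber prefetchedUntil = InvalidBlockNumber;
	BlockNumber blkno = rel_pages;

	while (blkno > nonempty_pages)
	{
		blkno--;

		/*
		 * Once the backwards scan drops below the last prefetched window,
		 * hint the whole PREFETCH_SIZE-aligned window [prefetchStart, blkno]
		 * in forward order, so OS-level readahead sees a sequential pattern.
		 */
		if (prefetchedUntil > blkno)
		{
			BlockNumber prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
			BlockNumber pblkno;

			for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
				prefetch_block(pblkno);
			prefetchedUntil = prefetchStart;
		}

		/* ... the real code reads block blkno with ReadBufferExtended() here ... */
	}
	return 0;
}

With rel_pages = 70, the sketch hints blocks 64-69, then 32-63, then 0-31: even though the truncation scan itself walks backwards, each window is issued in forward order, which is what lets OS-level readahead help.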