Mirror of https://github.com/postgres/postgres.git (synced 2025-09-02 04:21:28 +03:00)

Add VACUUM/ANALYZE BUFFER_USAGE_LIMIT option

Add new options to the VACUUM and ANALYZE commands called
BUFFER_USAGE_LIMIT to allow users more control over how large to make the
buffer access strategy that is used to limit the usage of buffers in
shared buffers.  Larger rings can allow VACUUM to run more quickly but
have the drawback of VACUUM possibly evicting more buffers from shared
buffers that might be useful for other queries running on the database.

Here we also add a new GUC named vacuum_buffer_usage_limit which controls
how large to make the access strategy when it's not specified in the
VACUUM/ANALYZE command.  This defaults to 256KB, which is the same size as
the access strategy was prior to this change.  This setting also
controls how large to make the buffer access strategy for autovacuum.

Per idea by Andres Freund.

Author: Melanie Plageman
Reviewed-by: David Rowley
Reviewed-by: Andres Freund
Reviewed-by: Justin Pryzby
Reviewed-by: Bharath Rupireddy
Discussion: https://postgr.es/m/20230111182720.ejifsclfwymw2reb@awork3.anarazel.de
This commit is contained in:
David Rowley
2023-04-07 11:40:31 +12:00
parent 5279e9db8e
commit 1cbbee0338
17 changed files with 322 additions and 25 deletions

View File

@@ -229,12 +229,12 @@ update hint bits). In a scan that modifies every page in the scan, like a
bulk UPDATE or DELETE, the buffers in the ring will always be dirtied and
the ring strategy effectively degrades to the normal strategy.
-VACUUM uses a 256KB ring like sequential scans, but dirty pages are not
-removed from the ring. Instead, WAL is flushed if needed to allow reuse of
-the buffers. Before introducing the buffer ring strategy in 8.3, VACUUM's
-buffers were sent to the freelist, which was effectively a buffer ring of 1
-buffer, resulting in excessive WAL flushing. Allowing VACUUM to update
-256KB between WAL flushes should be more efficient.
+VACUUM uses a ring like sequential scans, however, the size of this ring is
+controlled by the vacuum_buffer_usage_limit GUC. Dirty pages are not removed
+from the ring. Instead, WAL is flushed if needed to allow reuse of the
+buffers. Before introducing the buffer ring strategy in 8.3, VACUUM's buffers
+were sent to the freelist, which was effectively a buffer ring of 1 buffer,
+resulting in excessive WAL flushing.
Bulk writes work similarly to VACUUM. Currently this applies only to
COPY IN and CREATE TABLE AS SELECT. (Might it be interesting to make

View File

@@ -540,8 +540,7 @@ StrategyInitialize(bool init)
/*
 * GetAccessStrategy -- create a BufferAccessStrategy object
 *
 * The object is allocated in the current memory context.
 *
 * Maps each built-in strategy type to its default ring size and delegates
 * the actual construction to GetAccessStrategyWithSize().  Returns NULL for
 * BAS_NORMAL, meaning "use the default buffer replacement policy".
 */
BufferAccessStrategy
GetAccessStrategy(BufferAccessStrategyType btype)
{
	int			ring_size_kb;

	/*
	 * Select ring size to use.  See buffer/README for rationales.
	 */
	switch (btype)
	{
		case BAS_NORMAL:
			/* if someone asks for NORMAL, just give 'em a "default" object */
			return NULL;

		case BAS_BULKREAD:
			ring_size_kb = 256;
			break;
		case BAS_BULKWRITE:
			ring_size_kb = 16 * 1024;
			break;
		case BAS_VACUUM:
			ring_size_kb = 256;
			break;

		default:
			elog(ERROR, "unrecognized buffer access strategy: %d",
				 (int) btype);
			return NULL;		/* keep compiler quiet */
	}

	return GetAccessStrategyWithSize(btype, ring_size_kb);
}
/*
 * GetAccessStrategyWithSize -- create a BufferAccessStrategy object with a
 *		number of buffers equivalent to the passed in size.
 *
 * If the given ring size is 0, no BufferAccessStrategy will be created and
 * the function will return NULL.  ring_size_kb must not be negative.
 */
BufferAccessStrategy
GetAccessStrategyWithSize(BufferAccessStrategyType btype, int ring_size_kb)
{
	int			ring_buffers;
	BufferAccessStrategy strategy;

	Assert(ring_size_kb >= 0);

	/* Figure out how many buffers ring_size_kb is */
	ring_buffers = ring_size_kb / (BLCKSZ / 1024);

	/* 0 means unlimited, so no BufferAccessStrategy required */
	if (ring_buffers == 0)
		return NULL;

	/* Cap to 1/8th of shared_buffers */
	ring_buffers = Min(NBuffers / 8, ring_buffers);

	/* NBuffers should never be less than 16, so this shouldn't happen */
	Assert(ring_buffers > 0);

	/* Allocate the object and initialize all elements to zeroes */
	strategy = (BufferAccessStrategy)
		palloc0(offsetof(BufferAccessStrategyData, buffers) +
				ring_buffers * sizeof(Buffer));

	/* Set fields that don't start out zero */
	strategy->btype = btype;
	strategy->nbuffers = ring_buffers;

	return strategy;
}
/*
 * GetAccessStrategyBufferCount -- report how many buffers the strategy's
 *		ring holds.
 *
 * A NULL strategy yields 0, mirroring GetAccessStrategyWithSize(), which
 * hands back NULL when asked for a zero-sized ring.
 */
int
GetAccessStrategyBufferCount(BufferAccessStrategy strategy)
{
	return (strategy == NULL) ? 0 : strategy->nbuffers;
}
/*
* FreeAccessStrategy -- release a BufferAccessStrategy object
*