
bufmgr: Add one-entry cache for private refcount

The private refcount entry for a buffer is often looked up repeatedly for the
same buffer, e.g. to pin and then unpin a buffer. Benchmarking shows that it's
worthwhile to have a one-entry cache for that case. With that cache in place,
it's worth splitting GetPrivateRefCountEntry() into a small inline
portion (for the cache hit case) and an out-of-line helper for the rest.

This is helpful for some workloads today, but becomes more important in an
upcoming patch that will utilize the private refcount infrastructure to also
store whether the buffer is currently locked, as that increases the rate of
lookups substantially.

Reviewed-by: Melanie Plageman <melanieplageman@gmail.com>
Discussion: https://postgr.es/m/6rgb2nvhyvnszz4ul3wfzlf5rheb2kkwrglthnna7qhe24onwr@vw27225tkyar
Author: Andres Freund
Date:   2025-12-14 13:09:43 -05:00
Parent: edbaaea0a9
Commit: 30df61990c
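Before the diff itself, here is a minimal standalone sketch of the pattern the commit message describes: a one-entry cache checked by a small inline fast path, with the array scan kept in a separate out-of-line helper that refreshes the cache on a hit. All names (Slot, slots, last_slot, lookup, lookup_slow) are invented for illustration and are not PostgreSQL code; the real change to bufmgr.c follows below.

/*
 * Illustrative sketch only: a one-entry cache in front of a linear array
 * scan, with the miss path kept in a separate helper.  In bufmgr.c the slow
 * path is additionally marked pg_noinline and the fast path uses likely().
 */
#include <stdio.h>

#define NSLOTS 8

typedef struct
{
    int key;                    /* -1 means "slot unused" */
    int refcount;
} Slot;

static Slot slots[NSLOTS];
static int last_slot = -1;      /* one-entry cache: index of the last hit */

/* Miss path: scan the whole array and remember where the key was found. */
static Slot *
lookup_slow(int key)
{
    for (int i = 0; i < NSLOTS; i++)
    {
        if (slots[i].key == key)
        {
            last_slot = i;      /* update cache for the next lookup */
            return &slots[i];
        }
    }
    return NULL;
}

/* Fast path: check the cached slot first, fall back to the scan. */
static inline Slot *
lookup(int key)
{
    if (last_slot != -1 && slots[last_slot].key == key)
        return &slots[last_slot];
    return lookup_slow(key);
}

int
main(void)
{
    for (int i = 0; i < NSLOTS; i++)
        slots[i].key = -1;

    slots[3].key = 42;
    slots[3].refcount = 1;

    Slot *a = lookup(42);       /* misses the cache, scans, caches slot 3 */
    Slot *b = lookup(42);       /* hits the one-entry cache */

    printf("found=%d cached_slot=%d same=%d\n", a != NULL, last_slot, a == b);
    return 0;
}

In this sketch the second lookup(42) takes the cached branch without rescanning the array, which is exactly the repeated pin/unpin lookup the commit message is optimizing.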

src/backend/storage/buffer/bufmgr.c

@@ -241,6 +241,7 @@ static HTAB *PrivateRefCountHash = NULL;
 static int32 PrivateRefCountOverflowed = 0;
 static uint32 PrivateRefCountClock = 0;
 static int ReservedRefCountSlot = -1;
+static int PrivateRefCountEntryLast = -1;
 static uint32 MaxProportionalPins;
@@ -374,28 +375,27 @@ NewPrivateRefCountEntry(Buffer buffer)
 	res->buffer = buffer;
 	res->data.refcount = 0;
 
+	/* update cache for the next lookup */
+	PrivateRefCountEntryLast = ReservedRefCountSlot;
+
 	ReservedRefCountSlot = -1;
 
 	return res;
 }
 
 /*
- * Return the PrivateRefCount entry for the passed buffer.
- *
- * Returns NULL if a buffer doesn't have a refcount entry. Otherwise, if
- * do_move is true, and the entry resides in the hashtable the entry is
- * optimized for frequent access by moving it to the array.
+ * Slow-path for GetPrivateRefCountEntry(). This is big enough to not be worth
+ * inlining. This particularly seems to be true if the compiler is capable of
+ * auto-vectorizing the code, as that imposes additional stack-alignment
+ * requirements etc.
  */
-static inline PrivateRefCountEntry *
-GetPrivateRefCountEntry(Buffer buffer, bool do_move)
+static pg_noinline PrivateRefCountEntry *
+GetPrivateRefCountEntrySlow(Buffer buffer, bool do_move)
 {
 	PrivateRefCountEntry *res;
 	int			match = -1;
 	int			i;
 
-	Assert(BufferIsValid(buffer));
-	Assert(!BufferIsLocal(buffer));
-
 	/*
 	 * First search for references in the array, that'll be sufficient in the
 	 * majority of cases.
@@ -409,8 +409,13 @@ GetPrivateRefCountEntry(Buffer buffer, bool do_move)
 		}
 	}
 
-	if (match != -1)
+	if (likely(match != -1))
+	{
+		/* update cache for the next lookup */
+		PrivateRefCountEntryLast = match;
+
 		return &PrivateRefCountArray[match];
+	}
 
 	/*
 	 * By here we know that the buffer, if already pinned, isn't residing in
@@ -450,6 +455,8 @@ GetPrivateRefCountEntry(Buffer buffer, bool do_move)
 		free->buffer = buffer;
 		free->data = res->data;
 		PrivateRefCountArrayKeys[ReservedRefCountSlot] = buffer;
+		/* update cache for the next lookup */
+		PrivateRefCountEntryLast = ReservedRefCountSlot;
 
 		ReservedRefCountSlot = -1;
@@ -464,6 +471,43 @@ GetPrivateRefCountEntry(Buffer buffer, bool do_move)
 	}
 }
 
+/*
+ * Return the PrivateRefCount entry for the passed buffer.
+ *
+ * Returns NULL if a buffer doesn't have a refcount entry. Otherwise, if
+ * do_move is true, and the entry resides in the hashtable the entry is
+ * optimized for frequent access by moving it to the array.
+ */
+static inline PrivateRefCountEntry *
+GetPrivateRefCountEntry(Buffer buffer, bool do_move)
+{
+	Assert(BufferIsValid(buffer));
+	Assert(!BufferIsLocal(buffer));
+
+	/*
+	 * It's very common to look up the same buffer repeatedly. To make that
+	 * fast, we have a one-entry cache.
+	 *
+	 * In contrast to the loop in GetPrivateRefCountEntrySlow(), here it is
+	 * faster to check PrivateRefCountArray[].buffer, as in the case of a hit
+	 * fewer addresses are computed and fewer cachelines are accessed. Whereas
+	 * in GetPrivateRefCountEntrySlow()'s case, checking
+	 * PrivateRefCountArrayKeys saves a lot of memory accesses.
+	 */
+	if (likely(PrivateRefCountEntryLast != -1) &&
+		likely(PrivateRefCountArray[PrivateRefCountEntryLast].buffer == buffer))
+	{
+		return &PrivateRefCountArray[PrivateRefCountEntryLast];
+	}
+
+	/*
+	 * The code for the cached lookup is small enough to be worth inlining
+	 * into the caller. In the miss case however, that empirically doesn't
+	 * seem worth it.
+	 */
+	return GetPrivateRefCountEntrySlow(buffer, do_move);
+}
+
 /*
  * Returns how many times the passed buffer is pinned by this backend.
  *