1
0
mirror of https://github.com/postgres/postgres.git synced 2025-10-25 13:17:41 +03:00

Fix BRIN 32-bit counter wrap issue with huge tables

A BlockNumber (32-bit) might not be large enough to add bo_pagesPerRange
to when the table contains close to 2^32 pages.  At worst, this could
result in a cancellable infinite loop during the BRIN index scan with
power-of-2 pagesPerRange, and slow (inefficient) BRIN index scans and
scanning of unneeded heap blocks for non power-of-2 pagesPerRange.

Backpatch to all supported versions.

Author: sunil s <sunilfeb26@gmail.com>
Reviewed-by: David Rowley <dgrowleyml@gmail.com>
Reviewed-by: Michael Paquier <michael@paquier.xyz>
Discussion: https://postgr.es/m/CAOG6S4-tGksTQhVzJM19NzLYAHusXsK2HmADPZzGQcfZABsvpA@mail.gmail.com
Backpatch-through: 13
This commit is contained in:
David Rowley
2025-10-21 20:46:14 +13:00
parent e4e496e88c
commit 9fd29d7ff4

View File

@@ -573,7 +573,6 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
 	Relation	heapRel;
 	BrinOpaque *opaque;
 	BlockNumber nblocks;
-	BlockNumber heapBlk;
 	int64		totalpages = 0;
 	FmgrInfo   *consistentFn;
 	MemoryContext oldcxt;
@@ -735,9 +734,10 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
 	/*
 	 * Now scan the revmap.  We start by querying for heap page 0,
 	 * incrementing by the number of pages per range; this gives us a full
-	 * view of the table.
+	 * view of the table.  We make use of uint64 for heapBlk as a BlockNumber
+	 * could wrap for tables with close to 2^32 pages.
 	 */
-	for (heapBlk = 0; heapBlk < nblocks; heapBlk += opaque->bo_pagesPerRange)
+	for (uint64 heapBlk = 0; heapBlk < nblocks; heapBlk += opaque->bo_pagesPerRange)
 	{
 		bool		addrange;
 		bool		gottuple = false;
@@ -749,7 +749,7 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
 		MemoryContextReset(perRangeCxt);
-		tup = brinGetTupleForHeapBlock(opaque->bo_rmAccess, heapBlk, &buf,
+		tup = brinGetTupleForHeapBlock(opaque->bo_rmAccess, (BlockNumber) heapBlk, &buf,
 									   &off, &size, BUFFER_LOCK_SHARE);
 		if (tup)
 		{
@@ -924,7 +924,7 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
 		/* add the pages in the range to the output bitmap, if needed */
 		if (addrange)
 		{
-			BlockNumber pageno;
+			uint64		pageno;

 			for (pageno = heapBlk;
 				 pageno <= Min(nblocks, heapBlk + opaque->bo_pagesPerRange) - 1;