
Make table_scan_bitmap_next_block() async-friendly

Move all responsibility for indicating a block is exhausted into
table_scan_bitmap_next_tuple() and advance the main iterator in
heap-specific code. This flow control makes more sense and is a step
toward using the read stream API for bitmap heap scans.

Previously, table_scan_bitmap_next_block() returned false to indicate
table_scan_bitmap_next_tuple() should not be called for the tuples on
the page. This happened both when 1) there were no visible tuples on the
page and 2) when the block returned by the iterator was past the end of
the table. BitmapHeapNext() (generic bitmap table scan code) handled the
case when the bitmap was exhausted.

It makes more sense for table_scan_bitmap_next_tuple() to return false
when there are no visible tuples on the page and
table_scan_bitmap_next_block() to return false when the bitmap is
exhausted or there are no more blocks in the table.

As part of this new design, TBMIterateResults are no longer used as a
flow control mechanism in BitmapHeapNext(), so we removed
table_scan_bitmap_next_tuple's TBMIterateResult parameter.
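
As a rough illustration of the new contract, the generic code can drive
the scan as in the sketch below. This is a simplified sketch, not the
committed BitmapHeapNext() code: prefetching, parallel workers, and the
empty-tuple optimization are omitted, and locals such as blockno,
recheck, lossy_pages, exact_pages, slot, node, and econtext are assumed
to be the scan node's usual state.

	/* Sketch only: loop until the bitmap (or relation) is exhausted */
	while (table_scan_bitmap_next_block(scan, &blockno, &recheck,
										&lossy_pages, &exact_pages))
	{
		/* returns false once the current page has no more visible tuples */
		while (table_scan_bitmap_next_tuple(scan, slot))
		{
			/* if the page was lossy, recheck the original bitmap quals */
			if (recheck && !ExecQualAndReset(node->bitmapqualorig, econtext))
				continue;

			return slot;		/* emit one tuple per call */
		}
		/* page exhausted; fall through to fetch the next block */
	}

	return NULL;				/* bitmap exhausted */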

Note that the prefetch iterator is still saved in the
BitmapHeapScanState node and advanced in generic bitmap table scan code.
This is because 1) it was not necessary to change the prefetch iterator
location to change the flow control in BitmapHeapNext(), 2) modifying
prefetch iterator management requires several more steps better split
over multiple commits, and 3) the prefetch iterator will be removed once
the read stream API is used.

Author: Melanie Plageman
Reviewed-by: Tomas Vondra, Andres Freund, Heikki Linnakangas, Mark Dilger
Discussion: https://postgr.es/m/063e4eb4-32d9-439e-a0b1-75565a9835a8%40iki.fi
Melanie Plageman
2024-10-25 10:11:58 -04:00
parent 7bd7aa4d30
commit de380a62b5
6 changed files with 254 additions and 164 deletions

src/backend/access/heap/heapam.c

@@ -1387,8 +1387,8 @@ heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid,
heap_setscanlimits(sscan, startBlk, numBlks);
/* Finally, set the TID range in sscan */
-	ItemPointerCopy(&lowestItem, &sscan->rs_mintid);
-	ItemPointerCopy(&highestItem, &sscan->rs_maxtid);
+	ItemPointerCopy(&lowestItem, &sscan->st.tidrange.rs_mintid);
+	ItemPointerCopy(&highestItem, &sscan->st.tidrange.rs_maxtid);
}
bool
@@ -1396,8 +1396,8 @@ heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction,
TupleTableSlot *slot)
{
HeapScanDesc scan = (HeapScanDesc) sscan;
-	ItemPointer mintid = &sscan->rs_mintid;
-	ItemPointer maxtid = &sscan->rs_maxtid;
+	ItemPointer mintid = &sscan->st.tidrange.rs_mintid;
+	ItemPointer maxtid = &sscan->st.tidrange.rs_maxtid;
/* Note: no locking manipulations needed */
for (;;)

src/backend/access/heap/heapam_handler.c

@@ -2115,18 +2115,49 @@ heapam_estimate_rel_size(Relation rel, int32 *attr_widths,
static bool
heapam_scan_bitmap_next_block(TableScanDesc scan,
-							  TBMIterateResult *tbmres,
+							  BlockNumber *blockno, bool *recheck,
uint64 *lossy_pages, uint64 *exact_pages)
{
HeapScanDesc hscan = (HeapScanDesc) scan;
-	BlockNumber block = tbmres->blockno;
+	BlockNumber block;
Buffer buffer;
Snapshot snapshot;
int ntup;
+	TBMIterateResult *tbmres;
hscan->rs_cindex = 0;
hscan->rs_ntuples = 0;
+	*blockno = InvalidBlockNumber;
+	*recheck = true;
+
+	do
+	{
+		CHECK_FOR_INTERRUPTS();
+
+		if (scan->st.bitmap.rs_shared_iterator)
+			tbmres = tbm_shared_iterate(scan->st.bitmap.rs_shared_iterator);
+		else
+			tbmres = tbm_iterate(scan->st.bitmap.rs_iterator);
+
+		if (tbmres == NULL)
+			return false;
+
+		/*
+		 * Ignore any claimed entries past what we think is the end of the
+		 * relation. It may have been extended after the start of our scan (we
+		 * only hold an AccessShareLock, and it could be inserts from this
+		 * backend). We don't take this optimization in SERIALIZABLE
+		 * isolation though, as we need to examine all invisible tuples
+		 * reachable by the index.
+		 */
+	} while (!IsolationIsSerializable() &&
+			 tbmres->blockno >= hscan->rs_nblocks);
+
+	/* Got a valid block */
+	*blockno = tbmres->blockno;
+	*recheck = tbmres->recheck;
/*
* We can skip fetching the heap page if we don't need any fields from the
* heap, the bitmap entries don't need rechecking, and all tuples on the
@@ -2145,16 +2176,7 @@ heapam_scan_bitmap_next_block(TableScanDesc scan,
return true;
}
-	/*
-	 * Ignore any claimed entries past what we think is the end of the
-	 * relation. It may have been extended after the start of our scan (we
-	 * only hold an AccessShareLock, and it could be inserts from this
-	 * backend). We don't take this optimization in SERIALIZABLE isolation
-	 * though, as we need to examine all invisible tuples reachable by the
-	 * index.
-	 */
-	if (!IsolationIsSerializable() && block >= hscan->rs_nblocks)
-		return false;
+	block = tbmres->blockno;
/*
* Acquire pin on the target heap page, trading in any pin we held before.
@@ -2249,12 +2271,18 @@ heapam_scan_bitmap_next_block(TableScanDesc scan,
else
(*lossy_pages)++;
-	return ntup > 0;
+
+	/*
+	 * Return true to indicate that a valid block was found and the bitmap is
+	 * not exhausted. If there are no visible tuples on this page,
+	 * hscan->rs_ntuples will be 0 and heapam_scan_bitmap_next_tuple() will
+	 * return false returning control to this function to advance to the next
+	 * block in the bitmap.
+	 */
+	return true;
}
static bool
heapam_scan_bitmap_next_tuple(TableScanDesc scan,
-							  TBMIterateResult *tbmres,
TupleTableSlot *slot)
{
HeapScanDesc hscan = (HeapScanDesc) scan;