
BitmapHeapScan: Push skip_fetch optimization into table AM

Commit 7c70996ebf introduced an optimization to allow bitmap
scans to operate like index-only scans by not fetching a block from the
heap if none of the underlying data is needed and the block is marked
all visible in the visibility map.
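
In essence, the skip-fetch test this commit moves into the heap AM looks like the following condensed sketch (not the verbatim source; the actual code is in the heapam_handler.c hunk below):

	/*
	 * Skip reading the heap page: no column values are needed
	 * (SO_NEED_TUPLES is unset), the bitmap entry is exact (no recheck),
	 * and the visibility map says every tuple on the page is visible.
	 */
	if (!(scan->rs_flags & SO_NEED_TUPLES) &&
		!tbmres->recheck &&
		VM_ALL_VISIBLE(scan->rs_rd, tbmres->blockno, &hscan->rs_vmbuffer))
	{
		/* remember how many all-null tuples to emit for this page */
		hscan->rs_empty_tuples_pending += tbmres->ntuples;
		return true;
	}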

With the introduction of table AMs, a FIXME was added to this code
indicating that the skip_fetch logic should be pushed into the table
AM-specific code, as not all table AMs may use a visibility map in the
same way.

This commit resolves this FIXME for the current block. The layering
violation is still present in BitmapHeapScan's prefetching code, which
uses the visibility map to decide whether or not to prefetch a block.
However, this can be addressed independently.
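
For reference, the prefetch-side check in nodeBitmapHeapscan.c looks roughly like the sketch below (a simplified sketch, not the exact source; tbmpre stands for the iterate result of the block being considered for prefetch and pvmbuffer for the executor's own visibility map buffer):

	bool		skip_fetch;

	/*
	 * The executor itself consults the heap's visibility map to decide
	 * whether prefetching the block is worthwhile, which is the layering
	 * violation left for later cleanup.
	 */
	skip_fetch = (!(scan->rs_flags & SO_NEED_TUPLES) &&
				  !tbmpre->recheck &&
				  VM_ALL_VISIBLE(scan->rs_rd, tbmpre->blockno,
								 &node->pvmbuffer));

	if (!skip_fetch)
		PrefetchBuffer(scan->rs_rd, MAIN_FORKNUM, tbmpre->blockno);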

Author: Melanie Plageman
Reviewed-by: Andres Freund, Heikki Linnakangas, Tomas Vondra, Mark Dilger
Discussion: https://postgr.es/m/CAAKRu_ZwCwWFeL_H3ia26bP2e7HiKLWt0ZmGXPVwPO6uXq0vaA%40mail.gmail.com
Committer: Tomas Vondra
Committed: 2024-04-07 00:24:12 +02:00
Parent: 87c21bb941
Commit: 04e72ed617
6 changed files with 106 additions and 94 deletions

src/backend/access/heap/heapam.c

@@ -967,6 +967,8 @@ heap_beginscan(Relation relation, Snapshot snapshot,
 	scan->rs_base.rs_flags = flags;
 	scan->rs_base.rs_parallel = parallel_scan;
 	scan->rs_strategy = NULL;	/* set in initscan */
+	scan->rs_vmbuffer = InvalidBuffer;
+	scan->rs_empty_tuples_pending = 0;
 
 	/*
 	 * Disable page-at-a-time mode if it's not a MVCC-safe snapshot.
@@ -1055,6 +1057,14 @@ heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
 	if (BufferIsValid(scan->rs_cbuf))
 		ReleaseBuffer(scan->rs_cbuf);
 
+	if (BufferIsValid(scan->rs_vmbuffer))
+	{
+		ReleaseBuffer(scan->rs_vmbuffer);
+		scan->rs_vmbuffer = InvalidBuffer;
+	}
+
+	Assert(scan->rs_empty_tuples_pending == 0);
+
 	/*
 	 * reinitialize scan descriptor
 	 */
@@ -1074,6 +1084,11 @@ heap_endscan(TableScanDesc sscan)
 	if (BufferIsValid(scan->rs_cbuf))
 		ReleaseBuffer(scan->rs_cbuf);
 
+	if (BufferIsValid(scan->rs_vmbuffer))
+		ReleaseBuffer(scan->rs_vmbuffer);
+
+	Assert(scan->rs_empty_tuples_pending == 0);
+
 	/*
 	 * decrement relation reference count and free scan descriptor storage
*/

src/backend/access/heap/heapam_handler.c

@@ -27,6 +27,7 @@
 #include "access/syncscan.h"
 #include "access/tableam.h"
 #include "access/tsmapi.h"
+#include "access/visibilitymap.h"
 #include "access/xact.h"
 #include "catalog/catalog.h"
 #include "catalog/index.h"
@@ -2198,6 +2199,24 @@ heapam_scan_bitmap_next_block(TableScanDesc scan,
 	hscan->rs_cindex = 0;
 	hscan->rs_ntuples = 0;
 
+	/*
+	 * We can skip fetching the heap page if we don't need any fields from the
+	 * heap, the bitmap entries don't need rechecking, and all tuples on the
+	 * page are visible to our transaction.
+	 */
+	if (!(scan->rs_flags & SO_NEED_TUPLES) &&
+		!tbmres->recheck &&
+		VM_ALL_VISIBLE(scan->rs_rd, tbmres->blockno, &hscan->rs_vmbuffer))
+	{
+		/* can't be lossy in the skip_fetch case */
+		Assert(tbmres->ntuples >= 0);
+		Assert(hscan->rs_empty_tuples_pending >= 0);
+
+		hscan->rs_empty_tuples_pending += tbmres->ntuples;
+
+		return true;
+	}
+
 	/*
 	 * Ignore any claimed entries past what we think is the end of the
 	 * relation.  It may have been extended after the start of our scan (we
@@ -2310,6 +2329,16 @@ heapam_scan_bitmap_next_tuple(TableScanDesc scan,
 	Page		page;
 	ItemId		lp;
 
+	if (hscan->rs_empty_tuples_pending > 0)
+	{
+		/*
+		 * If we don't have to fetch the tuple, just return nulls.
+		 */
+		ExecStoreAllNullTuple(slot);
+		hscan->rs_empty_tuples_pending--;
+		return true;
+	}
+
 	/*
 	 * Out of range? If so, nothing more to look at on this page
*/