Remove table AM callback scan_bitmap_next_block
After pushing the bitmap iterator into table-AM specific code (as part of making
bitmap heap scan use the read stream API in 2b73a8cd33b7), scan_bitmap_next_block()
no longer returns the current block number. Since scan_bitmap_next_block() isn't
returning any relevant information to bitmap table scan code, it makes more sense
to get rid of it.

Now, bitmap table scan code only calls table_scan_bitmap_next_tuple(), and the
heap AM implementation of scan_bitmap_next_block() is a local helper in
heapam_handler.c.

Reviewed-by: Tomas Vondra <tomas@vondra.me>
Discussion: https://postgr.es/m/flat/CAAKRu_ZwCwWFeL_H3ia26bP2e7HiKLWt0ZmGXPVwPO6uXq0vaA%40mail.gmail.com
Parent: 2b73a8cd33
Commit: c3953226a0
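In short, the patch collapses the two bitmap-scan callbacks into one. The lines
below are a summary sketch of the signature change, distilled from the tableam.h
hunks further down in this diff; it is illustrative only, not the full header text.

    /* Before: two callbacks in TableAmRoutine (per the removed tableam.h lines) */
    bool        (*scan_bitmap_next_block) (TableScanDesc scan,
                                           bool *recheck,
                                           uint64 *lossy_pages,
                                           uint64 *exact_pages);
    bool        (*scan_bitmap_next_tuple) (TableScanDesc scan,
                                           TupleTableSlot *slot);

    /* After: a single callback; advancing to the next block happens inside the AM */
    bool        (*scan_bitmap_next_tuple) (TableScanDesc scan,
                                           TupleTableSlot *slot,
                                           bool *recheck,
                                           uint64 *lossy_pages,
                                           uint64 *exact_pages);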
src/backend/access/heap/heapam_handler.c

@@ -56,6 +56,10 @@ static bool SampleHeapTupleVisible(TableScanDesc scan, Buffer buffer,
static BlockNumber heapam_scan_get_blocks_done(HeapScanDesc hscan);

static bool BitmapHeapScanNextBlock(TableScanDesc scan,
                                    bool *recheck,
                                    uint64 *lossy_pages, uint64 *exact_pages);

/* ------------------------------------------------------------------------
 * Slot related callbacks for heap AM
@@ -2115,176 +2119,12 @@ heapam_estimate_rel_size(Relation rel, int32 *attr_widths,
 * ------------------------------------------------------------------------
 */

static bool
heapam_scan_bitmap_next_block(TableScanDesc scan,
                              bool *recheck,
                              uint64 *lossy_pages, uint64 *exact_pages)
{
    BitmapHeapScanDesc bscan = (BitmapHeapScanDesc) scan;
    HeapScanDesc hscan = (HeapScanDesc) bscan;
    BlockNumber block;
    void       *per_buffer_data;
    Buffer      buffer;
    Snapshot    snapshot;
    int         ntup;
    TBMIterateResult *tbmres;
    OffsetNumber offsets[TBM_MAX_TUPLES_PER_PAGE];
    int         noffsets = -1;

    Assert(scan->rs_flags & SO_TYPE_BITMAPSCAN);
    Assert(hscan->rs_read_stream);

    hscan->rs_cindex = 0;
    hscan->rs_ntuples = 0;

    /* Release buffer containing previous block. */
    if (BufferIsValid(hscan->rs_cbuf))
    {
        ReleaseBuffer(hscan->rs_cbuf);
        hscan->rs_cbuf = InvalidBuffer;
    }

    hscan->rs_cbuf = read_stream_next_buffer(hscan->rs_read_stream,
                                             &per_buffer_data);

    if (BufferIsInvalid(hscan->rs_cbuf))
    {
        if (BufferIsValid(bscan->rs_vmbuffer))
        {
            ReleaseBuffer(bscan->rs_vmbuffer);
            bscan->rs_vmbuffer = InvalidBuffer;
        }

        /*
         * Bitmap is exhausted. Time to emit empty tuples if relevant. We emit
         * all empty tuples at the end instead of emitting them per block we
         * skip fetching. This is necessary because the streaming read API
         * will only return TBMIterateResults for blocks actually fetched.
         * When we skip fetching a block, we keep track of how many empty
         * tuples to emit at the end of the BitmapHeapScan. We do not recheck
         * all NULL tuples.
         */
        *recheck = false;
        return bscan->rs_empty_tuples_pending > 0;
    }

    Assert(per_buffer_data);

    tbmres = per_buffer_data;

    Assert(BlockNumberIsValid(tbmres->blockno));
    Assert(BufferGetBlockNumber(hscan->rs_cbuf) == tbmres->blockno);

    /* Exact pages need their tuple offsets extracted. */
    if (!tbmres->lossy)
        noffsets = tbm_extract_page_tuple(tbmres, offsets,
                                          TBM_MAX_TUPLES_PER_PAGE);

    *recheck = tbmres->recheck;

    block = hscan->rs_cblock = tbmres->blockno;
    buffer = hscan->rs_cbuf;
    snapshot = scan->rs_snapshot;

    ntup = 0;

    /*
     * Prune and repair fragmentation for the whole page, if possible.
     */
    heap_page_prune_opt(scan->rs_rd, buffer);

    /*
     * We must hold share lock on the buffer content while examining tuple
     * visibility. Afterwards, however, the tuples we have found to be
     * visible are guaranteed good as long as we hold the buffer pin.
     */
    LockBuffer(buffer, BUFFER_LOCK_SHARE);

    /*
     * We need two separate strategies for lossy and non-lossy cases.
     */
    if (!tbmres->lossy)
    {
        /*
         * Bitmap is non-lossy, so we just look through the offsets listed in
         * tbmres; but we have to follow any HOT chain starting at each such
         * offset.
         */
        int         curslot;

        /* We must have extracted the tuple offsets by now */
        Assert(noffsets > -1);

        for (curslot = 0; curslot < noffsets; curslot++)
        {
            OffsetNumber offnum = offsets[curslot];
            ItemPointerData tid;
            HeapTupleData heapTuple;

            ItemPointerSet(&tid, block, offnum);
            if (heap_hot_search_buffer(&tid, scan->rs_rd, buffer, snapshot,
                                       &heapTuple, NULL, true))
                hscan->rs_vistuples[ntup++] = ItemPointerGetOffsetNumber(&tid);
        }
    }
    else
    {
        /*
         * Bitmap is lossy, so we must examine each line pointer on the page.
         * But we can ignore HOT chains, since we'll check each tuple anyway.
         */
        Page        page = BufferGetPage(buffer);
        OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
        OffsetNumber offnum;

        for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
        {
            ItemId      lp;
            HeapTupleData loctup;
            bool        valid;

            lp = PageGetItemId(page, offnum);
            if (!ItemIdIsNormal(lp))
                continue;
            loctup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
            loctup.t_len = ItemIdGetLength(lp);
            loctup.t_tableOid = scan->rs_rd->rd_id;
            ItemPointerSet(&loctup.t_self, block, offnum);
            valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
            if (valid)
            {
                hscan->rs_vistuples[ntup++] = offnum;
                PredicateLockTID(scan->rs_rd, &loctup.t_self, snapshot,
                                 HeapTupleHeaderGetXmin(loctup.t_data));
            }
            HeapCheckForSerializableConflictOut(valid, scan->rs_rd, &loctup,
                                                buffer, snapshot);
        }
    }

    LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

    Assert(ntup <= MaxHeapTuplesPerPage);
    hscan->rs_ntuples = ntup;

    if (tbmres->lossy)
        (*lossy_pages)++;
    else
        (*exact_pages)++;

    /*
     * Return true to indicate that a valid block was found and the bitmap is
     * not exhausted. If there are no visible tuples on this page,
     * hscan->rs_ntuples will be 0 and heapam_scan_bitmap_next_tuple() will
     * return false returning control to this function to advance to the next
     * block in the bitmap.
     */
    return true;
}

static bool
heapam_scan_bitmap_next_tuple(TableScanDesc scan,
                              TupleTableSlot *slot)
                              TupleTableSlot *slot,
                              bool *recheck,
                              uint64 *lossy_pages,
                              uint64 *exact_pages)
{
    BitmapHeapScanDesc bscan = (BitmapHeapScanDesc) scan;
    HeapScanDesc hscan = (HeapScanDesc) bscan;
@@ -2292,21 +2132,31 @@ heapam_scan_bitmap_next_tuple(TableScanDesc scan,
    Page        page;
    ItemId      lp;

    if (bscan->rs_empty_tuples_pending > 0)
    {
        /*
         * If we don't have to fetch the tuple, just return nulls.
         */
        ExecStoreAllNullTuple(slot);
        bscan->rs_empty_tuples_pending--;
        return true;
    }

    /*
     * Out of range? If so, nothing more to look at on this page
     */
    if (hscan->rs_cindex >= hscan->rs_ntuples)
        return false;
    while (hscan->rs_cindex >= hscan->rs_ntuples)
    {
        /*
         * Emit empty tuples before advancing to the next block
         */
        if (bscan->rs_empty_tuples_pending > 0)
        {
            /*
             * If we don't have to fetch the tuple, just return nulls.
             */
            ExecStoreAllNullTuple(slot);
            bscan->rs_empty_tuples_pending--;
            return true;
        }

        /*
         * Returns false if the bitmap is exhausted and there are no further
         * blocks we need to scan.
         */
        if (!BitmapHeapScanNextBlock(scan, recheck, lossy_pages, exact_pages))
            return false;
    }

    targoffset = hscan->rs_vistuples[hscan->rs_cindex];
    page = BufferGetPage(hscan->rs_cbuf);
@@ -2614,6 +2464,177 @@ SampleHeapTupleVisible(TableScanDesc scan, Buffer buffer,
    }
}

/*
 * Helper function get the next block of a bitmap heap scan. Returns true when
 * it got the next block and saved it in the scan descriptor and false when
 * the bitmap and or relation are exhausted.
 */
static bool
BitmapHeapScanNextBlock(TableScanDesc scan,
                        bool *recheck,
                        uint64 *lossy_pages, uint64 *exact_pages)
{
    BitmapHeapScanDesc bscan = (BitmapHeapScanDesc) scan;
    HeapScanDesc hscan = (HeapScanDesc) bscan;
    BlockNumber block;
    void       *per_buffer_data;
    Buffer      buffer;
    Snapshot    snapshot;
    int         ntup;
    TBMIterateResult *tbmres;
    OffsetNumber offsets[TBM_MAX_TUPLES_PER_PAGE];
    int         noffsets = -1;

    Assert(scan->rs_flags & SO_TYPE_BITMAPSCAN);
    Assert(hscan->rs_read_stream);

    hscan->rs_cindex = 0;
    hscan->rs_ntuples = 0;

    /* Release buffer containing previous block. */
    if (BufferIsValid(hscan->rs_cbuf))
    {
        ReleaseBuffer(hscan->rs_cbuf);
        hscan->rs_cbuf = InvalidBuffer;
    }

    hscan->rs_cbuf = read_stream_next_buffer(hscan->rs_read_stream,
                                             &per_buffer_data);

    if (BufferIsInvalid(hscan->rs_cbuf))
    {
        if (BufferIsValid(bscan->rs_vmbuffer))
        {
            ReleaseBuffer(bscan->rs_vmbuffer);
            bscan->rs_vmbuffer = InvalidBuffer;
        }

        /*
         * Bitmap is exhausted. Time to emit empty tuples if relevant. We emit
         * all empty tuples at the end instead of emitting them per block we
         * skip fetching. This is necessary because the streaming read API
         * will only return TBMIterateResults for blocks actually fetched.
         * When we skip fetching a block, we keep track of how many empty
         * tuples to emit at the end of the BitmapHeapScan. We do not recheck
         * all NULL tuples.
         */
        *recheck = false;
        return bscan->rs_empty_tuples_pending > 0;
    }

    Assert(per_buffer_data);

    tbmres = per_buffer_data;

    Assert(BlockNumberIsValid(tbmres->blockno));
    Assert(BufferGetBlockNumber(hscan->rs_cbuf) == tbmres->blockno);

    /* Exact pages need their tuple offsets extracted. */
    if (!tbmres->lossy)
        noffsets = tbm_extract_page_tuple(tbmres, offsets,
                                          TBM_MAX_TUPLES_PER_PAGE);

    *recheck = tbmres->recheck;

    block = hscan->rs_cblock = tbmres->blockno;
    buffer = hscan->rs_cbuf;
    snapshot = scan->rs_snapshot;

    ntup = 0;

    /*
     * Prune and repair fragmentation for the whole page, if possible.
     */
    heap_page_prune_opt(scan->rs_rd, buffer);

    /*
     * We must hold share lock on the buffer content while examining tuple
     * visibility. Afterwards, however, the tuples we have found to be
     * visible are guaranteed good as long as we hold the buffer pin.
     */
    LockBuffer(buffer, BUFFER_LOCK_SHARE);

    /*
     * We need two separate strategies for lossy and non-lossy cases.
     */
    if (!tbmres->lossy)
    {
        /*
         * Bitmap is non-lossy, so we just look through the offsets listed in
         * tbmres; but we have to follow any HOT chain starting at each such
         * offset.
         */
        int         curslot;

        /* We must have extracted the tuple offsets by now */
        Assert(noffsets > -1);

        for (curslot = 0; curslot < noffsets; curslot++)
        {
            OffsetNumber offnum = offsets[curslot];
            ItemPointerData tid;
            HeapTupleData heapTuple;

            ItemPointerSet(&tid, block, offnum);
            if (heap_hot_search_buffer(&tid, scan->rs_rd, buffer, snapshot,
                                       &heapTuple, NULL, true))
                hscan->rs_vistuples[ntup++] = ItemPointerGetOffsetNumber(&tid);
        }
    }
    else
    {
        /*
         * Bitmap is lossy, so we must examine each line pointer on the page.
         * But we can ignore HOT chains, since we'll check each tuple anyway.
         */
        Page        page = BufferGetPage(buffer);
        OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
        OffsetNumber offnum;

        for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
        {
            ItemId      lp;
            HeapTupleData loctup;
            bool        valid;

            lp = PageGetItemId(page, offnum);
            if (!ItemIdIsNormal(lp))
                continue;
            loctup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
            loctup.t_len = ItemIdGetLength(lp);
            loctup.t_tableOid = scan->rs_rd->rd_id;
            ItemPointerSet(&loctup.t_self, block, offnum);
            valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
            if (valid)
            {
                hscan->rs_vistuples[ntup++] = offnum;
                PredicateLockTID(scan->rs_rd, &loctup.t_self, snapshot,
                                 HeapTupleHeaderGetXmin(loctup.t_data));
            }
            HeapCheckForSerializableConflictOut(valid, scan->rs_rd, &loctup,
                                                buffer, snapshot);
        }
    }

    LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

    Assert(ntup <= MaxHeapTuplesPerPage);
    hscan->rs_ntuples = ntup;

    if (tbmres->lossy)
        (*lossy_pages)++;
    else
        (*exact_pages)++;

    /*
     * Return true to indicate that a valid block was found and the bitmap is
     * not exhausted. If there are no visible tuples on this page,
     * hscan->rs_ntuples will be 0 and heapam_scan_bitmap_next_tuple() will
     * return false returning control to this function to advance to the next
     * block in the bitmap.
     */
    return true;
}

/* ------------------------------------------------------------------------
 * Definition of the heap table access method.
@@ -2673,7 +2694,6 @@ static const TableAmRoutine heapam_methods = {

    .relation_estimate_size = heapam_estimate_rel_size,

    .scan_bitmap_next_block = heapam_scan_bitmap_next_block,
    .scan_bitmap_next_tuple = heapam_scan_bitmap_next_tuple,
    .scan_sample_next_block = heapam_scan_sample_next_block,
    .scan_sample_next_tuple = heapam_scan_sample_next_tuple
src/backend/access/table/tableamapi.c

@@ -91,9 +91,6 @@ GetTableAmRoutine(Oid amhandler)

    Assert(routine->relation_estimate_size != NULL);

    /* optional, but one callback implies presence of the other */
    Assert((routine->scan_bitmap_next_block == NULL) ==
           (routine->scan_bitmap_next_tuple == NULL));
    Assert(routine->scan_sample_next_block != NULL);
    Assert(routine->scan_sample_next_tuple != NULL);
src/backend/executor/nodeBitmapHeapscan.c

@@ -138,69 +138,44 @@ BitmapTableScanSetup(BitmapHeapScanState *node)
static TupleTableSlot *
BitmapHeapNext(BitmapHeapScanState *node)
{
    ExprContext *econtext;
    TableScanDesc scan;
    TupleTableSlot *slot;

    /*
     * extract necessary information from index scan node
     */
    econtext = node->ss.ps.ps_ExprContext;
    slot = node->ss.ss_ScanTupleSlot;
    scan = node->ss.ss_currentScanDesc;
    ExprContext *econtext = node->ss.ps.ps_ExprContext;
    TupleTableSlot *slot = node->ss.ss_ScanTupleSlot;

    /*
     * If we haven't yet performed the underlying index scan, do it, and begin
     * the iteration over the bitmap.
     */
    if (!node->initialized)
    {
        BitmapTableScanSetup(node);
        scan = node->ss.ss_currentScanDesc;
        goto new_page;
    }

    for (;;)
    while (table_scan_bitmap_next_tuple(node->ss.ss_currentScanDesc,
                                        slot, &node->recheck,
                                        &node->stats.lossy_pages,
                                        &node->stats.exact_pages))
    {
        while (table_scan_bitmap_next_tuple(scan, slot))
        {
            /*
             * Continuing in previously obtained page.
             */

            CHECK_FOR_INTERRUPTS();

            /*
             * If we are using lossy info, we have to recheck the qual
             * conditions at every tuple.
             */
            if (node->recheck)
            {
                econtext->ecxt_scantuple = slot;
                if (!ExecQualAndReset(node->bitmapqualorig, econtext))
                {
                    /* Fails recheck, so drop it and loop back for another */
                    InstrCountFiltered2(node, 1);
                    ExecClearTuple(slot);
                    continue;
                }
            }

            /* OK to return this tuple */
            return slot;
        }

new_page:
        /*
         * Continuing in previously obtained page.
         */
        CHECK_FOR_INTERRUPTS();

        /*
         * Returns false if the bitmap is exhausted and there are no further
         * blocks we need to scan.
         * If we are using lossy info, we have to recheck the qual conditions
         * at every tuple.
         */
        if (!table_scan_bitmap_next_block(scan,
                                          &node->recheck,
                                          &node->stats.lossy_pages,
                                          &node->stats.exact_pages))
            break;
        if (node->recheck)
        {
            econtext->ecxt_scantuple = slot;
            if (!ExecQualAndReset(node->bitmapqualorig, econtext))
            {
                /* Fails recheck, so drop it and loop back for another */
                InstrCountFiltered2(node, 1);
                ExecClearTuple(slot);
                continue;
            }
        }

        /* OK to return this tuple */
        return slot;
    }

    /*
src/backend/optimizer/util/plancat.c

@@ -325,7 +325,7 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
    info->amcanparallel = amroutine->amcanparallel;
    info->amhasgettuple = (amroutine->amgettuple != NULL);
    info->amhasgetbitmap = amroutine->amgetbitmap != NULL &&
        relation->rd_tableam->scan_bitmap_next_block != NULL;
        relation->rd_tableam->scan_bitmap_next_tuple != NULL;
    info->amcanmarkpos = (amroutine->ammarkpos != NULL &&
                          amroutine->amrestrpos != NULL);
    info->amcostestimate = amroutine->amcostestimate;
src/include/access/tableam.h

@@ -779,43 +779,23 @@ typedef struct TableAmRoutine
     * ------------------------------------------------------------------------
     */

    /*
     * Prepare to fetch / check / return tuples from `blockno` as part of a
     * bitmap table scan. `scan` was started via table_beginscan_bm(). Return
     * false if the bitmap is exhausted and true otherwise.
     *
     * This will typically read and pin the target block, and do the necessary
     * work to allow scan_bitmap_next_tuple() to return tuples (e.g. it might
     * make sense to perform tuple visibility checks at this time).
     *
     * `lossy_pages` and `exact_pages` are EXPLAIN counters that can be
     * incremented by the table AM to indicate whether or not the block's
     * representation in the bitmap is lossy.
     *
     * `recheck` is set by the table AM to indicate whether or not the tuples
     * from this block should be rechecked. Tuples from lossy pages will
     * always need to be rechecked, but some non-lossy pages' tuples may also
     * require recheck.
     *
     * Prefetching additional data from the bitmap is left to the table AM.
     *
     * Optional callback, but either both scan_bitmap_next_block and
     * scan_bitmap_next_tuple need to exist, or neither.
     */
    bool        (*scan_bitmap_next_block) (TableScanDesc scan,
                                           bool *recheck,
                                           uint64 *lossy_pages,
                                           uint64 *exact_pages);

    /*
     * Fetch the next tuple of a bitmap table scan into `slot` and return true
     * if a visible tuple was found, false otherwise.
     *
     * Optional callback, but either both scan_bitmap_next_block and
     * scan_bitmap_next_tuple need to exist, or neither.
     * `lossy_pages` is incremented if the bitmap is lossy for the selected
     * page; otherwise, `exact_pages` is incremented. These are tracked for
     * display in EXPLAIN ANALYZE output.
     *
     * Prefetching additional data from the bitmap is left to the table AM.
     *
     * This is an optional callback.
     */
    bool        (*scan_bitmap_next_tuple) (TableScanDesc scan,
                                           TupleTableSlot *slot);
                                           TupleTableSlot *slot,
                                           bool *recheck,
                                           uint64 *lossy_pages,
                                           uint64 *exact_pages);

    /*
     * Prepare to fetch tuples from the next block in a sample scan. Return
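Under the revised contract, a table AM both advances through the bitmap's blocks
and produces tuples from this single callback. The following is a minimal,
hypothetical sketch of what a non-heap AM's implementation could look like; the
names MyAMScanDesc, my_am_load_next_block(), my_am_fetch_tuple_at(), and
process-internal fields are invented for illustration and are not part of
PostgreSQL.

    /* Hypothetical AM-specific scan descriptor; embeds the common base. */
    typedef struct MyAMScanDesc
    {
        TableScanDescData rs_base;      /* standard embedding pattern */
        int         cur_index;          /* next tuple slot index on this block */
        int         ntuples_on_block;   /* tuples gathered for current block */
    } MyAMScanDesc;

    /* Invented helpers standing in for the AM's block and tuple machinery. */
    static bool my_am_load_next_block(MyAMScanDesc *mscan, bool *recheck, bool *lossy);
    static void my_am_fetch_tuple_at(MyAMScanDesc *mscan, int index, TupleTableSlot *slot);

    /*
     * Sketch only: a hypothetical scan_bitmap_next_tuple implementation that
     * follows the callback contract documented above.
     */
    static bool
    my_am_scan_bitmap_next_tuple(TableScanDesc scan,
                                 TupleTableSlot *slot,
                                 bool *recheck,
                                 uint64 *lossy_pages,
                                 uint64 *exact_pages)
    {
        MyAMScanDesc *mscan = (MyAMScanDesc *) scan;

        /* No tuples left on the current block: advance inside the AM. */
        while (mscan->cur_index >= mscan->ntuples_on_block)
        {
            bool        lossy;

            if (!my_am_load_next_block(mscan, recheck, &lossy))
                return false;   /* bitmap exhausted */

            /* Maintain the EXPLAIN counters as the contract requires. */
            if (lossy)
                (*lossy_pages)++;
            else
                (*exact_pages)++;
        }

        /* Store the next visible tuple from the current block into the slot. */
        my_am_fetch_tuple_at(mscan, mscan->cur_index++, slot);
        return true;
    }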
@@ -1939,51 +1919,24 @@ table_relation_estimate_size(Relation rel, int32 *attr_widths,
 */

/*
 * Prepare to fetch / check / return tuples as part of a bitmap table scan.
 * `scan` needs to have been started via table_beginscan_bm(). Returns false
 * if there are no more blocks in the bitmap, true otherwise.
 * Fetch / check / return tuples as part of a bitmap table scan. `scan` needs
 * to have been started via table_beginscan_bm(). Fetch the next tuple of a
 * bitmap table scan into `slot` and return true if a visible tuple was found,
 * false otherwise.
 *
 * `lossy_pages` and `exact_pages` are EXPLAIN counters that can be
 * incremented by the table AM to indicate whether or not the block's
 * representation in the bitmap is lossy.
 * `recheck` is set by the table AM to indicate whether or not the tuple in
 * `slot` should be rechecked. Tuples from lossy pages will always need to be
 * rechecked, but some non-lossy pages' tuples may also require recheck.
 *
 * `recheck` is set by the table AM to indicate whether or not the tuples
 * from this block should be rechecked.
 *
 * Note, this is an optionally implemented function, therefore should only be
 * used after verifying the presence (at plan time or such).
 */
static inline bool
table_scan_bitmap_next_block(TableScanDesc scan,
                             bool *recheck,
                             uint64 *lossy_pages,
                             uint64 *exact_pages)
{
    /*
     * We don't expect direct calls to table_scan_bitmap_next_block with valid
     * CheckXidAlive for catalog or regular tables. See detailed comments in
     * xact.c where these variables are declared.
     */
    if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
        elog(ERROR, "unexpected table_scan_bitmap_next_block call during logical decoding");

    return scan->rs_rd->rd_tableam->scan_bitmap_next_block(scan,
                                                           recheck,
                                                           lossy_pages,
                                                           exact_pages);
}

/*
 * Fetch the next tuple of a bitmap table scan into `slot` and return true if
 * a visible tuple was found, false otherwise.
 * table_scan_bitmap_next_block() needs to previously have selected a
 * block (i.e. returned true), and no previous
 * table_scan_bitmap_next_tuple() for the same block may have
 * returned false.
 * `lossy_pages` is incremented if the block's representation in the bitmap is
 * lossy; otherwise, `exact_pages` is incremented.
 */
static inline bool
table_scan_bitmap_next_tuple(TableScanDesc scan,
                             TupleTableSlot *slot)
                             TupleTableSlot *slot,
                             bool *recheck,
                             uint64 *lossy_pages,
                             uint64 *exact_pages)
{
    /*
     * We don't expect direct calls to table_scan_bitmap_next_tuple with valid
@@ -1994,7 +1947,10 @@ table_scan_bitmap_next_tuple(TableScanDesc scan,
        elog(ERROR, "unexpected table_scan_bitmap_next_tuple call during logical decoding");

    return scan->rs_rd->rd_tableam->scan_bitmap_next_tuple(scan,
                                                           slot);
                                                           slot,
                                                           recheck,
                                                           lossy_pages,
                                                           exact_pages);
}

/*
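For an AM-agnostic caller, the net usage pattern after this commit reduces to a
single loop over table_scan_bitmap_next_tuple(), with no per-block call. The
sketch below distills the BitmapHeapNext() change shown earlier; scan setup and
teardown are elided (the wrapper's comment requires the scan to have been started
via table_beginscan_bm()), and process_tuple() is an invented consumer hook.

    /*
     * Sketch of the caller-side pattern after this commit.  Assumes `scan` was
     * already set up via table_beginscan_bm(); process_tuple() is invented.
     */
    static void
    drive_bitmap_scan(TableScanDesc scan, TupleTableSlot *slot)
    {
        bool        recheck;
        uint64      lossy_pages = 0;
        uint64      exact_pages = 0;

        while (table_scan_bitmap_next_tuple(scan, slot, &recheck,
                                            &lossy_pages, &exact_pages))
        {
            CHECK_FOR_INTERRUPTS();

            /*
             * When the AM reports recheck, the caller must re-evaluate its
             * original quals against the tuple, as BitmapHeapNext() does with
             * ExecQualAndReset().
             */
            process_tuple(slot, recheck);   /* invented consumer hook */
        }
    }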