To allow table accesses to be not directly dependent on heap, several new abstractions are needed. Specifically:

1) Heap scans need to be generalized into table scans. Do this by introducing TableScanDesc, which will be the "base class" for individual AMs. This contains the AM-independent fields from HeapScanDesc.

   The previous heap_{beginscan,rescan,endscan} et al. have been replaced with table_ versions. There's no direct replacement for heap_getnext(), as that returned a HeapTuple, which is undesirable for other AMs. Instead there's table_scan_getnextslot(). But note that heap_getnext() lives on; it's still used widely to access catalog tables. (A minimal caller-side sketch of the new API follows below.)

   This is achieved by new scan_begin, scan_end, scan_rescan and scan_getnextslot callbacks.

2) The portion of parallel-scan state that's shared between backends needs to be set up without the user doing per-AM work. To achieve that, new parallelscan_{estimate, initialize, reinitialize} callbacks are introduced, which operate on a new ParallelTableScanDesc, which again can be subclassed by AMs.

   As it is likely that several AMs are going to be block oriented, block-oriented callbacks that can be shared between such AMs are provided and used by heap: table_block_parallelscan_{estimate, initialize, reinitialize} as callbacks, and table_block_parallelscan_{nextpage, init} for use inside AMs. These operate on a ParallelBlockTableScanDesc.

3) Index scans need to be able to access tables to return a tuple, and there needs to be state across individual accesses to the heap to store state like buffers. That's now handled by introducing a sort-of-scan, IndexFetchTable, which again is intended to be subclassed by individual AMs (for heap, IndexFetchHeap).

   The relevant callbacks for an AM are index_fetch_{begin, reset, end} to manage the necessary state, and index_fetch_tuple to retrieve an indexed tuple. Note that index_fetch_tuple implementations need to be smarter than blindly fetching the indicated tuple: for AMs that have optimizations similar to heap's HOT, the currently live tuple in the update chain needs to be fetched if appropriate.

   Similar to table_scan_getnextslot(), it's undesirable to continue to return HeapTuples. Thus index_fetch_heap (we might want to rename that later) now accepts a slot as an argument. Core code doesn't have many call sites performing index scans without going through the systable_* API (in contrast to the loads of heap_getnext() calls working directly with HeapTuples).

   Index scans now store the result of a search in IndexScanDesc->xs_heaptid rather than xs_ctup->t_self. As the target is not generally a HeapTuple anymore, that seems cleaner.

To be able to sensibly adapt code to use the above, two further callbacks have been introduced:

a) slot_callbacks returns a TupleTableSlotOps* suitable for creating slots capable of holding a tuple of the AM's type. table_slot_callbacks() and table_slot_create() are based upon that, but have additional logic to deal with views, foreign tables, etc.

   While this change could have been done separately, nearly all the call sites that needed to be adapted for the rest of this commit would also have needed to be adapted for table_slot_callbacks(), making separation not worthwhile.

b) tuple_satisfies_snapshot checks whether the tuple in a slot is currently visible according to a snapshot. That's required because a few places now don't have a buffer + HeapTuple around, but only a slot (which, in heap's case, internally has that information).
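For illustration only (this sketch is not part of the commit), a caller-side scan loop under the new API looks roughly like the following. It assumes "rel" is an already opened and suitably locked Relation, and it elides snapshot registration and error handling:

    /* create a slot whose ops match rel's table AM */
    TupleTableSlot *slot = table_slot_create(rel, NULL);
    /* begin an AM-independent scan; 0/NULL means no scan keys */
    TableScanDesc scan = table_beginscan(rel, GetActiveSnapshot(), 0, NULL);

    while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
    {
        /* deform the current tuple; its datums are then in the slot */
        slot_getallattrs(slot);
    }

    table_endscan(scan);
    ExecDropSingleTupleTableSlot(slot);

The same loop works unchanged for any AM whose scan_begin/scan_getnextslot callbacks are installed; nothing in it refers to HeapTuple or HeapScanDesc.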
Additionally, a few infrastructure changes were needed:

I) SysScanDesc, as used by systable_{beginscan, getnext} et al., now internally uses a slot to keep track of tuples. While systable_getnext() still returns HeapTuples, and will for the foreseeable future, the index API (see 3) above) now only deals with slots.

The remainder, and largest part, of this commit is adjusting all scans in postgres to use the new APIs.

Author: Andres Freund, Haribabu Kommi, Alvaro Herrera
Discussion: https://postgr.es/m/20180703070645.wchpu5muyto5n647@alap3.anarazel.de
    https://postgr.es/m/20160812231527.GA690404@alvherre.pgsql
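Also for illustration (not from the commit): catalog access through the systable_* wrappers keeps its HeapTuple-based interface even though a slot is now used internally. A typical loop, here scanning pg_class by relname purely as an example (the catalog, index and key value are illustrative assumptions, not anything this commit touches):

    Relation    rel = table_open(RelationRelationId, AccessShareLock);
    ScanKeyData key;
    SysScanDesc sscan;
    HeapTuple   tup;

    /* example key: match rows whose relname is "pg_am" */
    ScanKeyInit(&key,
                Anum_pg_class_relname,
                BTEqualStrategyNumber, F_NAMEEQ,
                CStringGetDatum("pg_am"));

    sscan = systable_beginscan(rel, ClassNameNspIndexId, true, NULL, 1, &key);

    while (HeapTupleIsValid(tup = systable_getnext(sscan)))
    {
        Form_pg_class relform = (Form_pg_class) GETSTRUCT(tup);

        /* ... inspect relform ... */
    }

    systable_endscan(sscan);
    table_close(rel, AccessShareLock);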
/*-------------------------------------------------------------------------
 *
 * nodeSamplescan.c
 *    Support routines for sample scans of relations (table sampling).
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/executor/nodeSamplescan.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/heapam.h"
#include "access/relscan.h"
#include "access/tableam.h"
#include "access/tsmapi.h"
#include "executor/executor.h"
#include "executor/nodeSamplescan.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "storage/predicate.h"
#include "utils/builtins.h"
#include "utils/rel.h"

static TupleTableSlot *SampleNext(SampleScanState *node);
static void tablesample_init(SampleScanState *scanstate);
static HeapTuple tablesample_getnext(SampleScanState *scanstate);
static bool SampleTupleVisible(HeapTuple tuple, OffsetNumber tupoffset,
                               HeapScanDesc scan);

/* ----------------------------------------------------------------
 *                      Scan Support
 * ----------------------------------------------------------------
 */

/* ----------------------------------------------------------------
 *      SampleNext
 *
 *      This is a workhorse for ExecSampleScan
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
SampleNext(SampleScanState *node)
{
    HeapTuple   tuple;
    TupleTableSlot *slot;
    HeapScanDesc hscan;

    /*
     * if this is first call within a scan, initialize
     */
    if (!node->begun)
        tablesample_init(node);

    /*
     * get the next tuple, and store it in our result slot
     */
    tuple = tablesample_getnext(node);

    slot = node->ss.ss_ScanTupleSlot;
    hscan = (HeapScanDesc) node->ss.ss_currentScanDesc;

    if (tuple)
        ExecStoreBufferHeapTuple(tuple, /* tuple to store */
                                 slot,  /* slot to store in */
                                 hscan->rs_cbuf);   /* tuple's buffer */
    else
        ExecClearTuple(slot);

    return slot;
}

/*
 * SampleRecheck -- access method routine to recheck a tuple in EvalPlanQual
 */
static bool
SampleRecheck(SampleScanState *node, TupleTableSlot *slot)
{
    /*
     * No need to recheck for SampleScan, since like SeqScan we don't pass any
     * checkable keys to heap_beginscan.
     */
    return true;
}

/* ----------------------------------------------------------------
 *      ExecSampleScan(node)
 *
 *      Scans the relation using the sampling method and returns
 *      the next qualifying tuple.
 *      We call the ExecScan() routine and pass it the appropriate
 *      access method functions.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecSampleScan(PlanState *pstate)
{
    SampleScanState *node = castNode(SampleScanState, pstate);

    return ExecScan(&node->ss,
                    (ExecScanAccessMtd) SampleNext,
                    (ExecScanRecheckMtd) SampleRecheck);
}

/* ----------------------------------------------------------------
 *      ExecInitSampleScan
 * ----------------------------------------------------------------
 */
SampleScanState *
ExecInitSampleScan(SampleScan *node, EState *estate, int eflags)
{
    SampleScanState *scanstate;
    TableSampleClause *tsc = node->tablesample;
    TsmRoutine *tsm;

    Assert(outerPlan(node) == NULL);
    Assert(innerPlan(node) == NULL);

    /*
     * create state structure
     */
    scanstate = makeNode(SampleScanState);
    scanstate->ss.ps.plan = (Plan *) node;
    scanstate->ss.ps.state = estate;
    scanstate->ss.ps.ExecProcNode = ExecSampleScan;

    /*
     * Miscellaneous initialization
     *
     * create expression context for node
     */
    ExecAssignExprContext(estate, &scanstate->ss.ps);

    /*
     * open the scan relation
     */
    scanstate->ss.ss_currentRelation =
        ExecOpenScanRelation(estate,
                             node->scan.scanrelid,
                             eflags);

    /* we won't set up the HeapScanDesc till later */
    scanstate->ss.ss_currentScanDesc = NULL;

    /* and create slot with appropriate rowtype */
    ExecInitScanTupleSlot(estate, &scanstate->ss,
                          RelationGetDescr(scanstate->ss.ss_currentRelation),
                          table_slot_callbacks(scanstate->ss.ss_currentRelation));

    /*
     * Initialize result type and projection.
     */
    ExecInitResultTypeTL(&scanstate->ss.ps);
    ExecAssignScanProjectionInfo(&scanstate->ss);

    /*
     * initialize child expressions
     */
    scanstate->ss.ps.qual =
        ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate);

    scanstate->args = ExecInitExprList(tsc->args, (PlanState *) scanstate);
    scanstate->repeatable =
        ExecInitExpr(tsc->repeatable, (PlanState *) scanstate);

    /*
     * If we don't have a REPEATABLE clause, select a random seed. We want to
     * do this just once, since the seed shouldn't change over rescans.
     */
    if (tsc->repeatable == NULL)
        scanstate->seed = random();

    /*
     * Finally, initialize the TABLESAMPLE method handler.
     */
    tsm = GetTsmRoutine(tsc->tsmhandler);
    scanstate->tsmroutine = tsm;
    scanstate->tsm_state = NULL;

    if (tsm->InitSampleScan)
        tsm->InitSampleScan(scanstate, eflags);

    /* We'll do BeginSampleScan later; we can't evaluate params yet */
    scanstate->begun = false;

    return scanstate;
}

/* ----------------------------------------------------------------
 *      ExecEndSampleScan
 *
 *      frees any storage allocated through C routines.
 * ----------------------------------------------------------------
 */
void
ExecEndSampleScan(SampleScanState *node)
{
    /*
     * Tell sampling function that we finished the scan.
     */
    if (node->tsmroutine->EndSampleScan)
        node->tsmroutine->EndSampleScan(node);

    /*
     * Free the exprcontext
     */
    ExecFreeExprContext(&node->ss.ps);

    /*
     * clean out the tuple table
     */
    if (node->ss.ps.ps_ResultTupleSlot)
        ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
    ExecClearTuple(node->ss.ss_ScanTupleSlot);

    /*
     * close heap scan
     */
    if (node->ss.ss_currentScanDesc)
        table_endscan(node->ss.ss_currentScanDesc);
}

/* ----------------------------------------------------------------
 *      ExecReScanSampleScan
 *
 *      Rescans the relation.
 *
 * ----------------------------------------------------------------
 */
void
ExecReScanSampleScan(SampleScanState *node)
{
    /* Remember we need to do BeginSampleScan again (if we did it at all) */
    node->begun = false;

    ExecScanReScan(&node->ss);
}


/*
 * Initialize the TABLESAMPLE method: evaluate params and call BeginSampleScan.
 */
static void
tablesample_init(SampleScanState *scanstate)
{
    TsmRoutine *tsm = scanstate->tsmroutine;
    ExprContext *econtext = scanstate->ss.ps.ps_ExprContext;
    Datum      *params;
    Datum       datum;
    bool        isnull;
    uint32      seed;
    bool        allow_sync;
    int         i;
    ListCell   *arg;

    params = (Datum *) palloc(list_length(scanstate->args) * sizeof(Datum));

    i = 0;
    foreach(arg, scanstate->args)
    {
        ExprState  *argstate = (ExprState *) lfirst(arg);

        params[i] = ExecEvalExprSwitchContext(argstate,
                                              econtext,
                                              &isnull);
        if (isnull)
            ereport(ERROR,
                    (errcode(ERRCODE_INVALID_TABLESAMPLE_ARGUMENT),
                     errmsg("TABLESAMPLE parameter cannot be null")));
        i++;
    }

    if (scanstate->repeatable)
    {
        datum = ExecEvalExprSwitchContext(scanstate->repeatable,
                                          econtext,
                                          &isnull);
        if (isnull)
            ereport(ERROR,
                    (errcode(ERRCODE_INVALID_TABLESAMPLE_REPEAT),
                     errmsg("TABLESAMPLE REPEATABLE parameter cannot be null")));

        /*
         * The REPEATABLE parameter has been coerced to float8 by the parser.
         * The reason for using float8 at the SQL level is that it will
         * produce unsurprising results both for users used to databases that
         * accept only integers in the REPEATABLE clause and for those who
         * might expect that REPEATABLE works like setseed() (a float in the
         * range from -1 to 1).
         *
         * We use hashfloat8() to convert the supplied value into a suitable
         * seed.  For regression-testing purposes, that has the convenient
         * property that REPEATABLE(0) gives a machine-independent result.
         */
        seed = DatumGetUInt32(DirectFunctionCall1(hashfloat8, datum));
    }
    else
    {
        /* Use the seed selected by ExecInitSampleScan */
        seed = scanstate->seed;
    }

    /* Set default values for params that BeginSampleScan can adjust */
    scanstate->use_bulkread = true;
    scanstate->use_pagemode = true;

    /* Let tablesample method do its thing */
    tsm->BeginSampleScan(scanstate,
                         params,
                         list_length(scanstate->args),
                         seed);

    /* We'll use syncscan if there's no NextSampleBlock function */
    allow_sync = (tsm->NextSampleBlock == NULL);

    /* Now we can create or reset the HeapScanDesc */
    if (scanstate->ss.ss_currentScanDesc == NULL)
    {
        scanstate->ss.ss_currentScanDesc =
            table_beginscan_sampling(scanstate->ss.ss_currentRelation,
                                     scanstate->ss.ps.state->es_snapshot,
                                     0, NULL,
                                     scanstate->use_bulkread,
                                     allow_sync,
                                     scanstate->use_pagemode);
    }
    else
    {
        table_rescan_set_params(scanstate->ss.ss_currentScanDesc, NULL,
                                scanstate->use_bulkread,
                                allow_sync,
                                scanstate->use_pagemode);
    }

    pfree(params);

    /* And we're initialized. */
    scanstate->begun = true;
}

/*
 * Get next tuple from TABLESAMPLE method.
 *
 * Note: an awful lot of this is copied-and-pasted from heapam.c.  It would
 * perhaps be better to refactor to share more code.
 */
static HeapTuple
tablesample_getnext(SampleScanState *scanstate)
{
    TsmRoutine *tsm = scanstate->tsmroutine;
    TableScanDesc scan = scanstate->ss.ss_currentScanDesc;
    HeapScanDesc hscan = (HeapScanDesc) scan;
    HeapTuple   tuple = &(hscan->rs_ctup);
    Snapshot    snapshot = scan->rs_snapshot;
    bool        pagemode = scan->rs_pageatatime;
    BlockNumber blockno;
    Page        page;
    bool        all_visible;
    OffsetNumber maxoffset;

    if (!hscan->rs_inited)
    {
        /*
         * return null immediately if relation is empty
         */
        if (hscan->rs_nblocks == 0)
        {
            Assert(!BufferIsValid(hscan->rs_cbuf));
            tuple->t_data = NULL;
            return NULL;
        }
        if (tsm->NextSampleBlock)
        {
            blockno = tsm->NextSampleBlock(scanstate);
            if (!BlockNumberIsValid(blockno))
            {
                tuple->t_data = NULL;
                return NULL;
            }
        }
        else
            blockno = hscan->rs_startblock;
        Assert(blockno < hscan->rs_nblocks);
        heapgetpage(scan, blockno);
        hscan->rs_inited = true;
    }
    else
    {
        /* continue from previously returned page/tuple */
        blockno = hscan->rs_cblock; /* current page */
    }

    /*
     * When not using pagemode, we must lock the buffer during tuple
     * visibility checks.
     */
    if (!pagemode)
        LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);

    page = (Page) BufferGetPage(hscan->rs_cbuf);
    all_visible = PageIsAllVisible(page) && !snapshot->takenDuringRecovery;
    maxoffset = PageGetMaxOffsetNumber(page);

    for (;;)
    {
        OffsetNumber tupoffset;
        bool        finished;

        CHECK_FOR_INTERRUPTS();

        /* Ask the tablesample method which tuples to check on this page. */
        tupoffset = tsm->NextSampleTuple(scanstate,
                                         blockno,
                                         maxoffset);

        if (OffsetNumberIsValid(tupoffset))
        {
            ItemId      itemid;
            bool        visible;

            /* Skip invalid tuple pointers. */
            itemid = PageGetItemId(page, tupoffset);
            if (!ItemIdIsNormal(itemid))
                continue;

            tuple->t_data = (HeapTupleHeader) PageGetItem(page, itemid);
            tuple->t_len = ItemIdGetLength(itemid);
            ItemPointerSet(&(tuple->t_self), blockno, tupoffset);

            if (all_visible)
                visible = true;
            else
                visible = SampleTupleVisible(tuple, tupoffset, hscan);

            /* in pagemode, heapgetpage did this for us */
            if (!pagemode)
                CheckForSerializableConflictOut(visible, scan->rs_rd, tuple,
                                                hscan->rs_cbuf, snapshot);

            if (visible)
            {
                /* Found visible tuple, return it. */
                if (!pagemode)
                    LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
                break;
            }
            else
            {
                /* Try next tuple from same page. */
                continue;
            }
        }

        /*
         * if we get here, it means we've exhausted the items on this page and
         * it's time to move to the next.
         */
        if (!pagemode)
            LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);

        if (tsm->NextSampleBlock)
        {
            blockno = tsm->NextSampleBlock(scanstate);
            Assert(!scan->rs_syncscan);
            finished = !BlockNumberIsValid(blockno);
        }
        else
        {
            /* Without NextSampleBlock, just do a plain forward seqscan. */
            blockno++;
            if (blockno >= hscan->rs_nblocks)
                blockno = 0;

            /*
             * Report our new scan position for synchronization purposes.
             *
             * Note: we do this before checking for end of scan so that the
             * final state of the position hint is back at the start of the
             * rel.  That's not strictly necessary, but otherwise when you run
             * the same query multiple times the starting position would shift
             * a little bit backwards on every invocation, which is confusing.
             * We don't guarantee any specific ordering in general, though.
             */
            if (scan->rs_syncscan)
                ss_report_location(scan->rs_rd, blockno);

            finished = (blockno == hscan->rs_startblock);
        }

        /*
         * Reached end of scan?
         */
        if (finished)
        {
            if (BufferIsValid(hscan->rs_cbuf))
                ReleaseBuffer(hscan->rs_cbuf);
            hscan->rs_cbuf = InvalidBuffer;
            hscan->rs_cblock = InvalidBlockNumber;
            tuple->t_data = NULL;
            hscan->rs_inited = false;
            return NULL;
        }

        Assert(blockno < hscan->rs_nblocks);
        heapgetpage(scan, blockno);

        /* Re-establish state for new page */
        if (!pagemode)
            LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);

        page = (Page) BufferGetPage(hscan->rs_cbuf);
        all_visible = PageIsAllVisible(page) && !snapshot->takenDuringRecovery;
        maxoffset = PageGetMaxOffsetNumber(page);
    }

    /* Count successfully-fetched tuples as heap fetches */
    pgstat_count_heap_getnext(scan->rs_rd);

    return &(hscan->rs_ctup);
}

/*
 * Check visibility of the tuple.
 */
static bool
SampleTupleVisible(HeapTuple tuple, OffsetNumber tupoffset, HeapScanDesc scan)
{
    if (scan->rs_base.rs_pageatatime)
    {
        /*
         * In pageatatime mode, heapgetpage() already did visibility checks,
         * so just look at the info it left in rs_vistuples[].
         *
         * We use a binary search over the known-sorted array.  Note: we could
         * save some effort if we insisted that NextSampleTuple select tuples
         * in increasing order, but it's not clear that there would be enough
         * gain to justify the restriction.
         */
        int         start = 0,
                    end = scan->rs_ntuples - 1;

        while (start <= end)
        {
            int         mid = (start + end) / 2;
            OffsetNumber curoffset = scan->rs_vistuples[mid];

            if (tupoffset == curoffset)
                return true;
            else if (tupoffset < curoffset)
                end = mid - 1;
            else
                start = mid + 1;
        }

        return false;
    }
    else
    {
        /* Otherwise, we have to check the tuple individually. */
        return HeapTupleSatisfiesVisibility(tuple,
                                            scan->rs_base.rs_snapshot,
                                            scan->rs_cbuf);
    }
}