tableam: Add and use scan APIs.
To allow table accesses to not depend directly on heap, several
new abstractions are needed. Specifically:
1) Heap scans need to be generalized into table scans. Do this by
introducing TableScanDesc, which will be the "base class" for
individual AMs. This contains the AM-independent fields from
HeapScanDesc.
The previous heap_{beginscan,rescan,endscan} et al. have been
replaced with a table_ version.
There's no direct replacement for heap_getnext(), as that returned
a HeapTuple, which is undesirable for other AMs. Instead there's
table_scan_getnextslot(). But note that heap_getnext() lives on;
it's still widely used to access catalog tables.
This is achieved by new scan_begin, scan_end, scan_rescan,
scan_getnextslot callbacks.
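To illustrate, here's a minimal sketch of a scan loop against the new
API. The function name count_visible_tuples is invented for this
example; rel and snapshot are assumed to be a valid open Relation and
a registered Snapshot:

    #include "postgres.h"

    #include "access/tableam.h"
    #include "executor/tuptable.h"

    /* Count the tuples visible to "snapshot", whatever the table's AM. */
    static uint64
    count_visible_tuples(Relation rel, Snapshot snapshot)
    {
        TableScanDesc scan;
        TupleTableSlot *slot;
        uint64      ntuples = 0;

        /* a slot matching the AM's native tuple representation, see a) below */
        slot = table_slot_create(rel, NULL);
        scan = table_beginscan(rel, snapshot, 0, NULL);

        while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
            ntuples++;

        table_endscan(scan);
        ExecDropSingleTupleTableSlot(slot);

        return ntuples;
    }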
2) The portion of a parallel scan that's shared between backends needs
to be set up and coordinated without the caller doing per-AM
work. To achieve that, new parallelscan_{estimate, initialize,
reinitialize} callbacks are introduced, which operate on a new
ParallelTableScanDesc, which again can be subclassed by AMs.
As it is likely that several AMs are going to be block oriented,
block-oriented callbacks that can be shared between such AMs are
provided and used by heap: table_block_parallelscan_{estimate,
initialize, reinitialize} serve as the callbacks, and
table_block_parallelscan_{nextpage, init} are for use within AMs. These
operate on a ParallelBlockTableScanDesc.
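As a sketch, a block-oriented AM can simply point these callbacks at
the shared helpers, the way heap's own TableAmRoutine does. The struct
name below is invented and most members are elided:

    #include "postgres.h"

    #include "access/tableam.h"

    static const TableAmRoutine sample_block_am_methods = {
        .type = T_TableAmRoutine,

        /* ... scan, fetch, and modification callbacks elided ... */

        /* reuse the shared block-oriented parallel scan support */
        .parallelscan_estimate = table_block_parallelscan_estimate,
        .parallelscan_initialize = table_block_parallelscan_initialize,
        .parallelscan_reinitialize = table_block_parallelscan_reinitialize,
    };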
3) Index scans need to be able to access tables to return a tuple, and
state like buffers needs to be kept across individual accesses to
the heap. That's now handled by introducing a sort-of-scan,
IndexFetchTable, which again is intended to be subclassed by
individual AMs (for heap, IndexFetchHeap).
The relevant callbacks for an AM are index_fetch_{begin, reset,
end} to manage the necessary state, and index_fetch_tuple to
retrieve an indexed tuple. Note that index_fetch_tuple
implementations need to be smarter than just blindly fetching the
tuples for AMs that have optimizations similar to heap's HOT - the
currently alive tuple in the update chain needs to be fetched if
appropriate.
As with table_scan_getnextslot(), it's undesirable to continue
returning HeapTuples. Thus index_fetch_heap (we might want to rename
that later) now accepts a slot as an argument. Core code doesn't
have many call sites performing index scans without going through
the systable_* API (in contrast to the numerous heap_getnext()
call sites working directly with HeapTuples).
Index scans now store the result of a search in
IndexScanDesc->xs_heaptid, rather than xs_ctup->t_self. As the
target is not generally a HeapTuple anymore, that seems cleaner.
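A rough caller-side sketch of how these pieces combine, assuming an
already-open IndexScanDesc and a compatible slot (consume_index_scan
is an invented name):

    #include "postgres.h"

    #include "access/genam.h"
    #include "access/relscan.h"
    #include "executor/tuptable.h"

    /* Sketch: drain an index scan, materializing each match into "slot". */
    static void
    consume_index_scan(IndexScanDesc scan, TupleTableSlot *slot)
    {
        /* each successful step leaves the match's TID in scan->xs_heaptid */
        while (index_getnext_tid(scan, ForwardScanDirection) != NULL)
        {
            /*
             * Fetch the appropriate member of the update chain (for heap,
             * the live tuple of a HOT chain); false means no visible tuple.
             */
            if (!index_fetch_heap(scan, slot))
                continue;

            /* ... process the tuple in "slot" ... */
        }
    }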
To be able to sensibly adapt code to use the above, two further
callbacks have been introduced:
a) slot_callbacks returns a TupleTableSlotOps* suitable for creating
slots capable of holding a tuple of the AM's
type. table_slot_callbacks() and table_slot_create() are based
upon that, but have additional logic to deal with views, foreign
tables, etc. (a small usage sketch follows after this item).
While this change could have been done separately, nearly all the
call sites that needed to be adapted for the rest of this commit
would also have needed to be adapted for
table_slot_callbacks(), making separation not worthwhile.
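The sketch promised above, inside a function where rel is an open
Relation:

    /* ask the relation's AM what kind of slot it wants, then build one */
    const TupleTableSlotOps *tts_ops = table_slot_callbacks(rel);
    TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(rel),
                                                    tts_ops);

table_slot_create() essentially wraps these two calls, optionally
registering the new slot in a caller-supplied list for later cleanup.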
b) tuple_satisfies_snapshot checks whether the tuple in a slot is
currently visible according to a snapshot. That's required as a few
places now don't have a buffer + HeapTuple around, but only a
slot (which, in heap's case, internally has that information).
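To callers this is exposed as the table_tuple_satisfies_snapshot()
wrapper in today's tree; a fragment of a sketch, assuming rel, slot,
and snapshot are in scope:

    /* visibility check on a slot; no separate buffer + HeapTuple needed */
    if (table_tuple_satisfies_snapshot(rel, slot, snapshot))
    {
        /* the tuple in "slot" is visible under "snapshot" */
    }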
Additionally, a few infrastructure changes were needed:
I) SysScanDesc, as used by systable_{beginscan, getnext} et al., now
internally uses a slot to keep track of tuples. While
systable_getnext() still returns HeapTuples, and will do so for the
foreseeable future, the index API (see 3) above) now only deals with
slots.
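For reference, a sketch of the unchanged caller-side systable pattern
(walk_catalog is an invented name; rel is assumed to be an open
catalog relation, scanned sequentially with no keys):

    #include "postgres.h"

    #include "access/genam.h"
    #include "access/htup.h"

    /* Sketch: visit every row of a catalog table via the systable API. */
    static void
    walk_catalog(Relation rel)
    {
        SysScanDesc sscan;
        HeapTuple   tup;

        /* InvalidOid + indexOK = false means: plain sequential scan */
        sscan = systable_beginscan(rel, InvalidOid, false, NULL, 0, NULL);

        while ((tup = systable_getnext(sscan)) != NULL)
        {
            /* still a HeapTuple; internally it is shuttled through a slot */
            Assert(HeapTupleIsValid(tup));
        }

        systable_endscan(sscan);
    }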
The remainder, and largest part, of this commit is then adjusting all
scans in postgres to use the new APIs.
Author: Andres Freund, Haribabu Kommi, Alvaro Herrera
Discussion:
https://postgr.es/m/20180703070645.wchpu5muyto5n647@alap3.anarazel.de
https://postgr.es/m/20160812231527.GA690404@alvherre.pgsql
src/backend/catalog/index.c:

@@ -28,6 +28,7 @@
 #include "access/multixact.h"
 #include "access/relscan.h"
 #include "access/sysattr.h"
+#include "access/tableam.h"
 #include "access/transam.h"
 #include "access/visibilitymap.h"
 #include "access/xact.h"
@@ -2138,7 +2139,7 @@ index_update_stats(Relation rel,
 		ReindexIsProcessingHeap(RelationRelationId))
 	{
 		/* don't assume syscache will work */
-		HeapScanDesc pg_class_scan;
+		TableScanDesc pg_class_scan;
 		ScanKeyData key[1];
 
 		ScanKeyInit(&key[0],
@@ -2146,10 +2147,10 @@ index_update_stats(Relation rel,
 					BTEqualStrategyNumber, F_OIDEQ,
 					ObjectIdGetDatum(relid));
 
-		pg_class_scan = heap_beginscan_catalog(pg_class, 1, key);
+		pg_class_scan = table_beginscan_catalog(pg_class, 1, key);
 		tuple = heap_getnext(pg_class_scan, ForwardScanDirection);
 		tuple = heap_copytuple(tuple);
-		heap_endscan(pg_class_scan);
+		table_endscan(pg_class_scan);
 	}
 	else
 	{
@@ -2431,7 +2432,7 @@ IndexBuildHeapScan(Relation heapRelation,
 				   bool allow_sync,
 				   IndexBuildCallback callback,
 				   void *callback_state,
-				   HeapScanDesc scan)
+				   TableScanDesc scan)
 {
 	return IndexBuildHeapRangeScan(heapRelation, indexRelation,
 								   indexInfo, allow_sync,
@@ -2460,8 +2461,9 @@ IndexBuildHeapRangeScan(Relation heapRelation,
 						BlockNumber numblocks,
 						IndexBuildCallback callback,
 						void *callback_state,
-						HeapScanDesc scan)
+						TableScanDesc scan)
 {
+	HeapScanDesc hscan;
 	bool		is_system_catalog;
 	bool		checking_uniqueness;
 	HeapTuple	heapTuple;
@@ -2502,8 +2504,7 @@ IndexBuildHeapRangeScan(Relation heapRelation,
 	 */
 	estate = CreateExecutorState();
 	econtext = GetPerTupleExprContext(estate);
-	slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation),
-									&TTSOpsHeapTuple);
+	slot = table_slot_create(heapRelation, NULL);
 
 	/* Arrange for econtext's scan tuple to be the tuple under test */
 	econtext->ecxt_scantuple = slot;
@@ -2540,12 +2541,12 @@ IndexBuildHeapRangeScan(Relation heapRelation,
 		else
 			snapshot = SnapshotAny;
 
-		scan = heap_beginscan_strat(heapRelation,	/* relation */
-									snapshot,	/* snapshot */
-									0,	/* number of keys */
-									NULL,	/* scan key */
-									true,	/* buffer access strategy OK */
-									allow_sync);	/* syncscan OK? */
+		scan = table_beginscan_strat(heapRelation,	/* relation */
+									 snapshot,	/* snapshot */
+									 0,	/* number of keys */
+									 NULL,	/* scan key */
+									 true,	/* buffer access strategy OK */
+									 allow_sync);	/* syncscan OK? */
 	}
 	else
 	{
@@ -2561,6 +2562,8 @@ IndexBuildHeapRangeScan(Relation heapRelation,
 		snapshot = scan->rs_snapshot;
 	}
 
+	hscan = (HeapScanDesc) scan;
+
 	/*
 	 * Must call GetOldestXmin() with SnapshotAny. Should never call
 	 * GetOldestXmin() with MVCC snapshot. (It's especially worth checking
@@ -2618,15 +2621,15 @@ IndexBuildHeapRangeScan(Relation heapRelation,
 		 * tuple per HOT-chain --- else we could create more than one index
 		 * entry pointing to the same root tuple.
 		 */
-		if (scan->rs_cblock != root_blkno)
+		if (hscan->rs_cblock != root_blkno)
 		{
-			Page		page = BufferGetPage(scan->rs_cbuf);
+			Page		page = BufferGetPage(hscan->rs_cbuf);
 
-			LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
+			LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
 			heap_get_root_tuples(page, root_offsets);
-			LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+			LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
 
-			root_blkno = scan->rs_cblock;
+			root_blkno = hscan->rs_cblock;
 		}
 
 		if (snapshot == SnapshotAny)
@@ -2643,7 +2646,7 @@ IndexBuildHeapRangeScan(Relation heapRelation,
 			 * be conservative about it. (This remark is still correct even
 			 * with HOT-pruning: our pin on the buffer prevents pruning.)
 			 */
-			LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
+			LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
 
 			/*
 			 * The criteria for counting a tuple as live in this block need to
@@ -2652,7 +2655,7 @@ IndexBuildHeapRangeScan(Relation heapRelation,
 			 * values, e.g. when there are many recently-dead tuples.
 			 */
 			switch (HeapTupleSatisfiesVacuum(heapTuple, OldestXmin,
-											 scan->rs_cbuf))
+											 hscan->rs_cbuf))
 			{
 				case HEAPTUPLE_DEAD:
 					/* Definitely dead, we can ignore it */
@@ -2733,7 +2736,7 @@ IndexBuildHeapRangeScan(Relation heapRelation,
 						/*
 						 * Must drop the lock on the buffer before we wait
 						 */
-						LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+						LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
 						XactLockTableWait(xwait, heapRelation,
 										  &heapTuple->t_self,
 										  XLTW_InsertIndexUnique);
@@ -2800,7 +2803,7 @@ IndexBuildHeapRangeScan(Relation heapRelation,
 						/*
 						 * Must drop the lock on the buffer before we wait
 						 */
-						LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+						LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
 						XactLockTableWait(xwait, heapRelation,
 										  &heapTuple->t_self,
 										  XLTW_InsertIndexUnique);
@@ -2852,7 +2855,7 @@ IndexBuildHeapRangeScan(Relation heapRelation,
 					break;
 			}
 
-			LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+			LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
 
 			if (!indexIt)
 				continue;
@@ -2867,7 +2870,7 @@ IndexBuildHeapRangeScan(Relation heapRelation,
 		MemoryContextReset(econtext->ecxt_per_tuple_memory);
 
 		/* Set up for predicate or expression evaluation */
-		ExecStoreHeapTuple(heapTuple, slot, false);
+		ExecStoreBufferHeapTuple(heapTuple, slot, hscan->rs_cbuf);
 
 		/*
 		 * In a partial index, discard tuples that don't satisfy the
@@ -2931,7 +2934,7 @@ IndexBuildHeapRangeScan(Relation heapRelation,
 		}
 	}
 
-	heap_endscan(scan);
+	table_endscan(scan);
 
 	/* we can now forget our snapshot, if set and registered by us */
 	if (need_unregister_snapshot)
@@ -2966,8 +2969,7 @@ IndexCheckExclusion(Relation heapRelation,
 					Relation indexRelation,
 					IndexInfo *indexInfo)
 {
-	HeapScanDesc scan;
-	HeapTuple	heapTuple;
+	TableScanDesc scan;
 	Datum		values[INDEX_MAX_KEYS];
 	bool		isnull[INDEX_MAX_KEYS];
 	ExprState  *predicate;
@@ -2990,8 +2992,7 @@ IndexCheckExclusion(Relation heapRelation,
 	 */
 	estate = CreateExecutorState();
 	econtext = GetPerTupleExprContext(estate);
-	slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation),
-									&TTSOpsHeapTuple);
+	slot = table_slot_create(heapRelation, NULL);
 
 	/* Arrange for econtext's scan tuple to be the tuple under test */
 	econtext->ecxt_scantuple = slot;
@@ -3003,22 +3004,17 @@ IndexCheckExclusion(Relation heapRelation,
 	 * Scan all live tuples in the base relation.
 	 */
 	snapshot = RegisterSnapshot(GetLatestSnapshot());
-	scan = heap_beginscan_strat(heapRelation,	/* relation */
-								snapshot,	/* snapshot */
-								0,	/* number of keys */
-								NULL,	/* scan key */
-								true,	/* buffer access strategy OK */
-								true);	/* syncscan OK */
+	scan = table_beginscan_strat(heapRelation,	/* relation */
+								 snapshot,	/* snapshot */
+								 0,	/* number of keys */
+								 NULL,	/* scan key */
+								 true,	/* buffer access strategy OK */
+								 true);	/* syncscan OK */
 
-	while ((heapTuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
+	while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
 	{
 		CHECK_FOR_INTERRUPTS();
 
 		MemoryContextReset(econtext->ecxt_per_tuple_memory);
 
-		/* Set up for predicate or expression evaluation */
-		ExecStoreHeapTuple(heapTuple, slot, false);
-
 		/*
 		 * In a partial index, ignore tuples that don't satisfy the predicate.
 		 */
@@ -3042,11 +3038,13 @@ IndexCheckExclusion(Relation heapRelation,
 		 */
 		check_exclusion_constraint(heapRelation,
 								   indexRelation, indexInfo,
-								   &(heapTuple->t_self), values, isnull,
+								   &(slot->tts_tid), values, isnull,
 								   estate, true);
 
 		MemoryContextReset(econtext->ecxt_per_tuple_memory);
 	}
 
-	heap_endscan(scan);
+	table_endscan(scan);
 	UnregisterSnapshot(snapshot);
 
 	ExecDropSingleTupleTableSlot(slot);
@@ -3281,7 +3279,8 @@ validate_index_heapscan(Relation heapRelation,
 						Snapshot snapshot,
 						v_i_state *state)
 {
-	HeapScanDesc scan;
+	TableScanDesc scan;
+	HeapScanDesc hscan;
 	HeapTuple	heapTuple;
 	Datum		values[INDEX_MAX_KEYS];
 	bool		isnull[INDEX_MAX_KEYS];
@@ -3324,12 +3323,13 @@ validate_index_heapscan(Relation heapRelation,
 	 * here, because it's critical that we read from block zero forward to
 	 * match the sorted TIDs.
 	 */
-	scan = heap_beginscan_strat(heapRelation,	/* relation */
-								snapshot,	/* snapshot */
-								0,	/* number of keys */
-								NULL,	/* scan key */
-								true,	/* buffer access strategy OK */
-								false);	/* syncscan not OK */
+	scan = table_beginscan_strat(heapRelation,	/* relation */
+								 snapshot,	/* snapshot */
+								 0,	/* number of keys */
+								 NULL,	/* scan key */
+								 true,	/* buffer access strategy OK */
+								 false);	/* syncscan not OK */
+	hscan = (HeapScanDesc) scan;
 
 	/*
 	 * Scan all tuples matching the snapshot.
@@ -3358,17 +3358,17 @@ validate_index_heapscan(Relation heapRelation,
 		 * already-passed-over tuplesort output TIDs of the current page. We
 		 * clear that array here, when advancing onto a new heap page.
 		 */
-		if (scan->rs_cblock != root_blkno)
+		if (hscan->rs_cblock != root_blkno)
 		{
-			Page		page = BufferGetPage(scan->rs_cbuf);
+			Page		page = BufferGetPage(hscan->rs_cbuf);
 
-			LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
+			LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
 			heap_get_root_tuples(page, root_offsets);
-			LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+			LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
 
 			memset(in_index, 0, sizeof(in_index));
 
-			root_blkno = scan->rs_cblock;
+			root_blkno = hscan->rs_cblock;
 		}
 
 		/* Convert actual tuple TID to root TID */
@@ -3493,7 +3493,7 @@ validate_index_heapscan(Relation heapRelation,
 		}
 	}
 
-	heap_endscan(scan);
+	table_endscan(scan);
 
 	ExecDropSingleTupleTableSlot(slot);