
tableam: Add and use table_fetch_row_version().

This is essentially the tableam version of heap_fetch(), i.e. fetching
a tuple identified by a tid and performing visibility checks.

Note that this is different from table_index_fetch_tuple(), which is for
index lookups. The latter therefore has to handle a tid pointing to an
earlier version of a tuple if the AM uses an optimization like heap's
HOT. Add comments to that end.

This commit removes the stats_relation argument from heap_fetch, as
it's been unused for a long time.
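
For illustration only (not part of this commit), a caller-side sketch of the
new API might look like the following; the wrapper name
fetch_visible_version() is hypothetical, and the example assumes the tableam
slot helpers such as table_slot_create() are available:

/*
 * Hypothetical caller sketch: create a slot the table AM can fill, then
 * fetch the row version that "tid" names, applying "snapshot" for
 * visibility.  Unlike table_index_fetch_tuple(), the tid must reference
 * the exact row version; no HOT-chain following is done here.
 */
#include "postgres.h"

#include "access/tableam.h"
#include "executor/tuptable.h"

static bool
fetch_visible_version(Relation rel, ItemPointer tid, Snapshot snapshot)
{
    TupleTableSlot *slot = table_slot_create(rel, NULL);
    bool            found;

    found = table_fetch_row_version(rel, tid, snapshot, slot);

    ExecDropSingleTupleTableSlot(slot);
    return found;
}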

Author: Andres Freund
Reviewed-By: Haribabu Kommi
Discussion: https://postgr.es/m/20180703070645.wchpu5muyto5n647@alap3.anarazel.de
Andres Freund
2019-03-25 00:13:42 -07:00
parent c77e12208c
commit 9a8ee1dc65
9 changed files with 91 additions and 111 deletions

src/backend/access/heap/heapam.c

@@ -1388,8 +1388,7 @@ bool
 heap_fetch(Relation relation,
            Snapshot snapshot,
            HeapTuple tuple,
-           Buffer *userbuf,
-           Relation stats_relation)
+           Buffer *userbuf)
 {
     ItemPointer tid = &(tuple->t_self);
     ItemId      lp;
@@ -1468,10 +1467,6 @@ heap_fetch(Relation relation,
      */
     *userbuf = buffer;
 
-    /* Count the successful fetch against appropriate rel, if any */
-    if (stats_relation != NULL)
-        pgstat_count_heap_fetch(stats_relation);
-
     return true;
 }
 
@@ -5097,7 +5092,7 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
         block = ItemPointerGetBlockNumber(&tupid);
         ItemPointerCopy(&tupid, &(mytup.t_self));
 
-        if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, NULL))
+        if (!heap_fetch(rel, SnapshotAny, &mytup, &buf))
         {
             /*
              * if we fail to find the updated version of the tuple, it's
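
The matching prototype change in src/include/access/heapam.h is not among the
hunks shown above; after this commit the declaration reads roughly as follows
(sketch, not a quoted hunk):

/* access/heapam.h: stats_relation is gone from the fetch interface */
extern bool heap_fetch(Relation relation, Snapshot snapshot,
                       HeapTuple tuple, Buffer *userbuf);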

src/backend/access/heap/heapam_handler.c

@@ -148,6 +148,30 @@ heapam_index_fetch_tuple(struct IndexFetchTableData *scan,
  * ------------------------------------------------------------------------
  */
 
+static bool
+heapam_fetch_row_version(Relation relation,
+                         ItemPointer tid,
+                         Snapshot snapshot,
+                         TupleTableSlot *slot)
+{
+    BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
+    Buffer      buffer;
+
+    Assert(TTS_IS_BUFFERTUPLE(slot));
+
+    bslot->base.tupdata.t_self = *tid;
+    if (heap_fetch(relation, snapshot, &bslot->base.tupdata, &buffer))
+    {
+        /* store in slot, transferring existing pin */
+        ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata, slot, buffer);
+        slot->tts_tableOid = RelationGetRelid(relation);
+
+        return true;
+    }
+
+    return false;
+}
+
 static bool
 heapam_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot,
                                 Snapshot snapshot)
@@ -338,7 +362,7 @@ tuple_lock_retry:
                              errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
 
                 tuple->t_self = *tid;
-                if (heap_fetch(relation, &SnapshotDirty, tuple, &buffer, NULL))
+                if (heap_fetch(relation, &SnapshotDirty, tuple, &buffer))
                 {
                     /*
                      * If xmin isn't what we're expecting, the slot must have
@@ -517,6 +541,7 @@ static const TableAmRoutine heapam_methods = {
     .tuple_update = heapam_tuple_update,
     .tuple_lock = heapam_tuple_lock,
 
+    .tuple_fetch_row_version = heapam_fetch_row_version,
     .tuple_satisfies_snapshot = heapam_tuple_satisfies_snapshot,
 };
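
For completeness, the callback registered above is reached through an inline
wrapper in access/tableam.h along these lines (paraphrased sketch, not one of
the hunks shown here):

/*
 * Dispatcher sketch: callers invoke table_fetch_row_version(), which
 * forwards to the AM's tuple_fetch_row_version callback
 * (heapam_fetch_row_version for heap relations).
 */
static inline bool
table_fetch_row_version(Relation rel,
                        ItemPointer tid,
                        Snapshot snapshot,
                        TupleTableSlot *slot)
{
    return rel->rd_tableam->tuple_fetch_row_version(rel, tid, snapshot, slot);
}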