From 0b96e734c5904ee26b8f622b3348620dda4bfee5 Mon Sep 17 00:00:00 2001
From: Andres Freund
Date: Mon, 12 Jan 2026 13:14:58 -0500
Subject: [PATCH] heapam: Add batch mode MVCC check and use it in page mode

There are two reasons for doing so:

1) It is generally faster to perform checks in a batched fashion, and making
   sequential scans faster is nice.

2) We would like to stop setting hint bits while pages are being written
   out. The necessary locking would be noticeable overhead for page mode
   scans if done for every tuple. With batching, the overhead can be
   amortized to only happen once per page.

There are substantial further optimization opportunities along these lines:

- Right now HeapTupleSatisfiesMVCCBatch() simply uses the single-tuple
  HeapTupleSatisfiesMVCC(), relying on the compiler to inline it. We could
  instead write an explicitly optimized version that avoids repeated xid
  tests.

- Introduce a batched version of the serializability test.

- Introduce a batched version of HeapTupleSatisfiesVacuum().

Reviewed-by: Melanie Plageman
Discussion: https://postgr.es/m/6rgb2nvhyvnszz4ul3wfzlf5rheb2kkwrglthnna7qhe24onwr@vw27225tkyar
---
 src/backend/access/heap/heapam.c            | 90 +++++++++++++++------
 src/backend/access/heap/heapam_visibility.c | 43 ++++++++++
 src/include/access/heapam.h                 | 17 ++++
 src/tools/pgindent/typedefs.list            |  1 +
 4 files changed, 128 insertions(+), 23 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index ad9d6338ec2..f30a56ecf55 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -522,42 +522,86 @@ page_collect_tuples(HeapScanDesc scan, Snapshot snapshot,
 					BlockNumber block, int lines,
 					bool all_visible, bool check_serializable)
 {
+	Oid			relid = RelationGetRelid(scan->rs_base.rs_rd);
 	int			ntup = 0;
-	OffsetNumber lineoff;
+	int			nvis = 0;
+	BatchMVCCState batchmvcc;
 
-	for (lineoff = FirstOffsetNumber; lineoff <= lines; lineoff++)
+	/* page-at-a-time mode should have been disabled otherwise */
+	Assert(IsMVCCSnapshot(snapshot));
+
+	/* first find all tuples on the page */
+	for (OffsetNumber lineoff = FirstOffsetNumber; lineoff <= lines; lineoff++)
 	{
 		ItemId		lpp = PageGetItemId(page, lineoff);
-		HeapTupleData loctup;
-		bool		valid;
+		HeapTuple	tup;
 
-		if (!ItemIdIsNormal(lpp))
+		if (unlikely(!ItemIdIsNormal(lpp)))
 			continue;
 
-		loctup.t_data = (HeapTupleHeader) PageGetItem(page, lpp);
-		loctup.t_len = ItemIdGetLength(lpp);
-		loctup.t_tableOid = RelationGetRelid(scan->rs_base.rs_rd);
-		ItemPointerSet(&(loctup.t_self), block, lineoff);
-
-		if (all_visible)
-			valid = true;
-		else
-			valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
-
-		if (check_serializable)
-			HeapCheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
-												&loctup, buffer, snapshot);
-
-		if (valid)
+		/*
+		 * If the page is not all-visible or we need to check
+		 * serializability, maintain enough state to be able to refind the
+		 * tuple efficiently, without having to first fetch the item and,
+		 * via that, the tuple again.
+		 */
+		if (!all_visible || check_serializable)
 		{
-			scan->rs_vistuples[ntup] = lineoff;
-			ntup++;
+			tup = &batchmvcc.tuples[ntup];
+
+			tup->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
+			tup->t_len = ItemIdGetLength(lpp);
+			tup->t_tableOid = relid;
+			ItemPointerSet(&(tup->t_self), block, lineoff);
 		}
+
+		/*
+		 * If the page is all-visible, these fields won't otherwise be
+		 * populated by the loop below.
+		 */
+		if (all_visible)
+		{
+			if (check_serializable)
+			{
+				batchmvcc.visible[ntup] = true;
+			}
+			scan->rs_vistuples[ntup] = lineoff;
+		}
+
+		ntup++;
 	}
 
 	Assert(ntup <= MaxHeapTuplesPerPage);
 
-	return ntup;
+	/*
+	 * Unless the page is all-visible, test visibility for all tuples in one
+	 * go. That is considerably more efficient than calling
+	 * HeapTupleSatisfiesMVCC() one-by-one.
+	 */
+	if (all_visible)
+		nvis = ntup;
+	else
+		nvis = HeapTupleSatisfiesMVCCBatch(snapshot, buffer,
+										   ntup,
+										   &batchmvcc,
+										   scan->rs_vistuples);
+
+	/*
+	 * So far we don't have a batch API for testing serializability, so do it
+	 * one-by-one.
+	 */
+	if (check_serializable)
+	{
+		for (int i = 0; i < ntup; i++)
+		{
+			HeapCheckForSerializableConflictOut(batchmvcc.visible[i],
+												scan->rs_base.rs_rd,
+												&batchmvcc.tuples[i],
+												buffer, snapshot);
+		}
+	}
+
+	return nvis;
 }
 
 /*
diff --git a/src/backend/access/heap/heapam_visibility.c b/src/backend/access/heap/heapam_visibility.c
index 9a034d5c9e8..75ae268d753 100644
--- a/src/backend/access/heap/heapam_visibility.c
+++ b/src/backend/access/heap/heapam_visibility.c
@@ -1598,6 +1598,49 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
 	return true;
 }
 
+/*
+ * Perform HeapTupleSatisfiesMVCC() on each passed-in tuple. This is more
+ * efficient than doing HeapTupleSatisfiesMVCC() one-by-one.
+ *
+ * The tuples to be checked are passed in via BatchMVCCState->tuples. Each
+ * tuple's visibility is stored in batchmvcc->visible[]. In addition,
+ * vistuples_dense is set to contain the offsets of visible tuples.
+ *
+ * The reason this is more efficient than HeapTupleSatisfiesMVCC() is that it
+ * avoids a cross-translation-unit function call for each tuple and allows
+ * the compiler to optimize across the calls to HeapTupleSatisfiesMVCC(). In
+ * the future it will also allow more efficient setting of hint bits.
+ *
+ * Returns the number of visible tuples.
+ */
+int
+HeapTupleSatisfiesMVCCBatch(Snapshot snapshot, Buffer buffer,
+							int ntups,
+							BatchMVCCState *batchmvcc,
+							OffsetNumber *vistuples_dense)
+{
+	int			nvis = 0;
+
+	Assert(IsMVCCSnapshot(snapshot));
+
+	for (int i = 0; i < ntups; i++)
+	{
+		bool		valid;
+		HeapTuple	tup = &batchmvcc->tuples[i];
+
+		valid = HeapTupleSatisfiesMVCC(tup, snapshot, buffer);
+		batchmvcc->visible[i] = valid;
+
+		if (likely(valid))
+		{
+			vistuples_dense[nvis] = tup->t_self.ip_posid;
+			nvis++;
+		}
+	}
+
+	return nvis;
+}
+
 /*
  * HeapTupleSatisfiesVisibility
  *		True iff heap tuple satisfies a time qual.
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index ce48fac42ba..3c0961ab36b 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -449,6 +449,23 @@ extern bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple);
 extern bool HeapTupleIsSurelyDead(HeapTuple htup,
 								  GlobalVisState *vistest);
 
+/*
+ * Some of the input/output to/from HeapTupleSatisfiesMVCCBatch() is passed
+ * via this struct, as otherwise the increased number of arguments to
+ * HeapTupleSatisfiesMVCCBatch() would lead to on-stack argument passing on
+ * x86-64, which causes a small regression.
+ */
+typedef struct BatchMVCCState
+{
+	HeapTupleData tuples[MaxHeapTuplesPerPage];
+	bool		visible[MaxHeapTuplesPerPage];
+} BatchMVCCState;
+
+extern int	HeapTupleSatisfiesMVCCBatch(Snapshot snapshot, Buffer buffer,
+										int ntups,
+										BatchMVCCState *batchmvcc,
+										OffsetNumber *vistuples_dense);
+
 /*
  * To avoid leaking too much knowledge about reorderbuffer implementation
  * details this is implemented in reorderbuffer.c not heapam_visibility.c
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 09e7f1d420e..14dec2d49c1 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -255,6 +255,7 @@ Barrier
 BaseBackupCmd
 BaseBackupTargetHandle
 BaseBackupTargetType
+BatchMVCCState
 BeginDirectModify_function
 BeginForeignInsert_function
 BeginForeignModify_function
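
Note for reviewers: a minimal usage sketch for the new API, separate from the
page-mode caller in the patch. This is an illustration only, not code from the
patch; the locals page, maxoff, relid, block, snapshot, and buffer are assumed
to be set up by a hypothetical caller. It just shows the in/out contract of
BatchMVCCState and HeapTupleSatisfiesMVCCBatch() described in the header
comment above.

	BatchMVCCState batchmvcc;
	OffsetNumber vistuples[MaxHeapTuplesPerPage];
	int			ntup = 0;
	int			nvis;

	/* fill batchmvcc.tuples[0 .. ntup-1], as page_collect_tuples() does */
	for (OffsetNumber off = FirstOffsetNumber; off <= maxoff; off++)
	{
		ItemId		lpp = PageGetItemId(page, off);
		HeapTuple	tup;

		if (!ItemIdIsNormal(lpp))
			continue;

		tup = &batchmvcc.tuples[ntup];
		tup->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
		tup->t_len = ItemIdGetLength(lpp);
		tup->t_tableOid = relid;
		ItemPointerSet(&tup->t_self, block, off);
		ntup++;
	}

	/* one call tests the whole batch */
	nvis = HeapTupleSatisfiesMVCCBatch(snapshot, buffer, ntup,
									   &batchmvcc, vistuples);

	/* vistuples[0 .. nvis-1] now hold the visible line pointer offsets */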
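
On the first follow-up optimization listed in the commit message (an
explicitly optimized batch version that avoids repeated xid tests): one
plausible shape, sketched below under stated assumptions, is to cache the
snapshot verdict for the most recently seen xmin, since consecutive tuples on
a page are often inserted by the same transaction. This is not part of the
patch; xmin_visible_cached() and the last_* parameters are invented names,
and a real version would still need the full HeapTupleSatisfiesMVCC() logic
(xmax handling, hint-bit maintenance, own-transaction cases).

	#include "postgres.h"
	#include "access/transam.h"
	#include "utils/snapmgr.h"

	/*
	 * Sketch: reuse one XidInMVCCSnapshot() test for runs of tuples with the
	 * same inserting xid. Assumes xmin is already known to have committed.
	 */
	static inline bool
	xmin_visible_cached(TransactionId xmin, Snapshot snapshot,
						TransactionId *last_xmin, bool *last_visible)
	{
		/* fast path: same inserter as the previous tuple */
		if (TransactionIdIsValid(*last_xmin) &&
			TransactionIdEquals(xmin, *last_xmin))
			return *last_visible;

		/* slow path: consult the snapshot once, then cache the verdict */
		*last_xmin = xmin;
		*last_visible = !XidInMVCCSnapshot(xmin, snapshot);
		return *last_visible;
	}

The batch loop in HeapTupleSatisfiesMVCCBatch() would carry last_xmin /
last_visible across iterations; keeping everything in one translation unit is
what would let the compiler specialize this, per the commit message.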