
nbtree: Allocate new pages in separate function.

Split nbtree's _bt_getbuf function in two: code that read locks or write
locks existing pages remains in _bt_getbuf, while code that deals with
allocating new pages is moved to a new, dedicated function called
_bt_allocbuf.  This simplifies most _bt_getbuf callers, since it is no
longer necessary for them to pass a heaprel argument.  Many of the
changes to nbtree from commit 61b313e4 can be reverted.  This minimizes
the divergence between HEAD/PostgreSQL 16 and earlier release branches.
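
For illustration, a minimal sketch of what the simplification means for an
ordinary caller (the argument lists shown are assumptions based on this
description, not lines quoted from the patch):

    /* Before: even a plain read lock of an existing page required heaprel */
    buf = _bt_getbuf(rel, heaprel, blkno, BT_READ);

    /* After: locking an existing page needs only the index relation */
    buf = _bt_getbuf(rel, blkno, BT_READ);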

_bt_allocbuf replaces the previous nbtree idiom of passing P_NEW to
_bt_getbuf.  There are only 3 affected call sites, all of which continue
to pass a heaprel for recovery conflict purposes.  Note that nbtree's
use of P_NEW was superficial; nbtree never actually relied on the P_NEW
code paths in bufmgr.c, so this change is strictly mechanical.
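
As a rough before/after sketch of the idiom change at one of those call sites
(the _bt_allocbuf signature shown here is an assumption inferred from this
description):

    /* Old idiom: request a brand-new page by passing P_NEW to _bt_getbuf */
    newbuf = _bt_getbuf(rel, heaprel, P_NEW, BT_WRITE);

    /* New idiom: allocation has a dedicated entry point; heaprel is still
     * passed so that recovery conflicts can be generated when necessary */
    newbuf = _bt_allocbuf(rel, heaprel);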

GiST already took the same approach; it has a dedicated function for
allocating new pages called gistNewBuffer().  That factor allowed commit
61b313e4 to make much more targeted changes to GiST.

Author: Peter Geoghegan <pg@bowt.ie>
Reviewed-By: Heikki Linnakangas <hlinnaka@iki.fi>
Discussion: https://postgr.es/m/CAH2-Wz=8Z9qY58bjm_7TAHgtW6RzZ5Ke62q5emdCEy9BAzwhmg@mail.gmail.com
Peter Geoghegan
2023-06-10 14:08:25 -07:00
parent fe879ae3a8
commit d088ba5a5a
12 changed files with 271 additions and 257 deletions


@@ -183,7 +183,6 @@ static inline bool invariant_l_nontarget_offset(BtreeCheckState *state,
 OffsetNumber upperbound);
 static Page palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum);
 static inline BTScanInsert bt_mkscankey_pivotsearch(Relation rel,
-Relation heaprel,
 IndexTuple itup);
 static ItemId PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block,
 Page page, OffsetNumber offset);
@@ -332,7 +331,7 @@ bt_index_check_internal(Oid indrelid, bool parentcheck, bool heapallindexed,
 RelationGetRelationName(indrel))));
 /* Extract metadata from metapage, and sanitize it in passing */
-_bt_metaversion(indrel, heaprel, &heapkeyspace, &allequalimage);
+_bt_metaversion(indrel, &heapkeyspace, &allequalimage);
 if (allequalimage && !heapkeyspace)
 ereport(ERROR,
 (errcode(ERRCODE_INDEX_CORRUPTED),
@@ -1259,7 +1258,7 @@ bt_target_page_check(BtreeCheckState *state)
 }
 /* Build insertion scankey for current page offset */
-skey = bt_mkscankey_pivotsearch(state->rel, state->heaprel, itup);
+skey = bt_mkscankey_pivotsearch(state->rel, itup);
 /*
 * Make sure tuple size does not exceed the relevant BTREE_VERSION
@@ -1769,7 +1768,7 @@ bt_right_page_check_scankey(BtreeCheckState *state)
 * memory remaining allocated.
 */
 firstitup = (IndexTuple) PageGetItem(rightpage, rightitem);
-return bt_mkscankey_pivotsearch(state->rel, state->heaprel, firstitup);
+return bt_mkscankey_pivotsearch(state->rel, firstitup);
 }
 /*
@@ -2682,7 +2681,7 @@ bt_rootdescend(BtreeCheckState *state, IndexTuple itup)
 Buffer lbuf;
 bool exists;
-key = _bt_mkscankey(state->rel, state->heaprel, itup);
+key = _bt_mkscankey(state->rel, itup);
 Assert(key->heapkeyspace && key->scantid != NULL);
 /*
@@ -2695,7 +2694,7 @@ bt_rootdescend(BtreeCheckState *state, IndexTuple itup)
 */
 Assert(state->readonly && state->rootdescend);
 exists = false;
-stack = _bt_search(state->rel, state->heaprel, key, &lbuf, BT_READ, NULL);
+stack = _bt_search(state->rel, NULL, key, &lbuf, BT_READ, NULL);
 if (BufferIsValid(lbuf))
 {
@@ -3134,11 +3133,11 @@ palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum)
 * the scankey is greater.
 */
 static inline BTScanInsert
-bt_mkscankey_pivotsearch(Relation rel, Relation heaprel, IndexTuple itup)
+bt_mkscankey_pivotsearch(Relation rel, IndexTuple itup)
 {
 BTScanInsert skey;
-skey = _bt_mkscankey(rel, heaprel, itup);
+skey = _bt_mkscankey(rel, itup);
 skey->pivotsearch = true;
 return skey;