
Refactor nbtree insertion scankeys.

Use dedicated struct to represent nbtree insertion scan keys.  Having a
dedicated struct makes the difference between search type scankeys and
insertion scankeys a lot clearer, and simplifies the signature of
several related functions.  This is based on a suggestion by Andrey
Lepikhov.
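
As a rough sketch only (not copied from the committed header), the struct below shows the general shape such a dedicated insertion scan key might take. Only the scankeys[] array is directly visible in the diff further down; the remaining fields and comments approximate the definition in src/include/access/nbtree.h and may not match it exactly.

    /*
     * Illustrative sketch -- not the committed nbtree.h definition.  Only
     * scankeys[] is confirmed by the diff below; other fields approximate
     * the real struct and may differ in name or layout.
     */
    typedef struct BTScanInsertData
    {
        bool        nextkey;        /* position at first item > scan key? */

        /* state used by _bt_check_unique() to cache binary search bounds */
        bool        savebinsrch;
        bool        restorebinsrch;
        OffsetNumber low;
        OffsetNumber stricthigh;

        int         keysz;          /* number of entries in scankeys[] */
        ScanKeyData scankeys[INDEX_MAX_KEYS];   /* must appear last */
    } BTScanInsertData;

    typedef BTScanInsertData *BTScanInsert;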

Streamline how unique index insertions cache binary search progress.
Cache the state of in-progress binary searches within _bt_check_unique()
for later instead of having callers avoid repeating the binary search in
an ad-hoc manner.  This makes it easy to add a new optimization:
_bt_check_unique() now falls out of its loop immediately in the common
case where it's already clear that there couldn't possibly be a
duplicate.
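
As a generic, self-contained illustration of the caching idea (not the nbtree code itself), the sketch below records the bounds established by one binary search so that an identical follow-up search over the same unchanged array does no further work. All names and the data layout here are invented for the example.

    #include <stdbool.h>
    #include <stdio.h>

    /* Bounds cached from an earlier binary search over the same array. */
    typedef struct SearchBounds
    {
        bool    valid;
        int     low;        /* inclusive lower bound */
        int     high;       /* exclusive upper bound */
    } SearchBounds;

    /*
     * Return the first index whose element is >= target, resuming from the
     * cached bounds when they are valid for this (unchanged) array.
     */
    static int
    lower_bound_cached(const int *a, int n, int target, SearchBounds *cache)
    {
        int     low = cache->valid ? cache->low : 0;
        int     high = cache->valid ? cache->high : n;

        while (low < high)
        {
            int     mid = low + (high - low) / 2;

            if (a[mid] < target)
                low = mid + 1;
            else
                high = mid;
        }

        /* Remember where the search converged for a later identical search. */
        cache->valid = true;
        cache->low = low;
        cache->high = low;

        return low;
    }

    int
    main(void)
    {
        int     a[] = {10, 20, 20, 30, 40};
        SearchBounds cache = {false, 0, 0};

        printf("%d\n", lower_bound_cached(a, 5, 20, &cache));  /* full search */
        printf("%d\n", lower_bound_cached(a, 5, 20, &cache));  /* reuses bounds */
        return 0;
    }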

The new _bt_check_unique() scheme makes it a lot easier to manage cached
binary search effort afterwards, from within _bt_findinsertloc().  This
is needed for the upcoming patch to make nbtree tuples unique by
treating heap TID as a final tiebreaker column.  Unique key binary
searches need to restore lower and upper bounds.  They cannot simply
continue to use the >= lower bound as the offset to insert at, because
the heap TID tiebreaker column must be used in comparisons for the
restored binary search (unlike the original _bt_check_unique() binary
search, where scankey's heap TID column must be omitted).
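
To illustrate the tiebreaker idea in isolation: a comparison that treats a TID-like value as the final column consults it only after every user-visible key column has compared equal, and only when the scan key actually carries one. All names below are invented for the example; the real comparison logic lives in _bt_compare().

    typedef struct ExampleKey
    {
        int     keycol;     /* user-visible key column */
        long    tid;        /* heap-TID-like tiebreaker, or -1 when omitted */
    } ExampleKey;

    /* Return <0, 0, or >0, consulting the tiebreaker only on a full key match. */
    static int
    example_compare(const ExampleKey *a, const ExampleKey *b)
    {
        if (a->keycol != b->keycol)
            return (a->keycol < b->keycol) ? -1 : 1;

        /* key columns equal: tiebreak on TID only if both sides supply one */
        if (a->tid < 0 || b->tid < 0)
            return 0;
        if (a->tid != b->tid)
            return (a->tid < b->tid) ? -1 : 1;
        return 0;
    }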

Author: Peter Geoghegan, Heikki Linnakangas
Reviewed-By: Heikki Linnakangas, Andrey Lepikhov
Discussion: https://postgr.es/m/CAH2-WzmE6AhUdk9NdWBf4K3HjWXZBX3+umC7mH7+WDrKcRtsOw@mail.gmail.com
Committed by Peter Geoghegan on 2019-03-20 09:30:57 -07:00
parent 550b9d26f8
commit e5adcb789d
9 changed files with 532 additions and 390 deletions

src/backend/utils/sort/tuplesort.c

@@ -884,7 +884,7 @@ tuplesort_begin_cluster(TupleDesc tupDesc,
 {
     Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
                                                    randomAccess);
-    ScanKey     indexScanKey;
+    BTScanInsert indexScanKey;
     MemoryContext oldcontext;
     int         i;
 
@@ -919,7 +919,7 @@ tuplesort_begin_cluster(TupleDesc tupDesc,
     state->tupDesc = tupDesc;   /* assume we need not copy tupDesc */
 
-    indexScanKey = _bt_mkscankey_nodata(indexRel);
+    indexScanKey = _bt_mkscankey(indexRel, NULL);
 
     if (state->indexInfo->ii_Expressions != NULL)
     {
@@ -945,7 +945,7 @@ tuplesort_begin_cluster(TupleDesc tupDesc,
     for (i = 0; i < state->nKeys; i++)
     {
         SortSupport sortKey = state->sortKeys + i;
-        ScanKey     scanKey = indexScanKey + i;
+        ScanKey     scanKey = indexScanKey->scankeys + i;
         int16       strategy;
 
         sortKey->ssup_cxt = CurrentMemoryContext;
@@ -964,7 +964,7 @@ tuplesort_begin_cluster(TupleDesc tupDesc,
         PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
     }
 
-    _bt_freeskey(indexScanKey);
+    pfree(indexScanKey);
 
     MemoryContextSwitchTo(oldcontext);
 
@@ -981,7 +981,7 @@ tuplesort_begin_index_btree(Relation heapRel,
 {
     Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
                                                    randomAccess);
-    ScanKey     indexScanKey;
+    BTScanInsert indexScanKey;
     MemoryContext oldcontext;
     int         i;
 
@@ -1014,7 +1014,7 @@ tuplesort_begin_index_btree(Relation heapRel,
     state->indexRel = indexRel;
     state->enforceUnique = enforceUnique;
 
-    indexScanKey = _bt_mkscankey_nodata(indexRel);
+    indexScanKey = _bt_mkscankey(indexRel, NULL);
 
     /* Prepare SortSupport data for each column */
     state->sortKeys = (SortSupport) palloc0(state->nKeys *
@@ -1023,7 +1023,7 @@ tuplesort_begin_index_btree(Relation heapRel,
     for (i = 0; i < state->nKeys; i++)
     {
         SortSupport sortKey = state->sortKeys + i;
-        ScanKey     scanKey = indexScanKey + i;
+        ScanKey     scanKey = indexScanKey->scankeys + i;
         int16       strategy;
 
         sortKey->ssup_cxt = CurrentMemoryContext;
@@ -1042,7 +1042,7 @@ tuplesort_begin_index_btree(Relation heapRel,
         PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
     }
 
-    _bt_freeskey(indexScanKey);
+    pfree(indexScanKey);
 
     MemoryContextSwitchTo(oldcontext);
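
Taken together, the caller-side change in these hunks is uniform: _bt_mkscankey(indexRel, NULL) replaces _bt_mkscankey_nodata(), the per-attribute keys are reached through the struct's scankeys array rather than by indexing the returned ScanKey array directly, and since the insertion scan key is a single allocation a plain pfree() takes the place of _bt_freeskey().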