
First steps towards index scans with heap access decoupled from index
access: define new index access method functions 'amgetmulti' that can
fetch multiple TIDs per call.  (The functions exist but are totally
untested as yet.)  Since I was modifying pg_am anyway, remove the
no-longer-needed 'rel' parameter from amcostestimate functions, and
also remove the vestigial amowner column that was creating useless
work for Alvaro's shared-object-dependencies project.
Initdb forced due to changes in pg_am.
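
For orientation: the new 'amgetmulti' entry points keep the fmgr calling
convention of 'amgettuple' but fill a caller-supplied TID array, taking
(IndexScanDesc, ItemPointer tids, int32 max_tids, int32 *returned_tids)
and returning true while the scan can produce more tuples.  A minimal
sketch of how the generic index layer might dispatch such a function
(the wrapper name and its placement are illustrative assumptions, not
part of this diff):

/*
 * Hedged sketch only: FunctionCall4 and the Datum conversion macros are
 * standard fmgr usage, but this wrapper itself is assumed for
 * illustration and is not taken from this commit.
 */
static bool
call_amgetmulti(FmgrInfo *procedure, IndexScanDesc scan,
                ItemPointer tids, int32 max_tids,
                int32 *returned_tids)
{
    Datum       result;

    result = FunctionCall4(procedure,
                           PointerGetDatum(scan),
                           PointerGetDatum(tids),
                           Int32GetDatum(max_tids),
                           PointerGetDatum(returned_tids));
    return DatumGetBool(result);    /* false once the scan is exhausted */
}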
Author: Tom Lane
Date: 2005-03-27 23:53:05 +00:00
parent 351519affc
commit bf3dbb5881
20 changed files with 535 additions and 174 deletions

src/backend/access/nbtree/nbtree.c

@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.126 2005/03/21 01:23:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.127 2005/03/27 23:53:00 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -340,6 +340,79 @@ btgettuple(PG_FUNCTION_ARGS)
    PG_RETURN_BOOL(res);
}

/*
 * btgetmulti() -- get multiple tuples at once
 *
 * This is a somewhat generic implementation: it avoids the _bt_restscan
 * overhead, but there's no smarts about picking especially good stopping
 * points such as index page boundaries.
 */
Datum
btgetmulti(PG_FUNCTION_ARGS)
{
    IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
    ItemPointer tids = (ItemPointer) PG_GETARG_POINTER(1);
    int32       max_tids = PG_GETARG_INT32(2);
    int32      *returned_tids = (int32 *) PG_GETARG_POINTER(3);
    BTScanOpaque so = (BTScanOpaque) scan->opaque;
    bool        res = true;
    int32       ntids = 0;

    /*
     * Restore prior state if we were already called at least once.
     */
    if (ItemPointerIsValid(&(scan->currentItemData)))
        _bt_restscan(scan);

    while (ntids < max_tids)
    {
        /*
         * Start scan, or advance to next tuple.
         */
        if (ItemPointerIsValid(&(scan->currentItemData)))
            res = _bt_next(scan, ForwardScanDirection);
        else
            res = _bt_first(scan, ForwardScanDirection);

        /*
         * Skip killed tuples if asked to.
         */
        if (scan->ignore_killed_tuples)
        {
            while (res)
            {
                Page        page;
                OffsetNumber offnum;

                offnum = ItemPointerGetOffsetNumber(&(scan->currentItemData));
                page = BufferGetPage(so->btso_curbuf);
                if (!ItemIdDeleted(PageGetItemId(page, offnum)))
                    break;
                res = _bt_next(scan, ForwardScanDirection);
            }
        }

        if (!res)
            break;
        /* Save tuple ID, and continue scanning */
        tids[ntids] = scan->xs_ctup.t_self;
        ntids++;
    }

    /*
     * Save heap TID to use it in _bt_restscan.  Then release the read
     * lock on the buffer so that we aren't blocking other backends.
     */
    if (res)
    {
        ((BTScanOpaque) scan->opaque)->curHeapIptr = scan->xs_ctup.t_self;
        LockBuffer(((BTScanOpaque) scan->opaque)->btso_curbuf,
                   BUFFER_LOCK_UNLOCK);
    }

    *returned_tids = ntids;
    PG_RETURN_BOOL(res);
}

/*
 * btbeginscan() -- start a scan on a btree index
 */
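
A usage note on btgetmulti's contract: the function can return false on
the same call that delivers its final batch, so *returned_tids must be
consumed even when the result is false.  A hedged sketch of a caller
loop (the batch size, the index_getmulti wrapper, and the process_tid
consumer are assumptions for illustration, not part of this diff):

#define MAX_BATCH_TIDS  1024        /* illustrative batch size */

    ItemPointerData tids[MAX_BATCH_TIDS];
    int32       ntids;
    int32       i;
    bool        more;

    do
    {
        /* each call fills up to MAX_BATCH_TIDS entries */
        more = index_getmulti(scan, tids, MAX_BATCH_TIDS, &ntids);

        /* process this batch even when 'more' is false */
        for (i = 0; i < ntids; i++)
            process_tid(&tids[i]);  /* hypothetical per-TID consumer */
    } while (more);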