
Mark index entries "killed" when they are no longer visible to any
transaction, so as to avoid returning them out of the index AM.  Saves
repeated heap_fetch operations on frequently-updated rows.  Also detect
queries on unique keys (equality to all columns of a unique index), and
don't bother continuing the scan once we have found the first match.
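To illustrate the first idea, a toy sketch only (plain C, not the code in
this commit; every name below is made up): once a heap fetch shows that the
row an index entry points at is dead to every transaction, the entry is
flagged "killed", and later scans skip it without repeating the heap fetch.

/*
 * Toy simulation of the "killed index entry" idea -- not PostgreSQL code.
 * An index entry points at a heap row; if the row turns out to be dead to
 * every transaction, the scan marks the entry killed so later scans skip
 * it without touching the heap again.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct
{
	int		heap_slot;		/* which heap row this entry points at */
	bool	killed;			/* set once the row is known dead to everyone */
} IndexEntry;

typedef struct
{
	bool	live;			/* stand-in for "visible to some transaction" */
	int		value;
} HeapRow;

static HeapRow		heap[] = {{true, 10}, {false, 20}, {true, 30}};
static IndexEntry	index_entries[] = {{0, false}, {1, false}, {2, false}};

/* Scan the index; skip killed entries, and kill entries whose row is dead. */
static void
scan_index(bool ignore_killed_tuples)
{
	for (int i = 0; i < 3; i++)
	{
		IndexEntry *e = &index_entries[i];

		if (ignore_killed_tuples && e->killed)
			continue;				/* no heap fetch needed at all */

		HeapRow    *row = &heap[e->heap_slot];	/* the "heap_fetch" */

		if (row->live)
			printf("returning value %d\n", row->value);
		else
			e->killed = true;		/* remember: never fetch this row again */
	}
}

int
main(void)
{
	scan_index(true);	/* first scan fetches every row, kills the dead one */
	scan_index(true);	/* second scan skips the killed entry entirely */
	return 0;
}

In the toy, the second scan never touches the heap row behind the killed
entry, which is where the savings on frequently-updated rows comes from.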

Killing is implemented in the btree and hash AMs, but not yet in rtree
or gist, because there isn't an equally convenient place to do it in
those AMs (the outer amgetnext routine can't do it without re-pinning
the index page).
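What makes btree and hash convenient is the pin.  A hedged sketch of the
pattern (hypothetical names, compilable plain C, not the PostgreSQL API):
the scan remembers which returned items turned out to be dead and sets
their "dead" flags just before stepping off the still-pinned leaf page; an
AM whose outer getnext routine has already let go of the page would first
have to re-pin it, which is the inconvenience the message above refers to.

/*
 * Hypothetical sketch: why marking killed entries is cheap while the scan
 * still holds a pin on the leaf page it is reading.  Names are illustrative.
 */
#include <stdio.h>

typedef int Buffer;				/* stand-in for a pinned page reference */

/* Stub: in a real AM this would set a "dead" flag on the page item. */
static void
mark_item_dead_on_page(Buffer buf, int offset)
{
	printf("page %d: item %d marked dead\n", buf, offset);
}

typedef struct
{
	Buffer	buf;				/* leaf page the scan currently has pinned */
	int		killed_items[64];	/* items reported dead since pinning it */
	int		n_killed;
} ScanPos;

/* The caller reports that the tuple it just got back is dead to everyone. */
static void
remember_dead_item(ScanPos *pos, int offset)
{
	if (pos->n_killed < 64)
		pos->killed_items[pos->n_killed++] = offset;
}

/* Right before stepping off the page (and dropping the pin), flush flags. */
static void
flush_killed_items(ScanPos *pos)
{
	for (int i = 0; i < pos->n_killed; i++)
		mark_item_dead_on_page(pos->buf, pos->killed_items[i]);
	pos->n_killed = 0;
}

int
main(void)
{
	ScanPos		pos = {.buf = 1, .n_killed = 0};

	remember_dead_item(&pos, 7);
	remember_dead_item(&pos, 12);
	flush_killed_items(&pos);	/* done while the page is still "pinned" */
	return 0;
}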

Did some small cleanup on APIs of HeapTupleSatisfies, heap_fetch, and
index_insert to make this a little easier.
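For index_insert, the gist.c hunk further down shows the relevant part of
that cleanup: each AM's insert handler is now also handed the heap relation
and a check-unique flag (arguments 4 and 5), even where, as in gist, it does
not yet use them.  A hedged sketch of the handler shape, as a fragment
against the PostgreSQL source headers rather than a standalone program;
sketch_aminsert and the elided body are illustrative, only the argument
fetching mirrors the diff:

/*
 * Sketch of an AM insert handler after this commit, shaped like the
 * gistinsert() hunk below.  Only the argument list is the point here.
 */
Datum
sketch_aminsert(PG_FUNCTION_ARGS)
{
	Relation	rel = (Relation) PG_GETARG_POINTER(0);		/* the index */
	Datum	   *datum = (Datum *) PG_GETARG_POINTER(1);
	char	   *nulls = (char *) PG_GETARG_POINTER(2);
	ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
	Relation	heapRel = (Relation) PG_GETARG_POINTER(4);	/* new argument */
	bool		checkUnique = PG_GETARG_BOOL(5);			/* new argument */
	InsertIndexResult res = NULL;

	/*
	 * ... build an IndexTuple for (datum, nulls, ht_ctid) and insert it,
	 * consulting heapRel/checkUnique only if the AM enforces uniqueness ...
	 */

	PG_RETURN_POINTER(res);
}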
Author: Tom Lane
Date: 2002-05-24 18:57:57 +00:00
parent 2f2d05763d
commit 3f4d488022
30 changed files with 498 additions and 273 deletions


@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.92 2002/05/20 23:51:40 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.93 2002/05/24 18:57:55 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -294,9 +294,9 @@ gistinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
#ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4);
bool checkUnique = PG_GETARG_BOOL(5);
#endif
InsertIndexResult res;
IndexTuple itup;
@@ -1607,6 +1607,8 @@ gistbulkdelete(PG_FUNCTION_ARGS)
/* walk through the entire index */
iscan = index_beginscan(NULL, rel, SnapshotAny, 0, (ScanKey) NULL);
/* including killed tuples */
iscan->ignore_killed_tuples = false;
while (index_getnext_indexitem(iscan, ForwardScanDirection))
{