Tweak indexscan machinery to avoid taking an AccessShareLock on an index if we already have a stronger lock due to the index's table being the update target table of the query. Same optimization I applied earlier at the table level. There doesn't seem to be much interest in the more radical idea of not locking indexes at all, so do what we can ...
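For illustration, the revised call pattern looks like this (a minimal sketch in the spirit of the patch, not code from the commit; heapRel, indexRel, snapshot, nkeys, keys, tup, and already_locked are hypothetical placeholders):

    IndexScanDesc scan;
    HeapTuple   tup;

    /*
     * Pass need_index_lock = false only when some lock on the index is
     * already held, e.g. because its table is the query's update target;
     * otherwise pass true and let the index code take AccessShareLock.
     */
    scan = index_beginscan(heapRel, indexRel,
                           !already_locked,     /* need_index_lock */
                           snapshot, nkeys, keys);
    while ((tup = index_getnext(scan, ForwardScanDirection)) != NULL)
    {
        /* process the fetched tuple ... */
    }
    index_endscan(scan);    /* releases the lock only if it was taken above */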
src/backend/access/heap/tuptoaster.c

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.56 2005/11/22 18:17:06 momjian Exp $
+ *    $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.57 2005/12/03 05:50:59 tgl Exp $
  *
  *
  * INTERFACE ROUTINES
@@ -1142,8 +1142,8 @@ toast_delete_datum(Relation rel, Datum value)
 	/*
 	 * Find the chunks by index
 	 */
-	toastscan = index_beginscan(toastrel, toastidx, SnapshotToast,
-								1, &toastkey);
+	toastscan = index_beginscan(toastrel, toastidx, true,
+								SnapshotToast, 1, &toastkey);
 	while ((toasttup = index_getnext(toastscan, ForwardScanDirection)) != NULL)
 	{
 		/*
@@ -1219,8 +1219,8 @@ toast_fetch_datum(varattrib *attr)
 	 */
 	nextidx = 0;

-	toastscan = index_beginscan(toastrel, toastidx, SnapshotToast,
-								1, &toastkey);
+	toastscan = index_beginscan(toastrel, toastidx, true,
+								SnapshotToast, 1, &toastkey);
 	while ((ttup = index_getnext(toastscan, ForwardScanDirection)) != NULL)
 	{
 		/*
@@ -1394,8 +1394,8 @@ toast_fetch_datum_slice(varattrib *attr, int32 sliceoffset, int32 length)
 	 * The index is on (valueid, chunkidx) so they will come in order
 	 */
 	nextidx = startchunk;
-	toastscan = index_beginscan(toastrel, toastidx, SnapshotToast,
-								nscankeys, toastkey);
+	toastscan = index_beginscan(toastrel, toastidx, true,
+								SnapshotToast, nscankeys, toastkey);
 	while ((ttup = index_getnext(toastscan, ForwardScanDirection)) != NULL)
 	{
 		/*
src/backend/access/index/genam.c

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.51 2005/11/22 18:17:06 momjian Exp $
+ *    $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.52 2005/12/03 05:51:00 tgl Exp $
  *
  * NOTES
  *    many of the old access method routines have been turned into
@@ -86,7 +86,8 @@ RelationGetIndexScan(Relation indexRelation,
 	else
 		scan->keyData = NULL;

-	scan->is_multiscan = false; /* caller may change this */
+	scan->is_multiscan = false; /* caller may change this */
+	scan->have_lock = false;    /* ditto */
 	scan->kill_prior_tuple = false;
 	scan->ignore_killed_tuples = true;  /* default setting */
 	scan->keys_are_unique = false;      /* may be set by index AM */
@@ -211,8 +212,8 @@ systable_beginscan(Relation heapRelation,
 			key[i].sk_attno = i + 1;
 		}

-		sysscan->iscan = index_beginscan(heapRelation, irel, snapshot,
-										 nkeys, key);
+		sysscan->iscan = index_beginscan(heapRelation, irel, true,
+										 snapshot, nkeys, key);
 		sysscan->scan = NULL;
 	}
 	else
src/backend/access/index/indexam.c

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.86 2005/10/15 02:49:09 momjian Exp $
+ *    $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.87 2005/12/03 05:51:00 tgl Exp $
  *
  * INTERFACE ROUTINES
  *		index_open		- open an index relation by relation OID
@@ -111,6 +111,7 @@ do { \
 } while(0)

 static IndexScanDesc index_beginscan_internal(Relation indexRelation,
+							bool need_index_lock,
 							int nkeys, ScanKey key);


@@ -229,16 +230,23 @@ index_insert(Relation indexRelation,
  * heapRelation link (nor the snapshot). However, the caller had better
  * be holding some kind of lock on the heap relation in any case, to ensure
  * no one deletes it (or the index) out from under us.
+ *
+ * Most callers should pass need_index_lock = true to cause the index code
+ * to take AccessShareLock on the index for the duration of the scan. But
+ * if it is known that a lock is already held on the index, pass false to
+ * skip taking an unnecessary lock.
  */
 IndexScanDesc
 index_beginscan(Relation heapRelation,
 				Relation indexRelation,
+				bool need_index_lock,
 				Snapshot snapshot,
 				int nkeys, ScanKey key)
 {
 	IndexScanDesc scan;

-	scan = index_beginscan_internal(indexRelation, nkeys, key);
+	scan = index_beginscan_internal(indexRelation, need_index_lock,
+									nkeys, key);

 	/*
 	 * Save additional parameters into the scandesc. Everything else was set
@@ -259,12 +267,14 @@ index_beginscan(Relation heapRelation,
  */
 IndexScanDesc
 index_beginscan_multi(Relation indexRelation,
+					  bool need_index_lock,
 					  Snapshot snapshot,
 					  int nkeys, ScanKey key)
 {
 	IndexScanDesc scan;

-	scan = index_beginscan_internal(indexRelation, nkeys, key);
+	scan = index_beginscan_internal(indexRelation, need_index_lock,
+									nkeys, key);

 	/*
 	 * Save additional parameters into the scandesc. Everything else was set
@@ -281,6 +291,7 @@ index_beginscan_multi(Relation indexRelation,
  */
 static IndexScanDesc
 index_beginscan_internal(Relation indexRelation,
+						 bool need_index_lock,
 						 int nkeys, ScanKey key)
 {
 	IndexScanDesc scan;
@@ -291,13 +302,15 @@ index_beginscan_internal(Relation indexRelation,
 	RelationIncrementReferenceCount(indexRelation);

 	/*
-	 * Acquire AccessShareLock for the duration of the scan
+	 * Acquire AccessShareLock for the duration of the scan, unless caller
+	 * says it already has lock on the index.
 	 *
 	 * Note: we could get an SI inval message here and consequently have to
 	 * rebuild the relcache entry. The refcount increment above ensures that
 	 * we will rebuild it and not just flush it...
 	 */
-	LockRelation(indexRelation, AccessShareLock);
+	if (need_index_lock)
+		LockRelation(indexRelation, AccessShareLock);

 	/*
 	 * LockRelation can clean rd_aminfo structure, so fill procedure after
@@ -315,6 +328,9 @@ index_beginscan_internal(Relation indexRelation,
 										Int32GetDatum(nkeys),
 										PointerGetDatum(key)));

+	/* Save flag to tell index_endscan whether to release lock */
+	scan->have_lock = need_index_lock;
+
 	return scan;
 }

@@ -380,7 +396,8 @@ index_endscan(IndexScanDesc scan)

 	/* Release index lock and refcount acquired by index_beginscan */

-	UnlockRelation(scan->indexRelation, AccessShareLock);
+	if (scan->have_lock)
+		UnlockRelation(scan->indexRelation, AccessShareLock);

 	RelationDecrementReferenceCount(scan->indexRelation);

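For a consolidated view, the net effect of the indexam.c hunks on the lock bookkeeping is sketched below (condensed from the diff, not verbatim source; the AM-specific setup is simplified to a direct RelationGetIndexScan call, whereas the real code reaches it through the access method's ambeginscan):

    static IndexScanDesc
    index_beginscan_internal(Relation indexRelation,
                             bool need_index_lock,
                             int nkeys, ScanKey key)
    {
        IndexScanDesc scan;

        RelationIncrementReferenceCount(indexRelation);

        /* Take AccessShareLock only when the caller doesn't already hold one. */
        if (need_index_lock)
            LockRelation(indexRelation, AccessShareLock);

        /* Simplified; the real code invokes the AM's ambeginscan here. */
        scan = RelationGetIndexScan(indexRelation, nkeys, key);

        /* Remember whether index_endscan must release the lock. */
        scan->have_lock = need_index_lock;

        return scan;
    }

    void
    index_endscan(IndexScanDesc scan)
    {
        /* Release the lock only if index_beginscan_internal took it. */
        if (scan->have_lock)
            UnlockRelation(scan->indexRelation, AccessShareLock);

        RelationDecrementReferenceCount(scan->indexRelation);
    }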