Mirror of https://github.com/postgres/postgres.git

Massive commit to run PGINDENT on all *.c and *.h files.

This commit is contained in:
Bruce Momjian
1997-09-07 05:04:48 +00:00
parent 8fecd4febf
commit 1ccd423235
687 changed files with 150775 additions and 136888 deletions
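The reformatting pattern is the same throughout the diffs that follow: pgindent moves each function's return type onto its own line, puts the braces of if/while/switch blocks on their own lines, tab-aligns local variable declarations into columns, adds a space after '*' in pointer parameter declarations, and re-wraps block comments to a common width. A minimal before/after illustration, taken from the hashint4() hunk in hashfunc.c below:

/* before pgindent */
uint32 hashint4(uint32 key)
{
    return (~key);
}

/* after pgindent */
uint32
hashint4(uint32 key)
{
    return (~key);
}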

src/backend/access/hash/hash.c

@@ -1,16 +1,16 @@
/*-------------------------------------------------------------------------
*
* hash.c--
* Implementation of Margo Seltzer's Hashing package for postgres.
* Implementation of Margo Seltzer's Hashing package for postgres.
*
* Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.12 1997/01/10 09:46:13 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.13 1997/09/07 04:37:49 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
* This file contains only the public interface routines.
*
*-------------------------------------------------------------------------
*/
@@ -26,452 +26,483 @@
#include <miscadmin.h>
#ifndef HAVE_MEMMOVE
# include <regex/utils.h>
#include <regex/utils.h>
#else
# include <string.h>
#include <string.h>
#endif
bool BuildingHash = false;
bool BuildingHash = false;
/*
* hashbuild() -- build a new hash index.
* hashbuild() -- build a new hash index.
*
* We use a global variable to record the fact that we're creating
* a new index. This is used to avoid high-concurrency locking,
* since the index won't be visible until this transaction commits
* and since building is guaranteed to be single-threaded.
* We use a global variable to record the fact that we're creating
* a new index. This is used to avoid high-concurrency locking,
* since the index won't be visible until this transaction commits
* and since building is guaranteed to be single-threaded.
*/
void
hashbuild(Relation heap,
Relation index,
int natts,
AttrNumber *attnum,
IndexStrategy istrat,
uint16 pcount,
Datum *params,
FuncIndexInfo *finfo,
PredInfo *predInfo)
Relation index,
int natts,
AttrNumber * attnum,
IndexStrategy istrat,
uint16 pcount,
Datum * params,
FuncIndexInfo * finfo,
PredInfo * predInfo)
{
HeapScanDesc hscan;
Buffer buffer;
HeapTuple htup;
IndexTuple itup;
TupleDesc htupdesc, itupdesc;
Datum *attdata;
bool *nulls;
InsertIndexResult res;
int nhtups, nitups;
int i;
HashItem hitem;
HeapScanDesc hscan;
Buffer buffer;
HeapTuple htup;
IndexTuple itup;
TupleDesc htupdesc,
itupdesc;
Datum *attdata;
bool *nulls;
InsertIndexResult res;
int nhtups,
nitups;
int i;
HashItem hitem;
#ifndef OMIT_PARTIAL_INDEX
ExprContext *econtext;
TupleTable tupleTable;
TupleTableSlot *slot;
ExprContext *econtext;
TupleTable tupleTable;
TupleTableSlot *slot;
#endif
Oid hrelid, irelid;
Node *pred, *oldPred;
/* note that this is a new btree */
BuildingHash = true;
pred = predInfo->pred;
oldPred = predInfo->oldPred;
/* initialize the hash index metadata page (if this is a new index) */
if (oldPred == NULL)
_hash_metapinit(index);
/* get tuple descriptors for heap and index relations */
htupdesc = RelationGetTupleDescriptor(heap);
itupdesc = RelationGetTupleDescriptor(index);
/* get space for data items that'll appear in the index tuple */
attdata = (Datum *) palloc(natts * sizeof(Datum));
nulls = (bool *) palloc(natts * sizeof(bool));
/*
* If this is a predicate (partial) index, we will need to evaluate the
* predicate using ExecQual, which requires the current tuple to be in a
* slot of a TupleTable. In addition, ExecQual must have an ExprContext
* referring to that slot. Here, we initialize dummy TupleTable and
* ExprContext objects for this purpose. --Nels, Feb '92
*/
Oid hrelid,
irelid;
Node *pred,
*oldPred;
/* note that this is a new btree */
BuildingHash = true;
pred = predInfo->pred;
oldPred = predInfo->oldPred;
/* initialize the hash index metadata page (if this is a new index) */
if (oldPred == NULL)
_hash_metapinit(index);
/* get tuple descriptors for heap and index relations */
htupdesc = RelationGetTupleDescriptor(heap);
itupdesc = RelationGetTupleDescriptor(index);
/* get space for data items that'll appear in the index tuple */
attdata = (Datum *) palloc(natts * sizeof(Datum));
nulls = (bool *) palloc(natts * sizeof(bool));
/*
* If this is a predicate (partial) index, we will need to evaluate
* the predicate using ExecQual, which requires the current tuple to
* be in a slot of a TupleTable. In addition, ExecQual must have an
* ExprContext referring to that slot. Here, we initialize dummy
* TupleTable and ExprContext objects for this purpose. --Nels, Feb
* '92
*/
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL) {
tupleTable = ExecCreateTupleTable(1);
slot = ExecAllocTableSlot(tupleTable);
econtext = makeNode(ExprContext);
FillDummyExprContext(econtext, slot, htupdesc, buffer);
}
else /* quiet the compiler */
if (pred != NULL || oldPred != NULL)
{
tupleTable = ExecCreateTupleTable(1);
slot = ExecAllocTableSlot(tupleTable);
econtext = makeNode(ExprContext);
FillDummyExprContext(econtext, slot, htupdesc, buffer);
}
else
/* quiet the compiler */
{
econtext = NULL;
tupleTable = 0;
slot = 0;
}
#endif /* OMIT_PARTIAL_INDEX */
/* start a heap scan */
hscan = heap_beginscan(heap, 0, NowTimeQual, 0, (ScanKey) NULL);
htup = heap_getnext(hscan, 0, &buffer);
/* build the index */
nhtups = nitups = 0;
for (; HeapTupleIsValid(htup); htup = heap_getnext(hscan, 0, &buffer)) {
nhtups++;
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
*/
if (oldPred != NULL) {
/*SetSlotContents(slot, htup); */
#ifndef OMIT_PARTIAL_INDEX
slot->val = htup;
if (ExecQual((List*)oldPred, econtext) == true) {
nitups++;
continue;
}
#endif /* OMIT_PARTIAL_INDEX */
}
/* Skip this tuple if it doesn't satisfy the partial-index predicate */
if (pred != NULL) {
#ifndef OMIT_PARTIAL_INDEX
/*SetSlotContents(slot, htup); */
slot->val = htup;
if (ExecQual((List*)pred, econtext) == false)
continue;
#endif /* OMIT_PARTIAL_INDEX */
}
nitups++;
/*
* For the current heap tuple, extract all the attributes
* we use in this index, and note which are null.
*/
for (i = 1; i <= natts; i++) {
int attoff;
bool attnull;
/*
* Offsets are from the start of the tuple, and are
* zero-based; indices are one-based. The next call
* returns i - 1. That's data hiding for you.
*/
/* attoff = i - 1 */
attoff = AttrNumberGetAttrOffset(i);
/* below, attdata[attoff] set to equal some datum &
* attnull is changed to indicate whether or not the attribute
* is null for this tuple
*/
attdata[attoff] = GetIndexValue(htup,
htupdesc,
attoff,
attnum,
finfo,
&attnull,
buffer);
nulls[attoff] = (attnull ? 'n' : ' ');
}
/* form an index tuple and point it at the heap tuple */
itup = index_formtuple(itupdesc, attdata, nulls);
/*
* If the single index key is null, we don't insert it into
* the index. Hash tables support scans on '='.
* Relational algebra says that A = B
* returns null if either A or B is null. This
* means that no qualification used in an index scan could ever
* return true on a null attribute. It also means that indices
* can't be used by ISNULL or NOTNULL scans, but that's an
* artifact of the strategy map architecture chosen in 1986, not
* of the way nulls are handled here.
*/
if (itup->t_info & INDEX_NULL_MASK) {
pfree(itup);
continue;
}
itup->t_tid = htup->t_ctid;
hitem = _hash_formitem(itup);
res = _hash_doinsert(index, hitem);
pfree(hitem);
pfree(itup);
pfree(res);
}
/* okay, all heap tuples are indexed */
heap_endscan(hscan);
if (pred != NULL || oldPred != NULL) {
#ifndef OMIT_PARTIAL_INDEX
ExecDestroyTupleTable(tupleTable, true);
pfree(econtext);
#endif /* OMIT_PARTIAL_INDEX */
}
/*
* Since we just counted the tuples in the heap, we update its
* stats in pg_class to guarantee that the planner takes advantage
* of the index we just created. Finally, only update statistics
* during normal index definitions, not for indices on system catalogs
* created during bootstrap processing. We must close the relations
* before updatings statistics to guarantee that the relcache entries
* are flushed when we increment the command counter in UpdateStats().
*/
if (IsNormalProcessingMode())
#endif /* OMIT_PARTIAL_INDEX */
/* start a heap scan */
hscan = heap_beginscan(heap, 0, NowTimeQual, 0, (ScanKey) NULL);
htup = heap_getnext(hscan, 0, &buffer);
/* build the index */
nhtups = nitups = 0;
for (; HeapTupleIsValid(htup); htup = heap_getnext(hscan, 0, &buffer))
{
hrelid = heap->rd_id;
irelid = index->rd_id;
heap_close(heap);
index_close(index);
UpdateStats(hrelid, nhtups, true);
UpdateStats(irelid, nitups, false);
if (oldPred != NULL) {
if (nitups == nhtups) pred = NULL;
UpdateIndexPredicate(irelid, oldPred, pred);
}
nhtups++;
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
*/
if (oldPred != NULL)
{
/* SetSlotContents(slot, htup); */
#ifndef OMIT_PARTIAL_INDEX
slot->val = htup;
if (ExecQual((List *) oldPred, econtext) == true)
{
nitups++;
continue;
}
#endif /* OMIT_PARTIAL_INDEX */
}
/*
* Skip this tuple if it doesn't satisfy the partial-index
* predicate
*/
if (pred != NULL)
{
#ifndef OMIT_PARTIAL_INDEX
/* SetSlotContents(slot, htup); */
slot->val = htup;
if (ExecQual((List *) pred, econtext) == false)
continue;
#endif /* OMIT_PARTIAL_INDEX */
}
nitups++;
/*
* For the current heap tuple, extract all the attributes we use
* in this index, and note which are null.
*/
for (i = 1; i <= natts; i++)
{
int attoff;
bool attnull;
/*
* Offsets are from the start of the tuple, and are
* zero-based; indices are one-based. The next call returns i
* - 1. That's data hiding for you.
*/
/* attoff = i - 1 */
attoff = AttrNumberGetAttrOffset(i);
/*
* below, attdata[attoff] set to equal some datum & attnull is
* changed to indicate whether or not the attribute is null
* for this tuple
*/
attdata[attoff] = GetIndexValue(htup,
htupdesc,
attoff,
attnum,
finfo,
&attnull,
buffer);
nulls[attoff] = (attnull ? 'n' : ' ');
}
/* form an index tuple and point it at the heap tuple */
itup = index_formtuple(itupdesc, attdata, nulls);
/*
* If the single index key is null, we don't insert it into the
* index. Hash tables support scans on '='. Relational algebra
* says that A = B returns null if either A or B is null. This
* means that no qualification used in an index scan could ever
* return true on a null attribute. It also means that indices
* can't be used by ISNULL or NOTNULL scans, but that's an
* artifact of the strategy map architecture chosen in 1986, not
* of the way nulls are handled here.
*/
if (itup->t_info & INDEX_NULL_MASK)
{
pfree(itup);
continue;
}
itup->t_tid = htup->t_ctid;
hitem = _hash_formitem(itup);
res = _hash_doinsert(index, hitem);
pfree(hitem);
pfree(itup);
pfree(res);
}
/* be tidy */
pfree(nulls);
pfree(attdata);
/* all done */
BuildingHash = false;
/* okay, all heap tuples are indexed */
heap_endscan(hscan);
if (pred != NULL || oldPred != NULL)
{
#ifndef OMIT_PARTIAL_INDEX
ExecDestroyTupleTable(tupleTable, true);
pfree(econtext);
#endif /* OMIT_PARTIAL_INDEX */
}
/*
* Since we just counted the tuples in the heap, we update its stats
* in pg_class to guarantee that the planner takes advantage of the
* index we just created. Finally, only update statistics during
* normal index definitions, not for indices on system catalogs
* created during bootstrap processing. We must close the relations
* before updatings statistics to guarantee that the relcache entries
* are flushed when we increment the command counter in UpdateStats().
*/
if (IsNormalProcessingMode())
{
hrelid = heap->rd_id;
irelid = index->rd_id;
heap_close(heap);
index_close(index);
UpdateStats(hrelid, nhtups, true);
UpdateStats(irelid, nitups, false);
if (oldPred != NULL)
{
if (nitups == nhtups)
pred = NULL;
UpdateIndexPredicate(irelid, oldPred, pred);
}
}
/* be tidy */
pfree(nulls);
pfree(attdata);
/* all done */
BuildingHash = false;
}
/*
* hashinsert() -- insert an index tuple into a hash table.
* hashinsert() -- insert an index tuple into a hash table.
*
* Hash on the index tuple's key, find the appropriate location
* for the new tuple, put it there, and return an InsertIndexResult
* to the caller.
* Hash on the index tuple's key, find the appropriate location
* for the new tuple, put it there, and return an InsertIndexResult
* to the caller.
*/
InsertIndexResult
hashinsert(Relation rel, Datum *datum, char *nulls, ItemPointer ht_ctid, Relation heapRel)
hashinsert(Relation rel, Datum * datum, char *nulls, ItemPointer ht_ctid, Relation heapRel)
{
HashItem hitem;
IndexTuple itup;
InsertIndexResult res;
HashItem hitem;
IndexTuple itup;
InsertIndexResult res;
/* generate an index tuple */
itup = index_formtuple(RelationGetTupleDescriptor(rel), datum, nulls);
itup->t_tid = *ht_ctid;
if (itup->t_info & INDEX_NULL_MASK)
return ((InsertIndexResult) NULL);
hitem = _hash_formitem(itup);
res = _hash_doinsert(rel, hitem);
pfree(hitem);
pfree(itup);
return (res);
/* generate an index tuple */
itup = index_formtuple(RelationGetTupleDescriptor(rel), datum, nulls);
itup->t_tid = *ht_ctid;
if (itup->t_info & INDEX_NULL_MASK)
return ((InsertIndexResult) NULL);
hitem = _hash_formitem(itup);
res = _hash_doinsert(rel, hitem);
pfree(hitem);
pfree(itup);
return (res);
}
/*
* hashgettuple() -- Get the next tuple in the scan.
* hashgettuple() -- Get the next tuple in the scan.
*/
char *
char *
hashgettuple(IndexScanDesc scan, ScanDirection dir)
{
RetrieveIndexResult res;
/*
* If we've already initialized this scan, we can just advance it
* in the appropriate direction. If we haven't done so yet, we
* call a routine to get the first item in the scan.
*/
if (ItemPointerIsValid(&(scan->currentItemData)))
res = _hash_next(scan, dir);
else
res = _hash_first(scan, dir);
return ((char *) res);
RetrieveIndexResult res;
/*
* If we've already initialized this scan, we can just advance it in
* the appropriate direction. If we haven't done so yet, we call a
* routine to get the first item in the scan.
*/
if (ItemPointerIsValid(&(scan->currentItemData)))
res = _hash_next(scan, dir);
else
res = _hash_first(scan, dir);
return ((char *) res);
}
/*
* hashbeginscan() -- start a scan on a hash index
* hashbeginscan() -- start a scan on a hash index
*/
char *
char *
hashbeginscan(Relation rel,
bool fromEnd,
uint16 keysz,
ScanKey scankey)
bool fromEnd,
uint16 keysz,
ScanKey scankey)
{
IndexScanDesc scan;
HashScanOpaque so;
scan = RelationGetIndexScan(rel, fromEnd, keysz, scankey);
so = (HashScanOpaque) palloc(sizeof(HashScanOpaqueData));
so->hashso_curbuf = so->hashso_mrkbuf = InvalidBuffer;
scan->opaque = so;
scan->flags = 0x0;
/* register scan in case we change pages it's using */
_hash_regscan(scan);
return ((char *) scan);
IndexScanDesc scan;
HashScanOpaque so;
scan = RelationGetIndexScan(rel, fromEnd, keysz, scankey);
so = (HashScanOpaque) palloc(sizeof(HashScanOpaqueData));
so->hashso_curbuf = so->hashso_mrkbuf = InvalidBuffer;
scan->opaque = so;
scan->flags = 0x0;
/* register scan in case we change pages it's using */
_hash_regscan(scan);
return ((char *) scan);
}
/*
* hashrescan() -- rescan an index relation
* hashrescan() -- rescan an index relation
*/
void
hashrescan(IndexScanDesc scan, bool fromEnd, ScanKey scankey)
{
ItemPointer iptr;
HashScanOpaque so;
so = (HashScanOpaque) scan->opaque;
/* we hold a read lock on the current page in the scan */
if (ItemPointerIsValid(iptr = &(scan->currentItemData))) {
_hash_relbuf(scan->relation, so->hashso_curbuf, HASH_READ);
so->hashso_curbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
if (ItemPointerIsValid(iptr = &(scan->currentMarkData))) {
_hash_relbuf(scan->relation, so->hashso_mrkbuf, HASH_READ);
so->hashso_mrkbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
/* reset the scan key */
if (scan->numberOfKeys > 0) {
memmove(scan->keyData,
scankey,
scan->numberOfKeys * sizeof(ScanKeyData));
}
ItemPointer iptr;
HashScanOpaque so;
so = (HashScanOpaque) scan->opaque;
/* we hold a read lock on the current page in the scan */
if (ItemPointerIsValid(iptr = &(scan->currentItemData)))
{
_hash_relbuf(scan->relation, so->hashso_curbuf, HASH_READ);
so->hashso_curbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
if (ItemPointerIsValid(iptr = &(scan->currentMarkData)))
{
_hash_relbuf(scan->relation, so->hashso_mrkbuf, HASH_READ);
so->hashso_mrkbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
/* reset the scan key */
if (scan->numberOfKeys > 0)
{
memmove(scan->keyData,
scankey,
scan->numberOfKeys * sizeof(ScanKeyData));
}
}
/*
* hashendscan() -- close down a scan
* hashendscan() -- close down a scan
*/
void
hashendscan(IndexScanDesc scan)
{
ItemPointer iptr;
HashScanOpaque so;
so = (HashScanOpaque) scan->opaque;
/* release any locks we still hold */
if (ItemPointerIsValid(iptr = &(scan->currentItemData))) {
_hash_relbuf(scan->relation, so->hashso_curbuf, HASH_READ);
so->hashso_curbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
if (ItemPointerIsValid(iptr = &(scan->currentMarkData))) {
if (BufferIsValid(so->hashso_mrkbuf))
_hash_relbuf(scan->relation, so->hashso_mrkbuf, HASH_READ);
so->hashso_mrkbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
/* don't need scan registered anymore */
_hash_dropscan(scan);
/* be tidy */
pfree (scan->opaque);
ItemPointer iptr;
HashScanOpaque so;
so = (HashScanOpaque) scan->opaque;
/* release any locks we still hold */
if (ItemPointerIsValid(iptr = &(scan->currentItemData)))
{
_hash_relbuf(scan->relation, so->hashso_curbuf, HASH_READ);
so->hashso_curbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
if (ItemPointerIsValid(iptr = &(scan->currentMarkData)))
{
if (BufferIsValid(so->hashso_mrkbuf))
_hash_relbuf(scan->relation, so->hashso_mrkbuf, HASH_READ);
so->hashso_mrkbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
/* don't need scan registered anymore */
_hash_dropscan(scan);
/* be tidy */
pfree(scan->opaque);
}
/*
* hashmarkpos() -- save current scan position
* hashmarkpos() -- save current scan position
*
*/
void
hashmarkpos(IndexScanDesc scan)
{
ItemPointer iptr;
HashScanOpaque so;
/* see if we ever call this code. if we do, then so_mrkbuf a
* useful element in the scan->opaque structure. if this procedure
* is never called, so_mrkbuf should be removed from the scan->opaque
* structure.
*/
elog(NOTICE, "Hashmarkpos() called.");
so = (HashScanOpaque) scan->opaque;
/* release lock on old marked data, if any */
if (ItemPointerIsValid(iptr = &(scan->currentMarkData))) {
_hash_relbuf(scan->relation, so->hashso_mrkbuf, HASH_READ);
so->hashso_mrkbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
/* bump lock on currentItemData and copy to currentMarkData */
if (ItemPointerIsValid(&(scan->currentItemData))) {
so->hashso_mrkbuf = _hash_getbuf(scan->relation,
BufferGetBlockNumber(so->hashso_curbuf),
HASH_READ);
scan->currentMarkData = scan->currentItemData;
}
ItemPointer iptr;
HashScanOpaque so;
/*
* see if we ever call this code. if we do, then so_mrkbuf a useful
* element in the scan->opaque structure. if this procedure is never
* called, so_mrkbuf should be removed from the scan->opaque
* structure.
*/
elog(NOTICE, "Hashmarkpos() called.");
so = (HashScanOpaque) scan->opaque;
/* release lock on old marked data, if any */
if (ItemPointerIsValid(iptr = &(scan->currentMarkData)))
{
_hash_relbuf(scan->relation, so->hashso_mrkbuf, HASH_READ);
so->hashso_mrkbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
/* bump lock on currentItemData and copy to currentMarkData */
if (ItemPointerIsValid(&(scan->currentItemData)))
{
so->hashso_mrkbuf = _hash_getbuf(scan->relation,
BufferGetBlockNumber(so->hashso_curbuf),
HASH_READ);
scan->currentMarkData = scan->currentItemData;
}
}
/*
* hashrestrpos() -- restore scan to last saved position
* hashrestrpos() -- restore scan to last saved position
*/
void
hashrestrpos(IndexScanDesc scan)
{
ItemPointer iptr;
HashScanOpaque so;
/* see if we ever call this code. if we do, then so_mrkbuf a
* useful element in the scan->opaque structure. if this procedure
* is never called, so_mrkbuf should be removed from the scan->opaque
* structure.
*/
elog(NOTICE, "Hashrestrpos() called.");
so = (HashScanOpaque) scan->opaque;
/* release lock on current data, if any */
if (ItemPointerIsValid(iptr = &(scan->currentItemData))) {
_hash_relbuf(scan->relation, so->hashso_curbuf, HASH_READ);
so->hashso_curbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
/* bump lock on currentMarkData and copy to currentItemData */
if (ItemPointerIsValid(&(scan->currentMarkData))) {
so->hashso_curbuf =
_hash_getbuf(scan->relation,
BufferGetBlockNumber(so->hashso_mrkbuf),
HASH_READ);
scan->currentItemData = scan->currentMarkData;
}
ItemPointer iptr;
HashScanOpaque so;
/*
* see if we ever call this code. if we do, then so_mrkbuf a useful
* element in the scan->opaque structure. if this procedure is never
* called, so_mrkbuf should be removed from the scan->opaque
* structure.
*/
elog(NOTICE, "Hashrestrpos() called.");
so = (HashScanOpaque) scan->opaque;
/* release lock on current data, if any */
if (ItemPointerIsValid(iptr = &(scan->currentItemData)))
{
_hash_relbuf(scan->relation, so->hashso_curbuf, HASH_READ);
so->hashso_curbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
/* bump lock on currentMarkData and copy to currentItemData */
if (ItemPointerIsValid(&(scan->currentMarkData)))
{
so->hashso_curbuf =
_hash_getbuf(scan->relation,
BufferGetBlockNumber(so->hashso_mrkbuf),
HASH_READ);
scan->currentItemData = scan->currentMarkData;
}
}
/* stubs */
void
hashdelete(Relation rel, ItemPointer tid)
{
/* adjust any active scans that will be affected by this deletion */
_hash_adjscans(rel, tid);
/* delete the data from the page */
_hash_pagedel(rel, tid);
}
/* adjust any active scans that will be affected by this deletion */
_hash_adjscans(rel, tid);
/* delete the data from the page */
_hash_pagedel(rel, tid);
}
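As the NOTES block at the top of hash.c says, this file holds only the public interface routines; inside the backend they are reached through the index access-method machinery rather than called by name. Purely as a hypothetical sketch (not code from this commit), a forward scan of a single-key hash index would move through the entry points above roughly as follows; the helper name, the ForwardScanDirection constant, and the pfree() of each result are assumptions for illustration, while the signatures and casts follow the diff above.

#include <postgres.h>
#include <access/hash.h>

/* hypothetical helper, not part of this commit */
static void
scan_hash_index(Relation indexrel, ScanKey skey)
{
    IndexScanDesc scan;
    RetrieveIndexResult res;

    /* start a forward scan with one scan key */
    scan = (IndexScanDesc) hashbeginscan(indexrel, false, 1, skey);

    /* hashgettuple() returns NULL once the scan is exhausted */
    while ((res = (RetrieveIndexResult)
            hashgettuple(scan, ForwardScanDirection)) != NULL)
    {
        /*
         * res carries the TID of the matching heap tuple; a real caller
         * would fetch that tuple here.  The result is assumed to be
         * palloc'd, so release it once it has been used.
         */
        pfree(res);
    }

    /* release locks, unregister the scan, and free its opaque state */
    hashendscan(scan);
}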

src/backend/access/hash/hashfunc.c

@@ -1,17 +1,17 @@
/*-------------------------------------------------------------------------
*
* hashfunc.c--
* Comparison functions for hash access method.
* Comparison functions for hash access method.
*
* Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.3 1996/11/10 02:57:40 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.4 1997/09/07 04:37:53 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
* defined on hash tables, they compute the hash value of the argument.
* These functions are stored in pg_amproc. For each operator class
* defined on hash tables, they compute the hash value of the argument.
*
*-------------------------------------------------------------------------
*/
@@ -20,206 +20,223 @@
#include "access/hash.h"
uint32 hashint2(int16 key)
uint32
hashint2(int16 key)
{
return ((uint32) ~key);
return ((uint32) ~ key);
}
uint32 hashint4(uint32 key)
uint32
hashint4(uint32 key)
{
return (~key);
return (~key);
}
/* Hash function from Chris Torek. */
uint32 hashfloat4(float32 keyp)
uint32
hashfloat4(float32 keyp)
{
int len;
int loop;
uint32 h;
char *kp = (char *) keyp;
int len;
int loop;
uint32 h;
char *kp = (char *) keyp;
len = sizeof(float32data);
len = sizeof(float32data);
#define HASH4a h = (h << 5) - h + *kp++;
#define HASH4b h = (h << 5) + h + *kp++;
#define HASH4a h = (h << 5) - h + *kp++;
#define HASH4b h = (h << 5) + h + *kp++;
#define HASH4 HASH4b
h = 0;
if (len > 0) {
loop = (len + 8 - 1) >> 3;
switch (len & (8 - 1)) {
case 0:
do { /* All fall throughs */
HASH4;
case 7:
HASH4;
case 6:
HASH4;
case 5:
HASH4;
case 4:
HASH4;
case 3:
HASH4;
case 2:
HASH4;
case 1:
HASH4;
} while (--loop);
h = 0;
if (len > 0)
{
loop = (len + 8 - 1) >> 3;
switch (len & (8 - 1))
{
case 0:
do
{ /* All fall throughs */
HASH4;
case 7:
HASH4;
case 6:
HASH4;
case 5:
HASH4;
case 4:
HASH4;
case 3:
HASH4;
case 2:
HASH4;
case 1:
HASH4;
} while (--loop);
}
}
}
return (h);
}
return (h);
}
uint32 hashfloat8(float64 keyp)
uint32
hashfloat8(float64 keyp)
{
int len;
int loop;
uint32 h;
char *kp = (char *) keyp;
int len;
int loop;
uint32 h;
char *kp = (char *) keyp;
len = sizeof(float64data);
len = sizeof(float64data);
#define HASH4a h = (h << 5) - h + *kp++;
#define HASH4b h = (h << 5) + h + *kp++;
#define HASH4a h = (h << 5) - h + *kp++;
#define HASH4b h = (h << 5) + h + *kp++;
#define HASH4 HASH4b
h = 0;
if (len > 0) {
loop = (len + 8 - 1) >> 3;
switch (len & (8 - 1)) {
case 0:
do { /* All fall throughs */
HASH4;
case 7:
HASH4;
case 6:
HASH4;
case 5:
HASH4;
case 4:
HASH4;
case 3:
HASH4;
case 2:
HASH4;
case 1:
HASH4;
} while (--loop);
h = 0;
if (len > 0)
{
loop = (len + 8 - 1) >> 3;
switch (len & (8 - 1))
{
case 0:
do
{ /* All fall throughs */
HASH4;
case 7:
HASH4;
case 6:
HASH4;
case 5:
HASH4;
case 4:
HASH4;
case 3:
HASH4;
case 2:
HASH4;
case 1:
HASH4;
} while (--loop);
}
}
}
return (h);
}
uint32 hashoid(Oid key)
{
return ((uint32) ~key);
return (h);
}
uint32 hashchar(char key)
uint32
hashoid(Oid key)
{
int len;
uint32 h;
len = sizeof(char);
#define PRIME1 37
#define PRIME2 1048583
h = 0;
/* Convert char to integer */
h = h * PRIME1 ^ (key - ' ');
h %= PRIME2;
return (h);
}
uint32 hashchar2(uint16 intkey)
{
uint32 h;
int len;
char *key = (char *) &intkey;
h = 0;
len = sizeof(uint16);
/* Convert string to integer */
while (len--)
h = h * PRIME1 ^ (*key++ - ' ');
h %= PRIME2;
return (h);
}
uint32 hashchar4(uint32 intkey)
{
uint32 h;
int len;
char *key = (char *) &intkey;
h = 0;
len = sizeof(uint32);
/* Convert string to integer */
while (len--)
h = h * PRIME1 ^ (*key++ - ' ');
h %= PRIME2;
return (h);
}
uint32 hashchar8(char *key)
{
uint32 h;
int len;
h = 0;
len = sizeof(char8);
/* Convert string to integer */
while (len--)
h = h * PRIME1 ^ (*key++ - ' ');
h %= PRIME2;
return (h);
}
uint32 hashname(NameData *n)
{
uint32 h;
int len;
char *key;
key = n->data;
h = 0;
len = NAMEDATALEN;
/* Convert string to integer */
while (len--)
h = h * PRIME1 ^ (*key++ - ' ');
h %= PRIME2;
return (h);
return ((uint32) ~ key);
}
uint32 hashchar16(char *key)
uint32
hashchar(char key)
{
uint32 h;
int len;
h = 0;
len = sizeof(char16);
/* Convert string to integer */
while (len--)
h = h * PRIME1 ^ (*key++ - ' ');
h %= PRIME2;
return (h);
int len;
uint32 h;
len = sizeof(char);
#define PRIME1 37
#define PRIME2 1048583
h = 0;
/* Convert char to integer */
h = h * PRIME1 ^ (key - ' ');
h %= PRIME2;
return (h);
}
uint32
hashchar2(uint16 intkey)
{
uint32 h;
int len;
char *key = (char *) &intkey;
h = 0;
len = sizeof(uint16);
/* Convert string to integer */
while (len--)
h = h * PRIME1 ^ (*key++ - ' ');
h %= PRIME2;
return (h);
}
uint32
hashchar4(uint32 intkey)
{
uint32 h;
int len;
char *key = (char *) &intkey;
h = 0;
len = sizeof(uint32);
/* Convert string to integer */
while (len--)
h = h * PRIME1 ^ (*key++ - ' ');
h %= PRIME2;
return (h);
}
uint32
hashchar8(char *key)
{
uint32 h;
int len;
h = 0;
len = sizeof(char8);
/* Convert string to integer */
while (len--)
h = h * PRIME1 ^ (*key++ - ' ');
h %= PRIME2;
return (h);
}
uint32
hashname(NameData * n)
{
uint32 h;
int len;
char *key;
key = n->data;
h = 0;
len = NAMEDATALEN;
/* Convert string to integer */
while (len--)
h = h * PRIME1 ^ (*key++ - ' ');
h %= PRIME2;
return (h);
}
uint32
hashchar16(char *key)
{
uint32 h;
int len;
h = 0;
len = sizeof(char16);
/* Convert string to integer */
while (len--)
h = h * PRIME1 ^ (*key++ - ' ');
h %= PRIME2;
return (h);
}
@@ -234,45 +251,49 @@ uint32 hashchar16(char *key)
*
* "OZ's original sdbm hash"
*/
uint32 hashtext(struct varlena *key)
uint32
hashtext(struct varlena * key)
{
int keylen;
char *keydata;
uint32 n;
int loop;
int keylen;
char *keydata;
uint32 n;
int loop;
keydata = VARDATA(key);
keylen = VARSIZE(key);
keydata = VARDATA(key);
keylen = VARSIZE(key);
/* keylen includes the four bytes in which string keylength is stored */
keylen -= sizeof(VARSIZE(key));
/* keylen includes the four bytes in which string keylength is stored */
keylen -= sizeof(VARSIZE(key));
#define HASHC n = *keydata++ + 65599 * n
#define HASHC n = *keydata++ + 65599 * n
n = 0;
if (keylen > 0) {
loop = (keylen + 8 - 1) >> 3;
switch (keylen & (8 - 1)) {
case 0:
do { /* All fall throughs */
HASHC;
case 7:
HASHC;
case 6:
HASHC;
case 5:
HASHC;
case 4:
HASHC;
case 3:
HASHC;
case 2:
HASHC;
case 1:
HASHC;
} while (--loop);
n = 0;
if (keylen > 0)
{
loop = (keylen + 8 - 1) >> 3;
switch (keylen & (8 - 1))
{
case 0:
do
{ /* All fall throughs */
HASHC;
case 7:
HASHC;
case 6:
HASHC;
case 5:
HASHC;
case 4:
HASHC;
case 3:
HASHC;
case 2:
HASHC;
case 1:
HASHC;
} while (--loop);
}
}
}
return (n);
}
return (n);
}
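The construct shared by hashfloat4(), hashfloat8() and hashtext() above, a switch whose cases sit inside a do/while body (the "All fall throughs" comment), is a Duff's-device-style unrolled loop: the switch jumps into the middle of the body so that the first pass consumes len mod 8 bytes and every later pass consumes exactly eight. A standalone sketch of the same technique, applied to the sdbm-style byte hash that hashtext() uses (hypothetical code, not part of this commit):

#include <stddef.h>

/* hypothetical sketch, not part of this commit */
static unsigned int
sdbm_hash(const char *kp, size_t len)
{
    unsigned int h = 0;
    size_t loop;

#define HASHC h = *kp++ + 65599 * h

    if (len > 0)
    {
        loop = (len + 8 - 1) >> 3;      /* number of passes, rounded up */
        switch (len & (8 - 1))          /* leftover bytes go on the first pass */
        {
            case 0:
                do
                {                       /* All fall throughs */
                    HASHC;
            case 7:
                    HASHC;
            case 6:
                    HASHC;
            case 5:
                    HASHC;
            case 4:
                    HASHC;
            case 3:
                    HASHC;
            case 2:
                    HASHC;
            case 1:
                    HASHC;
                } while (--loop);
        }
    }
    return h;
}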

src/backend/access/hash/hashinsert.c

@@ -1,19 +1,19 @@
/*-------------------------------------------------------------------------
*
* hashinsert.c--
* Item insertion in hash tables for Postgres.
* Item insertion in hash tables for Postgres.
*
* Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashinsert.c,v 1.8 1997/08/12 22:51:30 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashinsert.c,v 1.9 1997/09/07 04:37:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include <postgres.h>
#include <access/hash.h>
#include <storage/bufmgr.h>
#include <utils/memutils.h>
@@ -22,211 +22,221 @@ static InsertIndexResult _hash_insertonpg(Relation rel, Buffer buf, int keysz, S
static OffsetNumber _hash_pgaddtup(Relation rel, Buffer buf, int keysz, ScanKey itup_scankey, Size itemsize, HashItem hitem);
/*
* _hash_doinsert() -- Handle insertion of a single HashItem in the table.
* _hash_doinsert() -- Handle insertion of a single HashItem in the table.
*
* This routine is called by the public interface routines, hashbuild
* and hashinsert. By here, hashitem is filled in, and has a unique
* (xid, seqno) pair. The datum to be used as a "key" is in the
* hashitem.
* This routine is called by the public interface routines, hashbuild
* and hashinsert. By here, hashitem is filled in, and has a unique
* (xid, seqno) pair. The datum to be used as a "key" is in the
* hashitem.
*/
InsertIndexResult
_hash_doinsert(Relation rel, HashItem hitem)
{
Buffer buf;
Buffer metabuf;
BlockNumber blkno;
HashMetaPage metap;
IndexTuple itup;
InsertIndexResult res;
ScanKey itup_scankey;
int natts;
Page page;
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
metap = (HashMetaPage) BufferGetPage(metabuf);
_hash_checkpage((Page) metap, LH_META_PAGE);
/* we need a scan key to do our search, so build one */
itup = &(hitem->hash_itup);
if ((natts = rel->rd_rel->relnatts) != 1)
elog(WARN, "Hash indices valid for only one index key.");
itup_scankey = _hash_mkscankey(rel, itup, metap);
/*
* find the first page in the bucket chain containing this key and
* place it in buf. _hash_search obtains a read lock for us.
*/
_hash_search(rel, natts, itup_scankey, &buf, metap);
page = BufferGetPage(buf);
_hash_checkpage(page, LH_BUCKET_PAGE);
Buffer buf;
Buffer metabuf;
BlockNumber blkno;
HashMetaPage metap;
IndexTuple itup;
InsertIndexResult res;
ScanKey itup_scankey;
int natts;
Page page;
/*
* trade in our read lock for a write lock so that we can do the
* insertion.
*/
blkno = BufferGetBlockNumber(buf);
_hash_relbuf(rel, buf, HASH_READ);
buf = _hash_getbuf(rel, blkno, HASH_WRITE);
/*
* XXX btree comment (haven't decided what to do in hash): don't
* think the bucket can be split while we're reading the metapage.
*
* If the page was split between the time that we surrendered our
* read lock and acquired our write lock, then this page may no
* longer be the right place for the key we want to insert.
*/
/* do the insertion */
res = _hash_insertonpg(rel, buf, natts, itup_scankey,
hitem, metabuf);
/* be tidy */
_hash_freeskey(itup_scankey);
return (res);
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
metap = (HashMetaPage) BufferGetPage(metabuf);
_hash_checkpage((Page) metap, LH_META_PAGE);
/* we need a scan key to do our search, so build one */
itup = &(hitem->hash_itup);
if ((natts = rel->rd_rel->relnatts) != 1)
elog(WARN, "Hash indices valid for only one index key.");
itup_scankey = _hash_mkscankey(rel, itup, metap);
/*
* find the first page in the bucket chain containing this key and
* place it in buf. _hash_search obtains a read lock for us.
*/
_hash_search(rel, natts, itup_scankey, &buf, metap);
page = BufferGetPage(buf);
_hash_checkpage(page, LH_BUCKET_PAGE);
/*
* trade in our read lock for a write lock so that we can do the
* insertion.
*/
blkno = BufferGetBlockNumber(buf);
_hash_relbuf(rel, buf, HASH_READ);
buf = _hash_getbuf(rel, blkno, HASH_WRITE);
/*
* XXX btree comment (haven't decided what to do in hash): don't think
* the bucket can be split while we're reading the metapage.
*
* If the page was split between the time that we surrendered our read
* lock and acquired our write lock, then this page may no longer be
* the right place for the key we want to insert.
*/
/* do the insertion */
res = _hash_insertonpg(rel, buf, natts, itup_scankey,
hitem, metabuf);
/* be tidy */
_hash_freeskey(itup_scankey);
return (res);
}
/*
* _hash_insertonpg() -- Insert a tuple on a particular page in the table.
* _hash_insertonpg() -- Insert a tuple on a particular page in the table.
*
* This recursive procedure does the following things:
* This recursive procedure does the following things:
*
* + if necessary, splits the target page.
* + inserts the tuple.
* + if necessary, splits the target page.
* + inserts the tuple.
*
* On entry, we must have the right buffer on which to do the
* insertion, and the buffer must be pinned and locked. On return,
* we will have dropped both the pin and the write lock on the buffer.
* On entry, we must have the right buffer on which to do the
* insertion, and the buffer must be pinned and locked. On return,
* we will have dropped both the pin and the write lock on the buffer.
*
*/
static InsertIndexResult
static InsertIndexResult
_hash_insertonpg(Relation rel,
Buffer buf,
int keysz,
ScanKey scankey,
HashItem hitem,
Buffer metabuf)
Buffer buf,
int keysz,
ScanKey scankey,
HashItem hitem,
Buffer metabuf)
{
InsertIndexResult res;
Page page;
BlockNumber itup_blkno;
OffsetNumber itup_off;
int itemsz;
HashPageOpaque pageopaque;
bool do_expand = false;
Buffer ovflbuf;
HashMetaPage metap;
Bucket bucket;
metap = (HashMetaPage) BufferGetPage(metabuf);
_hash_checkpage((Page) metap, LH_META_PAGE);
page = BufferGetPage(buf);
_hash_checkpage(page, LH_BUCKET_PAGE|LH_OVERFLOW_PAGE);
pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
bucket = pageopaque->hasho_bucket;
InsertIndexResult res;
Page page;
BlockNumber itup_blkno;
OffsetNumber itup_off;
int itemsz;
HashPageOpaque pageopaque;
bool do_expand = false;
Buffer ovflbuf;
HashMetaPage metap;
Bucket bucket;
itemsz = IndexTupleDSize(hitem->hash_itup)
+ (sizeof(HashItemData) - sizeof(IndexTupleData));
itemsz = DOUBLEALIGN(itemsz);
while (PageGetFreeSpace(page) < itemsz) {
/*
* no space on this page; check for an overflow page
*/
if (BlockNumberIsValid(pageopaque->hasho_nextblkno)) {
/*
* ovfl page exists; go get it. if it doesn't have room,
* we'll find out next pass through the loop test above.
*/
ovflbuf = _hash_getbuf(rel, pageopaque->hasho_nextblkno,
HASH_WRITE);
_hash_relbuf(rel, buf, HASH_WRITE);
buf = ovflbuf;
page = BufferGetPage(buf);
} else {
/*
* we're at the end of the bucket chain and we haven't
* found a page with enough room. allocate a new overflow
* page.
*/
do_expand = true;
ovflbuf = _hash_addovflpage(rel, &metabuf, buf);
_hash_relbuf(rel, buf, HASH_WRITE);
buf = ovflbuf;
page = BufferGetPage(buf);
metap = (HashMetaPage) BufferGetPage(metabuf);
_hash_checkpage((Page) metap, LH_META_PAGE);
if (PageGetFreeSpace(page) < itemsz) {
/* it doesn't fit on an empty page -- give up */
elog(WARN, "hash item too large");
}
}
_hash_checkpage(page, LH_OVERFLOW_PAGE);
page = BufferGetPage(buf);
_hash_checkpage(page, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
Assert(pageopaque->hasho_bucket == bucket);
}
bucket = pageopaque->hasho_bucket;
itup_off = _hash_pgaddtup(rel, buf, keysz, scankey, itemsz, hitem);
itup_blkno = BufferGetBlockNumber(buf);
/* by here, the new tuple is inserted */
res = (InsertIndexResult) palloc(sizeof(InsertIndexResultData));
ItemPointerSet(&(res->pointerData), itup_blkno, itup_off);
if (res != NULL) {
/*
* Increment the number of keys in the table.
* We switch lock access type just for a moment
* to allow greater accessibility to the metapage.
*/
metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf,
HASH_READ, HASH_WRITE);
metap->hashm_nkeys += 1;
metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf,
HASH_WRITE, HASH_READ);
}
_hash_wrtbuf(rel, buf);
if (do_expand ||
(metap->hashm_nkeys / (metap->hashm_maxbucket + 1))
> metap->hashm_ffactor) {
_hash_expandtable(rel, metabuf);
}
_hash_relbuf(rel, metabuf, HASH_READ);
return (res);
}
itemsz = IndexTupleDSize(hitem->hash_itup)
+ (sizeof(HashItemData) - sizeof(IndexTupleData));
itemsz = DOUBLEALIGN(itemsz);
while (PageGetFreeSpace(page) < itemsz)
{
/*
* no space on this page; check for an overflow page
*/
if (BlockNumberIsValid(pageopaque->hasho_nextblkno))
{
/*
* ovfl page exists; go get it. if it doesn't have room,
* we'll find out next pass through the loop test above.
*/
ovflbuf = _hash_getbuf(rel, pageopaque->hasho_nextblkno,
HASH_WRITE);
_hash_relbuf(rel, buf, HASH_WRITE);
buf = ovflbuf;
page = BufferGetPage(buf);
}
else
{
/*
* we're at the end of the bucket chain and we haven't found a
* page with enough room. allocate a new overflow page.
*/
do_expand = true;
ovflbuf = _hash_addovflpage(rel, &metabuf, buf);
_hash_relbuf(rel, buf, HASH_WRITE);
buf = ovflbuf;
page = BufferGetPage(buf);
if (PageGetFreeSpace(page) < itemsz)
{
/* it doesn't fit on an empty page -- give up */
elog(WARN, "hash item too large");
}
}
_hash_checkpage(page, LH_OVERFLOW_PAGE);
pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
Assert(pageopaque->hasho_bucket == bucket);
}
itup_off = _hash_pgaddtup(rel, buf, keysz, scankey, itemsz, hitem);
itup_blkno = BufferGetBlockNumber(buf);
/* by here, the new tuple is inserted */
res = (InsertIndexResult) palloc(sizeof(InsertIndexResultData));
ItemPointerSet(&(res->pointerData), itup_blkno, itup_off);
if (res != NULL)
{
/*
* Increment the number of keys in the table. We switch lock
* access type just for a moment to allow greater accessibility to
* the metapage.
*/
metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf,
HASH_READ, HASH_WRITE);
metap->hashm_nkeys += 1;
metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf,
HASH_WRITE, HASH_READ);
}
_hash_wrtbuf(rel, buf);
if (do_expand ||
(metap->hashm_nkeys / (metap->hashm_maxbucket + 1))
> metap->hashm_ffactor)
{
_hash_expandtable(rel, metabuf);
}
_hash_relbuf(rel, metabuf, HASH_READ);
return (res);
}
/*
* _hash_pgaddtup() -- add a tuple to a particular page in the index.
* _hash_pgaddtup() -- add a tuple to a particular page in the index.
*
* This routine adds the tuple to the page as requested, and keeps the
* write lock and reference associated with the page's buffer. It is
* an error to call pgaddtup() without a write lock and reference.
* This routine adds the tuple to the page as requested, and keeps the
* write lock and reference associated with the page's buffer. It is
* an error to call pgaddtup() without a write lock and reference.
*/
static OffsetNumber
static OffsetNumber
_hash_pgaddtup(Relation rel,
Buffer buf,
int keysz,
ScanKey itup_scankey,
Size itemsize,
HashItem hitem)
Buffer buf,
int keysz,
ScanKey itup_scankey,
Size itemsize,
HashItem hitem)
{
OffsetNumber itup_off;
Page page;
page = BufferGetPage(buf);
_hash_checkpage(page, LH_BUCKET_PAGE|LH_OVERFLOW_PAGE);
OffsetNumber itup_off;
Page page;
itup_off = OffsetNumberNext(PageGetMaxOffsetNumber(page));
PageAddItem(page, (Item) hitem, itemsize, itup_off, LP_USED);
/* write the buffer, but hold our lock */
_hash_wrtnorelbuf(rel, buf);
return (itup_off);
page = BufferGetPage(buf);
_hash_checkpage(page, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
itup_off = OffsetNumberNext(PageGetMaxOffsetNumber(page));
PageAddItem(page, (Item) hitem, itemsize, itup_off, LP_USED);
/* write the buffer, but hold our lock */
_hash_wrtnorelbuf(rel, buf);
return (itup_off);
}
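The split trigger at the end of _hash_insertonpg() above fires either when a new overflow page had to be chained on (do_expand) or when the average bucket load, hashm_nkeys divided by the number of buckets, exceeds the fill factor hashm_ffactor. A minimal sketch of that arithmetic with assumed example values (not taken from this commit):

/* hypothetical illustration of the expansion test in _hash_insertonpg() */
static int
needs_expansion(unsigned int nkeys, unsigned int maxbucket, unsigned int ffactor)
{
    /*
     * maxbucket is the highest bucket number in use, so maxbucket + 1
     * buckets exist.  E.g. nkeys = 1700, maxbucket = 99, ffactor = 16:
     * 1700 / 100 = 17 > 16, so _hash_expandtable() would be called.
     */
    return (nkeys / (maxbucket + 1)) > ffactor;
}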

File diff suppressed because it is too large.

File diff suppressed because it is too large.

src/backend/access/hash/hashscan.c

@@ -1,160 +1,167 @@
/*-------------------------------------------------------------------------
*
* hashscan.c--
* manage scans on hash tables
* manage scans on hash tables
*
* Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashscan.c,v 1.8 1996/11/15 18:36:31 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashscan.c,v 1.9 1997/09/07 04:38:01 momjian Exp $
*
* NOTES
* Because we can be doing an index scan on a relation while we
* update it, we need to avoid missing data that moves around in
* the index. The routines and global variables in this file
* guarantee that all scans in the local address space stay
* correctly positioned. This is all we need to worry about, since
* write locking guarantees that no one else will be on the same
* page at the same time as we are.
* Because we can be doing an index scan on a relation while we
* update it, we need to avoid missing data that moves around in
* the index. The routines and global variables in this file
* guarantee that all scans in the local address space stay
* correctly positioned. This is all we need to worry about, since
* write locking guarantees that no one else will be on the same
* page at the same time as we are.
*
* The scheme is to manage a list of active scans in the current
* backend. Whenever we add or remove records from an index, we
* check the list of active scans to see if any has been affected.
* A scan is affected only if it is on the same relation, and the
* same page, as the update.
* The scheme is to manage a list of active scans in the current
* backend. Whenever we add or remove records from an index, we
* check the list of active scans to see if any has been affected.
* A scan is affected only if it is on the same relation, and the
* same page, as the update.
*
*-------------------------------------------------------------------------
*/
#include <postgres.h>
#include <access/hash.h>
static void _hash_scandel(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno);
static bool _hash_scantouched(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno);
static void _hash_scandel(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno);
static bool _hash_scantouched(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno);
typedef struct HashScanListData {
IndexScanDesc hashsl_scan;
struct HashScanListData *hashsl_next;
} HashScanListData;
typedef struct HashScanListData
{
IndexScanDesc hashsl_scan;
struct HashScanListData *hashsl_next;
} HashScanListData;
typedef HashScanListData *HashScanList;
typedef HashScanListData *HashScanList;
static HashScanList HashScans = (HashScanList) NULL;
static HashScanList HashScans = (HashScanList) NULL;
/*
* _Hash_regscan() -- register a new scan.
* _Hash_regscan() -- register a new scan.
*/
void
_hash_regscan(IndexScanDesc scan)
{
HashScanList new_el;
new_el = (HashScanList) palloc(sizeof(HashScanListData));
new_el->hashsl_scan = scan;
new_el->hashsl_next = HashScans;
HashScans = new_el;
HashScanList new_el;
new_el = (HashScanList) palloc(sizeof(HashScanListData));
new_el->hashsl_scan = scan;
new_el->hashsl_next = HashScans;
HashScans = new_el;
}
/*
* _hash_dropscan() -- drop a scan from the scan list
* _hash_dropscan() -- drop a scan from the scan list
*/
void
_hash_dropscan(IndexScanDesc scan)
{
HashScanList chk, last;
last = (HashScanList) NULL;
for (chk = HashScans;
chk != (HashScanList) NULL && chk->hashsl_scan != scan;
chk = chk->hashsl_next) {
last = chk;
}
if (chk == (HashScanList) NULL)
elog(WARN, "hash scan list trashed; can't find 0x%lx", scan);
if (last == (HashScanList) NULL)
HashScans = chk->hashsl_next;
else
last->hashsl_next = chk->hashsl_next;
pfree (chk);
HashScanList chk,
last;
last = (HashScanList) NULL;
for (chk = HashScans;
chk != (HashScanList) NULL && chk->hashsl_scan != scan;
chk = chk->hashsl_next)
{
last = chk;
}
if (chk == (HashScanList) NULL)
elog(WARN, "hash scan list trashed; can't find 0x%lx", scan);
if (last == (HashScanList) NULL)
HashScans = chk->hashsl_next;
else
last->hashsl_next = chk->hashsl_next;
pfree(chk);
}
void
_hash_adjscans(Relation rel, ItemPointer tid)
{
HashScanList l;
Oid relid;
relid = rel->rd_id;
for (l = HashScans; l != (HashScanList) NULL; l = l->hashsl_next) {
if (relid == l->hashsl_scan->relation->rd_id)
_hash_scandel(l->hashsl_scan, ItemPointerGetBlockNumber(tid),
ItemPointerGetOffsetNumber(tid));
}
HashScanList l;
Oid relid;
relid = rel->rd_id;
for (l = HashScans; l != (HashScanList) NULL; l = l->hashsl_next)
{
if (relid == l->hashsl_scan->relation->rd_id)
_hash_scandel(l->hashsl_scan, ItemPointerGetBlockNumber(tid),
ItemPointerGetOffsetNumber(tid));
}
}
static void
_hash_scandel(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno)
{
ItemPointer current;
Buffer buf;
Buffer metabuf;
HashScanOpaque so;
if (!_hash_scantouched(scan, blkno, offno))
return;
metabuf = _hash_getbuf(scan->relation, HASH_METAPAGE, HASH_READ);
so = (HashScanOpaque) scan->opaque;
buf = so->hashso_curbuf;
current = &(scan->currentItemData);
if (ItemPointerIsValid(current)
&& ItemPointerGetBlockNumber(current) == blkno
&& ItemPointerGetOffsetNumber(current) >= offno) {
_hash_step(scan, &buf, BackwardScanDirection, metabuf);
so->hashso_curbuf = buf;
}
current = &(scan->currentMarkData);
if (ItemPointerIsValid(current)
&& ItemPointerGetBlockNumber(current) == blkno
&& ItemPointerGetOffsetNumber(current) >= offno) {
ItemPointerData tmp;
tmp = *current;
*current = scan->currentItemData;
scan->currentItemData = tmp;
_hash_step(scan, &buf, BackwardScanDirection, metabuf);
so->hashso_mrkbuf = buf;
tmp = *current;
*current = scan->currentItemData;
scan->currentItemData = tmp;
}
ItemPointer current;
Buffer buf;
Buffer metabuf;
HashScanOpaque so;
if (!_hash_scantouched(scan, blkno, offno))
return;
metabuf = _hash_getbuf(scan->relation, HASH_METAPAGE, HASH_READ);
so = (HashScanOpaque) scan->opaque;
buf = so->hashso_curbuf;
current = &(scan->currentItemData);
if (ItemPointerIsValid(current)
&& ItemPointerGetBlockNumber(current) == blkno
&& ItemPointerGetOffsetNumber(current) >= offno)
{
_hash_step(scan, &buf, BackwardScanDirection, metabuf);
so->hashso_curbuf = buf;
}
current = &(scan->currentMarkData);
if (ItemPointerIsValid(current)
&& ItemPointerGetBlockNumber(current) == blkno
&& ItemPointerGetOffsetNumber(current) >= offno)
{
ItemPointerData tmp;
tmp = *current;
*current = scan->currentItemData;
scan->currentItemData = tmp;
_hash_step(scan, &buf, BackwardScanDirection, metabuf);
so->hashso_mrkbuf = buf;
tmp = *current;
*current = scan->currentItemData;
scan->currentItemData = tmp;
}
}
static bool
static bool
_hash_scantouched(IndexScanDesc scan,
BlockNumber blkno,
OffsetNumber offno)
BlockNumber blkno,
OffsetNumber offno)
{
ItemPointer current;
current = &(scan->currentItemData);
if (ItemPointerIsValid(current)
&& ItemPointerGetBlockNumber(current) == blkno
&& ItemPointerGetOffsetNumber(current) >= offno)
return (true);
current = &(scan->currentMarkData);
if (ItemPointerIsValid(current)
&& ItemPointerGetBlockNumber(current) == blkno
&& ItemPointerGetOffsetNumber(current) >= offno)
return (true);
return (false);
ItemPointer current;
current = &(scan->currentItemData);
if (ItemPointerIsValid(current)
&& ItemPointerGetBlockNumber(current) == blkno
&& ItemPointerGetOffsetNumber(current) >= offno)
return (true);
current = &(scan->currentMarkData);
if (ItemPointerIsValid(current)
&& ItemPointerGetBlockNumber(current) == blkno
&& ItemPointerGetOffsetNumber(current) >= offno)
return (true);
return (false);
}

src/backend/access/hash/hashsearch.c

@@ -1,423 +1,467 @@
/*-------------------------------------------------------------------------
*
* hashsearch.c--
* search code for postgres hash tables
* search code for postgres hash tables
*
* Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashsearch.c,v 1.10 1997/06/28 05:45:40 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashsearch.c,v 1.11 1997/09/07 04:38:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include <postgres.h>
#include <access/hash.h>
#include <storage/bufmgr.h>
#ifndef HAVE_MEMMOVE
# include "regex/utils.h"
#include "regex/utils.h"
#else
# include <string.h>
#endif
#include <string.h>
#endif
/*
* _hash_search() -- Finds the page/bucket that the contains the
* scankey and loads it into *bufP. the buffer has a read lock.
* _hash_search() -- Finds the page/bucket that the contains the
* scankey and loads it into *bufP. the buffer has a read lock.
*/
void
_hash_search(Relation rel,
int keysz,
ScanKey scankey,
Buffer *bufP,
HashMetaPage metap)
int keysz,
ScanKey scankey,
Buffer * bufP,
HashMetaPage metap)
{
BlockNumber blkno;
Datum keyDatum;
Bucket bucket;
BlockNumber blkno;
Datum keyDatum;
Bucket bucket;
if (scankey == (ScanKey) NULL ||
(keyDatum = scankey[0].sk_argument) == (Datum) NULL) {
/*
* If the scankey argument is NULL, all tuples will satisfy
* the scan so we start the scan at the first bucket (bucket
* 0).
*/
bucket = 0;
} else {
bucket = _hash_call(rel, metap, keyDatum);
}
if (scankey == (ScanKey) NULL ||
(keyDatum = scankey[0].sk_argument) == (Datum) NULL)
{
blkno = BUCKET_TO_BLKNO(bucket);
*bufP = _hash_getbuf(rel, blkno, HASH_READ);
/*
* If the scankey argument is NULL, all tuples will satisfy the
* scan so we start the scan at the first bucket (bucket 0).
*/
bucket = 0;
}
else
{
bucket = _hash_call(rel, metap, keyDatum);
}
blkno = BUCKET_TO_BLKNO(bucket);
*bufP = _hash_getbuf(rel, blkno, HASH_READ);
}
/*
* _hash_next() -- Get the next item in a scan.
* _hash_next() -- Get the next item in a scan.
*
* On entry, we have a valid currentItemData in the scan, and a
* read lock on the page that contains that item. We do not have
* the page pinned. We return the next item in the scan. On
* exit, we have the page containing the next item locked but not
* pinned.
* On entry, we have a valid currentItemData in the scan, and a
* read lock on the page that contains that item. We do not have
* the page pinned. We return the next item in the scan. On
* exit, we have the page containing the next item locked but not
* pinned.
*/
RetrieveIndexResult
_hash_next(IndexScanDesc scan, ScanDirection dir)
{
Relation rel;
Buffer buf;
Buffer metabuf;
Page page;
OffsetNumber offnum;
RetrieveIndexResult res;
ItemPointer current;
HashItem hitem;
IndexTuple itup;
HashScanOpaque so;
Relation rel;
Buffer buf;
Buffer metabuf;
Page page;
OffsetNumber offnum;
RetrieveIndexResult res;
ItemPointer current;
HashItem hitem;
IndexTuple itup;
HashScanOpaque so;
rel = scan->relation;
so = (HashScanOpaque) scan->opaque;
current = &(scan->currentItemData);
rel = scan->relation;
so = (HashScanOpaque) scan->opaque;
current = &(scan->currentItemData);
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
/*
* XXX 10 may 91: somewhere there's a bug in our management of the
* cached buffer for this scan. wei discovered it. the following
* is a workaround so he can work until i figure out what's going on.
*/
/*
* XXX 10 may 91: somewhere there's a bug in our management of the
* cached buffer for this scan. wei discovered it. the following is
* a workaround so he can work until i figure out what's going on.
*/
if (!BufferIsValid(so->hashso_curbuf)) {
so->hashso_curbuf = _hash_getbuf(rel,
ItemPointerGetBlockNumber(current),
HASH_READ);
}
if (!BufferIsValid(so->hashso_curbuf))
{
so->hashso_curbuf = _hash_getbuf(rel,
ItemPointerGetBlockNumber(current),
HASH_READ);
}
/* we still have the buffer pinned and locked */
buf = so->hashso_curbuf;
/* we still have the buffer pinned and locked */
buf = so->hashso_curbuf;
/*
* step to next valid tuple. note that _hash_step releases our
* lock on 'metabuf'; if we switch to a new 'buf' while looking
* for the next tuple, we come back with a lock on that buffer.
*/
if (!_hash_step(scan, &buf, dir, metabuf)) {
return ((RetrieveIndexResult) NULL);
}
/*
* step to next valid tuple. note that _hash_step releases our lock
* on 'metabuf'; if we switch to a new 'buf' while looking for the
* next tuple, we come back with a lock on that buffer.
*/
if (!_hash_step(scan, &buf, dir, metabuf))
{
return ((RetrieveIndexResult) NULL);
}
/* if we're here, _hash_step found a valid tuple */
current = &(scan->currentItemData);
offnum = ItemPointerGetOffsetNumber(current);
page = BufferGetPage(buf);
_hash_checkpage(page, LH_BUCKET_PAGE|LH_OVERFLOW_PAGE);
hitem = (HashItem) PageGetItem(page, PageGetItemId(page, offnum));
itup = &hitem->hash_itup;
res = FormRetrieveIndexResult(current, &(itup->t_tid));
/* if we're here, _hash_step found a valid tuple */
current = &(scan->currentItemData);
offnum = ItemPointerGetOffsetNumber(current);
page = BufferGetPage(buf);
_hash_checkpage(page, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
hitem = (HashItem) PageGetItem(page, PageGetItemId(page, offnum));
itup = &hitem->hash_itup;
res = FormRetrieveIndexResult(current, &(itup->t_tid));
return (res);
return (res);
}
static void
_hash_readnext(Relation rel,
Buffer *bufp, Page *pagep, HashPageOpaque *opaquep)
Buffer * bufp, Page * pagep, HashPageOpaque * opaquep)
{
BlockNumber blkno;
BlockNumber blkno;
blkno = (*opaquep)->hasho_nextblkno;
_hash_relbuf(rel, *bufp, HASH_READ);
*bufp = InvalidBuffer;
if (BlockNumberIsValid(blkno)) {
*bufp = _hash_getbuf(rel, blkno, HASH_READ);
*pagep = BufferGetPage(*bufp);
_hash_checkpage(*pagep, LH_OVERFLOW_PAGE);
*opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep);
Assert(!PageIsEmpty(*pagep));
}
blkno = (*opaquep)->hasho_nextblkno;
_hash_relbuf(rel, *bufp, HASH_READ);
*bufp = InvalidBuffer;
if (BlockNumberIsValid(blkno))
{
*bufp = _hash_getbuf(rel, blkno, HASH_READ);
*pagep = BufferGetPage(*bufp);
_hash_checkpage(*pagep, LH_OVERFLOW_PAGE);
*opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep);
Assert(!PageIsEmpty(*pagep));
}
}
static void
_hash_readprev(Relation rel,
Buffer *bufp, Page *pagep, HashPageOpaque *opaquep)
Buffer * bufp, Page * pagep, HashPageOpaque * opaquep)
{
BlockNumber blkno;
BlockNumber blkno;
blkno = (*opaquep)->hasho_prevblkno;
_hash_relbuf(rel, *bufp, HASH_READ);
*bufp = InvalidBuffer;
if (BlockNumberIsValid(blkno)) {
*bufp = _hash_getbuf(rel, blkno, HASH_READ);
*pagep = BufferGetPage(*bufp);
_hash_checkpage(*pagep, LH_BUCKET_PAGE|LH_OVERFLOW_PAGE);
*opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep);
if (PageIsEmpty(*pagep)) {
Assert((*opaquep)->hasho_flag & LH_BUCKET_PAGE);
_hash_relbuf(rel, *bufp, HASH_READ);
*bufp = InvalidBuffer;
blkno = (*opaquep)->hasho_prevblkno;
_hash_relbuf(rel, *bufp, HASH_READ);
*bufp = InvalidBuffer;
if (BlockNumberIsValid(blkno))
{
*bufp = _hash_getbuf(rel, blkno, HASH_READ);
*pagep = BufferGetPage(*bufp);
_hash_checkpage(*pagep, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
*opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep);
if (PageIsEmpty(*pagep))
{
Assert((*opaquep)->hasho_flag & LH_BUCKET_PAGE);
_hash_relbuf(rel, *bufp, HASH_READ);
*bufp = InvalidBuffer;
}
}
}
}
/*
* _hash_first() -- Find the first item in a scan.
* _hash_first() -- Find the first item in a scan.
*
* Return the RetrieveIndexResult of the first item in the tree that
* satisfies the qualificatin associated with the scan descriptor. On
* exit, the page containing the current index tuple is read locked
* and pinned, and the scan's opaque data entry is updated to
* include the buffer.
* Return the RetrieveIndexResult of the first item in the tree that
* satisfies the qualificatin associated with the scan descriptor. On
* exit, the page containing the current index tuple is read locked
* and pinned, and the scan's opaque data entry is updated to
* include the buffer.
*/
RetrieveIndexResult
_hash_first(IndexScanDesc scan, ScanDirection dir)
{
Relation rel;
Buffer buf;
Buffer metabuf;
Page page;
HashPageOpaque opaque;
HashMetaPage metap;
HashItem hitem;
IndexTuple itup;
ItemPointer current;
OffsetNumber offnum;
RetrieveIndexResult res;
HashScanOpaque so;
Relation rel;
Buffer buf;
Buffer metabuf;
Page page;
HashPageOpaque opaque;
HashMetaPage metap;
HashItem hitem;
IndexTuple itup;
ItemPointer current;
OffsetNumber offnum;
RetrieveIndexResult res;
HashScanOpaque so;
rel = scan->relation;
so = (HashScanOpaque) scan->opaque;
current = &(scan->currentItemData);
rel = scan->relation;
so = (HashScanOpaque) scan->opaque;
current = &(scan->currentItemData);
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
metap = (HashMetaPage) BufferGetPage(metabuf);
_hash_checkpage((Page) metap, LH_META_PAGE);
/*
* XXX -- The attribute number stored in the scan key is the attno in
* the heap relation. We need to transmogrify this into the index
* relation attno here. For the moment, we have hardwired attno == 1.
*/
/* find the correct bucket page and load it into buf */
_hash_search(rel, 1, scan->keyData, &buf, metap);
page = BufferGetPage(buf);
_hash_checkpage(page, LH_BUCKET_PAGE);
opaque = (HashPageOpaque) PageGetSpecialPointer(page);
/*
* if we are scanning forward, we need to find the first non-empty
* page (if any) in the bucket chain. since overflow pages are never
* empty, this had better be either the bucket page or the first
* overflow page.
*
* if we are scanning backward, we always go all the way to the end of
* the bucket chain.
*/
if (PageIsEmpty(page))
{
if (BlockNumberIsValid(opaque->hasho_nextblkno))
{
_hash_readnext(rel, &buf, &page, &opaque);
}
else
{
ItemPointerSetInvalid(current);
so->hashso_curbuf = InvalidBuffer;
/*
			 * If there are no scankeys, all tuples will satisfy the scan -
* so we continue in _hash_step to get tuples from all
* buckets. - vadim 04/29/97
*/
if (scan->numberOfKeys >= 1)
{
_hash_relbuf(rel, buf, HASH_READ);
_hash_relbuf(rel, metabuf, HASH_READ);
return ((RetrieveIndexResult) NULL);
}
}
}
if (ScanDirectionIsBackward(dir))
{
while (BlockNumberIsValid(opaque->hasho_nextblkno))
{
_hash_readnext(rel, &buf, &page, &opaque);
}
}
if (!_hash_step(scan, &buf, dir, metabuf))
{
return ((RetrieveIndexResult) NULL);
}
/* if we're here, _hash_step found a valid tuple */
current = &(scan->currentItemData);
offnum = ItemPointerGetOffsetNumber(current);
page = BufferGetPage(buf);
_hash_checkpage(page, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
hitem = (HashItem) PageGetItem(page, PageGetItemId(page, offnum));
itup = &hitem->hash_itup;
res = FormRetrieveIndexResult(current, &(itup->t_tid));
return (res);
}
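As the comment above says, a forward scan must first position itself on the first non-empty page of the bucket chain (only the bucket page itself can be empty), while a backward scan walks to the last page of the chain. The following minimal, self-contained sketch restates that positioning rule with a toy page-chain type; the struct and function names are hypothetical stand-ins, not backend structures.

#include <stdio.h>

/* Toy model of a bucket chain: each page carries an item count and a successor. */
struct toy_page
{
	int				 nitems;
	struct toy_page *next;
};

/* Forward scan: skip a leading empty bucket page; overflow pages are never empty. */
static struct toy_page *
start_forward(struct toy_page *bucket_page)
{
	if (bucket_page->nitems == 0)
		return bucket_page->next;	/* may be NULL: nothing to scan */
	return bucket_page;
}

/* Backward scan: always go all the way to the end of the chain. */
static struct toy_page *
start_backward(struct toy_page *bucket_page)
{
	struct toy_page *p = bucket_page;

	while (p->next != NULL)
		p = p->next;
	return p;
}

int
main(void)
{
	struct toy_page ovfl = {3, NULL};		/* one overflow page holding 3 items */
	struct toy_page bucket = {0, &ovfl};	/* empty bucket page chained to it */

	printf("forward scan starts on a page holding %d items\n",
		   start_forward(&bucket)->nitems);
	printf("backward scan starts on a page holding %d items\n",
		   start_backward(&bucket)->nitems);
	return 0;
}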
/*
* _hash_step() -- step to the next valid item in a scan in the bucket.
*
* If no valid record exists in the requested direction, return
* false. Else, return true and set the CurrentItemData for the
* scan to the right thing.
*
* 'bufP' points to the buffer which contains the current page
* that we'll step through.
*
* 'metabuf' is released when this returns.
*/
bool
_hash_step(IndexScanDesc scan, Buffer * bufP, ScanDirection dir, Buffer metabuf)
{
Relation rel;
ItemPointer current;
HashScanOpaque so;
int allbuckets;
HashMetaPage metap;
Buffer buf;
Page page;
HashPageOpaque opaque;
OffsetNumber maxoff;
OffsetNumber offnum;
Bucket bucket;
BlockNumber blkno;
HashItem hitem;
IndexTuple itup;
rel = scan->relation;
current = &(scan->currentItemData);
so = (HashScanOpaque) scan->opaque;
allbuckets = (scan->numberOfKeys < 1);
metap = (HashMetaPage) BufferGetPage(metabuf);
_hash_checkpage((Page) metap, LH_META_PAGE);
buf = *bufP;
page = BufferGetPage(buf);
_hash_checkpage(page, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
opaque = (HashPageOpaque) PageGetSpecialPointer(page);
/*
* If _hash_step is called from _hash_first, current will not be
* valid, so we can't dereference it. However, in that case, we
* presumably want to start at the beginning/end of the page...
*/
maxoff = PageGetMaxOffsetNumber(page);
if (ItemPointerIsValid(current))
{
offnum = ItemPointerGetOffsetNumber(current);
}
else
{
offnum = InvalidOffsetNumber;
}
/*
* 'offnum' now points to the last tuple we have seen (if any).
*
* continue to step through tuples until: 1) we get to the end of the
* bucket chain or 2) we find a valid tuple.
*/
do
{
bucket = opaque->hasho_bucket;
switch (dir)
{
case ForwardScanDirection:
if (offnum != InvalidOffsetNumber)
{
offnum = OffsetNumberNext(offnum); /* move forward */
}
else
{
offnum = FirstOffsetNumber; /* new page */
}
while (offnum > maxoff)
{
/*
* either this page is empty (maxoff ==
* InvalidOffsetNumber) or we ran off the end.
*/
_hash_readnext(rel, &buf, &page, &opaque);
if (BufferIsInvalid(buf))
{ /* end of chain */
if (allbuckets && bucket < metap->hashm_maxbucket)
{
++bucket;
blkno = BUCKET_TO_BLKNO(bucket);
buf = _hash_getbuf(rel, blkno, HASH_READ);
page = BufferGetPage(buf);
_hash_checkpage(page, LH_BUCKET_PAGE);
opaque = (HashPageOpaque) PageGetSpecialPointer(page);
Assert(opaque->hasho_bucket == bucket);
while (PageIsEmpty(page) &&
BlockNumberIsValid(opaque->hasho_nextblkno))
{
_hash_readnext(rel, &buf, &page, &opaque);
}
maxoff = PageGetMaxOffsetNumber(page);
offnum = FirstOffsetNumber;
}
else
{
maxoff = offnum = InvalidOffsetNumber;
break; /* while */
}
}
else
{
/* _hash_readnext never returns an empty page */
maxoff = PageGetMaxOffsetNumber(page);
offnum = FirstOffsetNumber;
}
}
break;
case BackwardScanDirection:
if (offnum != InvalidOffsetNumber)
{
offnum = OffsetNumberPrev(offnum); /* move back */
}
else
{
offnum = maxoff;/* new page */
}
while (offnum < FirstOffsetNumber)
{
/*
* either this page is empty (offnum ==
* InvalidOffsetNumber) or we ran off the end.
*/
_hash_readprev(rel, &buf, &page, &opaque);
if (BufferIsInvalid(buf))
{ /* end of chain */
if (allbuckets && bucket > 0)
{
--bucket;
blkno = BUCKET_TO_BLKNO(bucket);
buf = _hash_getbuf(rel, blkno, HASH_READ);
page = BufferGetPage(buf);
_hash_checkpage(page, LH_BUCKET_PAGE);
opaque = (HashPageOpaque) PageGetSpecialPointer(page);
Assert(opaque->hasho_bucket == bucket);
while (BlockNumberIsValid(opaque->hasho_nextblkno))
{
_hash_readnext(rel, &buf, &page, &opaque);
}
maxoff = offnum = PageGetMaxOffsetNumber(page);
}
else
{
maxoff = offnum = InvalidOffsetNumber;
break; /* while */
}
}
else
{
/* _hash_readprev never returns an empty page */
maxoff = offnum = PageGetMaxOffsetNumber(page);
}
}
break;
default:
/* NoMovementScanDirection */
/* this should not be reached */
break;
}
/* we ran off the end of the world without finding a match */
if (offnum == InvalidOffsetNumber)
{
_hash_relbuf(rel, metabuf, HASH_READ);
*bufP = so->hashso_curbuf = InvalidBuffer;
ItemPointerSetInvalid(current);
return (false);
}
/* get ready to check this tuple */
hitem = (HashItem) PageGetItem(page, PageGetItemId(page, offnum));
itup = &hitem->hash_itup;
} while (!_hash_checkqual(scan, itup));
/* if we made it to here, we've found a valid tuple */
_hash_relbuf(rel, metabuf, HASH_READ);
blkno = BufferGetBlockNumber(buf);
*bufP = so->hashso_curbuf = buf;
ItemPointerSet(current, blkno, offnum);
return (true);
}
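_hash_step() advances one item at a time within the current page and moves to the next (or previous) page of the chain when it runs off the end, repeating until a tuple passes the scan's qualification or the chain is exhausted. The sketch below shows the same control flow in miniature over two hypothetical pages of integer keys, with key == 42 standing in for _hash_checkqual(); each printed match corresponds to one successful return from _hash_step().

#include <stdio.h>
#include <stdbool.h>

#define PAGE_LEN 4

/* Two hypothetical pages of keys forming one bucket chain. */
static const int page0[PAGE_LEN] = {7, 7, 42, 7};
static const int page1[PAGE_LEN] = {7, 42, 7, 7};

/* Stand-in for _hash_checkqual(): does this key satisfy the scan? */
static bool
qual(int key)
{
	return key == 42;
}

int
main(void)
{
	const int  *page = page0;
	int			pageno = 0;
	int			offnum = -1;		/* "invalid": no tuple seen yet */

	for (;;)
	{
		offnum++;					/* step forward, like OffsetNumberNext */
		if (offnum >= PAGE_LEN)		/* ran off the end: read the next page */
		{
			if (pageno == 0)
			{
				page = page1;
				pageno = 1;
				offnum = 0;
			}
			else
			{
				printf("end of chain, no further matches\n");
				return 0;
			}
		}
		if (qual(page[offnum]))
			printf("match at page %d, offset %d\n", pageno, offnum);
	}
}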

View File

@@ -1,80 +1,83 @@
/*-------------------------------------------------------------------------
*
 * hashstrat.c--
 *	  Strategy map entries for the hash indexed access method
*
* Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/Attic/hashstrat.c,v 1.10 1997/09/07 04:38:03 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include <postgres.h>
#include <access/hash.h>
#include <access/istrat.h>
/*
* only one valid strategy for hash tables: equality.
*/
#ifdef NOT_USED
static StrategyNumber HTNegate[1] = {
InvalidStrategy
};
static StrategyNumber HTCommute[1] = {
HTEqualStrategyNumber
};
static StrategyNumber HTNegateCommute[1] = {
InvalidStrategy
};
static StrategyEvaluationData HTEvaluationData = {
/* XXX static for simplicity */
HTMaxStrategyNumber,
(StrategyTransformMap) HTNegate,
(StrategyTransformMap) HTCommute,
(StrategyTransformMap) HTNegateCommute,
{NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}
};
#endif
/* ----------------------------------------------------------------
* RelationGetHashStrategy
* ----------------------------------------------------------------
*/
#ifdef NOT_USED
static StrategyNumber
_hash_getstrat(Relation rel,
AttrNumber attno,
RegProcedure proc)
{
StrategyNumber strat;
strat = RelationGetStrategy(rel, attno, &HTEvaluationData, proc);
Assert(StrategyNumberIsValid(strat));
return (strat);
}
#endif
#ifdef NOT_USED
static bool
_hash_invokestrat(Relation rel,
AttrNumber attno,
StrategyNumber strat,
Datum left,
Datum right)
{
return (RelationInvokeStrategy(rel, &HTEvaluationData, attno, strat,
left, right));
}
#endif

View File

@@ -1,109 +1,110 @@
/*-------------------------------------------------------------------------
*
 * hashutil.c--
 *	  Utility code for Postgres hash implementation.
*
* Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashutil.c,v 1.10 1997/09/07 04:38:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include <postgres.h>
#include <access/hash.h>
#include <fmgr.h>
#include <utils/memutils.h>
#include <access/iqual.h>
#ifndef HAVE_MEMMOVE
#include <regex/utils.h>
#else
#include <string.h>
#endif
ScanKey
_hash_mkscankey(Relation rel, IndexTuple itup, HashMetaPage metap)
{
ScanKey skey;
TupleDesc itupdesc;
int natts;
AttrNumber i;
Datum arg;
RegProcedure proc;
bool null;
natts = rel->rd_rel->relnatts;
itupdesc = RelationGetTupleDescriptor(rel);
skey = (ScanKey) palloc(natts * sizeof(ScanKeyData));
for (i = 0; i < natts; i++)
{
arg = index_getattr(itup, i + 1, itupdesc, &null);
proc = metap->hashm_procid;
ScanKeyEntryInitialize(&skey[i],
0x0, (AttrNumber) (i + 1), proc, arg);
}
return (skey);
}
void
_hash_freeskey(ScanKey skey)
{
pfree(skey);
}
bool
_hash_checkqual(IndexScanDesc scan, IndexTuple itup)
{
if (scan->numberOfKeys > 0)
return (index_keytest(itup,
RelationGetTupleDescriptor(scan->relation),
scan->numberOfKeys, scan->keyData));
else
return (true);
}
HashItem
_hash_formitem(IndexTuple itup)
{
int nbytes_hitem;
HashItem hitem;
Size tuplen;
/* disallow nulls in hash keys */
if (itup->t_info & INDEX_NULL_MASK)
elog(WARN, "hash indices cannot include null keys");
/* make a copy of the index tuple with room for the sequence number */
tuplen = IndexTupleSize(itup);
nbytes_hitem = tuplen +
(sizeof(HashItemData) - sizeof(IndexTupleData));
hitem = (HashItem) palloc(nbytes_hitem);
memmove((char *) &(hitem->hash_itup), (char *) itup, tuplen);
return (hitem);
}
Bucket
_hash_call(Relation rel, HashMetaPage metap, Datum key)
{
uint32 n;
Bucket bucket;
RegProcedure proc;
proc = metap->hashm_procid;
n = (uint32) fmgr(proc, key);
bucket = n & metap->hashm_highmask;
if (bucket > metap->hashm_maxbucket)
bucket = bucket & metap->hashm_lowmask;
return (bucket);
}
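The masking above is the standard linear-hashing bucket choice: reduce the hash value with the high mask, and if that names a bucket that has not yet been split into existence (bucket > hashm_maxbucket), fall back to the low mask, i.e. to the half-size table. A standalone sketch with hypothetical mask values in place of a real metapage:

#include <stdio.h>
#include <stdint.h>

/*
 * Illustration of the bucket choice in _hash_call().  The mask values here
 * are made up; in the index they come from the metapage fields
 * hashm_maxbucket, hashm_highmask and hashm_lowmask.
 */
static uint32_t
choose_bucket(uint32_t hashval, uint32_t maxbucket,
			  uint32_t highmask, uint32_t lowmask)
{
	uint32_t	bucket = hashval & highmask;

	if (bucket > maxbucket)
		bucket = bucket & lowmask;	/* bucket not split off yet: use smaller table */
	return bucket;
}

int
main(void)
{
	/* buckets 0..5 exist, so highmask = 7 and lowmask = 3 */
	printf("%u\n", choose_bucket(13, 5, 7, 3));		/* 13 & 7 = 5, valid: bucket 5 */
	printf("%u\n", choose_bucket(14, 5, 7, 3));		/* 14 & 7 = 6 > 5, 6 & 3: bucket 2 */
	return 0;
}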
/*
@@ -112,12 +113,13 @@ _hash_call(Relation rel, HashMetaPage metap, Datum key)
uint32
_hash_log2(uint32 num)
{
uint32 i,
limit;
limit = 1;
for (i = 0; limit < num; limit = limit << 1, i++)
;
return (i);
}
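_hash_log2() returns the smallest i such that 1 << i is at least num, i.e. a ceiling log base 2 (both 0 and 1 map to 0). A quick standalone check of that loop:

#include <stdio.h>
#include <stdint.h>

/* Same loop as _hash_log2(): smallest i with (1 << i) >= num. */
static uint32_t
ceil_log2(uint32_t num)
{
	uint32_t	i;
	uint32_t	limit;

	for (i = 0, limit = 1; limit < num; limit <<= 1, i++)
		;
	return i;
}

int
main(void)
{
	printf("%u %u %u %u\n",
		   ceil_log2(1),		/* 0 */
		   ceil_log2(3),		/* 2 */
		   ceil_log2(8),		/* 3 */
		   ceil_log2(1000));	/* 10, since 1 << 10 = 1024 >= 1000 */
	return 0;
}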
/*
@@ -126,19 +128,20 @@ _hash_log2(uint32 num)
void
_hash_checkpage(Page page, int flags)
{
HashPageOpaque opaque;
Assert(page);
Assert(((PageHeader) (page))->pd_lower >= (sizeof(PageHeaderData) - sizeof(ItemIdData)));
#if 1
Assert(((PageHeader) (page))->pd_upper <=
(BLCKSZ - DOUBLEALIGN(sizeof(HashPageOpaqueData))));
Assert(((PageHeader) (page))->pd_special ==
(BLCKSZ - DOUBLEALIGN(sizeof(HashPageOpaqueData))));
Assert(((PageHeader) (page))->pd_opaque.od_pagesize == BLCKSZ);
#endif
if (flags)
{
opaque = (HashPageOpaque) PageGetSpecialPointer(page);
Assert(opaque->hasho_flag & flags);
}
}