mirror of https://github.com/postgres/postgres.git
pgindent run.
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.81 2002/09/02 01:05:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.82 2002/09/04 20:31:08 momjian Exp $
*
* NOTES
* The old interface functions have been converted to macros
@@ -326,9 +326,9 @@ nocachegetattr(HeapTuple tuple,

/*
* If slow is false, and we got here, we know that we have a tuple
* with no nulls or var-widths before the target attribute. If possible,
* we also want to initialize the remainder of the attribute cached
* offset values.
* with no nulls or var-widths before the target attribute. If
* possible, we also want to initialize the remainder of the attribute
* cached offset values.
*/
if (!slow)
{
@@ -702,8 +702,8 @@ heap_modifytuple(HeapTuple tuple,
nulls);

/*
* copy the identification info of the old tuple: t_ctid, t_self,
* and OID (if any)
* copy the identification info of the old tuple: t_ctid, t_self, and
* OID (if any)
*/
newTuple->t_data->t_ctid = tuple->t_data->t_ctid;
newTuple->t_self = tuple->t_self;

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.59 2002/08/25 17:20:00 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.60 2002/09/04 20:31:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -319,9 +319,9 @@ nocache_index_getattr(IndexTuple tup,

/*
* If slow is false, and we got here, we know that we have a tuple
* with no nulls or var-widths before the target attribute. If possible,
* we also want to initialize the remainder of the attribute cached
* offset values.
* with no nulls or var-widths before the target attribute. If
* possible, we also want to initialize the remainder of the attribute
* cached offset values.
*/
if (!slow)
{

@@ -9,7 +9,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.64 2002/08/24 15:00:46 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.65 2002/09/04 20:31:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -23,7 +23,7 @@


static void printtup_setup(DestReceiver *self, int operation,
const char *portalName, TupleDesc typeinfo);
const char *portalName, TupleDesc typeinfo);
static void printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self);
static void printtup_internal(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self);
static void printtup_cleanup(DestReceiver *self);
@@ -88,8 +88,8 @@ printtup_setup(DestReceiver *self, int operation,
pq_puttextmessage('P', portalName);

/*
* if this is a retrieve, then we send back the tuple
* descriptor of the tuples.
* if this is a retrieve, then we send back the tuple descriptor of
* the tuples.
*/
if (operation == CMD_SELECT)
{
@@ -100,7 +100,7 @@ printtup_setup(DestReceiver *self, int operation,

pq_beginmessage(&buf);
pq_sendbyte(&buf, 'T'); /* tuple descriptor message type */
pq_sendint(&buf, natts, 2); /* # of attrs in tuples */
pq_sendint(&buf, natts, 2); /* # of attrs in tuples */

for (i = 0; i < natts; ++i)
{

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.88 2002/09/02 01:05:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.89 2002/09/04 20:31:09 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
@@ -114,8 +114,8 @@ CreateTupleDescCopy(TupleDesc tupdesc)
{
desc->attrs[i] = (Form_pg_attribute) palloc(ATTRIBUTE_TUPLE_SIZE);
memcpy(desc->attrs[i],
tupdesc->attrs[i],
ATTRIBUTE_TUPLE_SIZE);
tupdesc->attrs[i],
ATTRIBUTE_TUPLE_SIZE);
desc->attrs[i]->attnotnull = false;
desc->attrs[i]->atthasdef = false;
}
@@ -148,8 +148,8 @@ CreateTupleDescCopyConstr(TupleDesc tupdesc)
{
desc->attrs[i] = (Form_pg_attribute) palloc(ATTRIBUTE_TUPLE_SIZE);
memcpy(desc->attrs[i],
tupdesc->attrs[i],
ATTRIBUTE_TUPLE_SIZE);
tupdesc->attrs[i],
ATTRIBUTE_TUPLE_SIZE);
}
if (constr)
{
@@ -425,9 +425,8 @@ TupleDescInitEntry(TupleDesc desc,
*
* (Why not just make the atttypid point to the OID type, instead of the
* type the query returns? Because the executor uses the atttypid to
* tell the front end what type will be returned,
* and in the end the type returned will be the result of the query,
* not an OID.)
* tell the front end what type will be returned, and in the end the
* type returned will be the result of the query, not an OID.)
*
* (Why not wait until the return type of the set is known (i.e., the
* recursive call to the executor to execute the set has returned)

@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.95 2002/06/20 20:29:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.96 2002/09/04 20:31:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -294,6 +294,7 @@ gistinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);

#ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4);
bool checkUnique = PG_GETARG_BOOL(5);
@@ -494,13 +495,13 @@ gistlayerinsert(Relation r, BlockNumber blkno,
/* key is modified, so old version must be deleted */
ItemPointerSet(&oldtid, blkno, child);
gistdelete(r, &oldtid);


/*
* if child was splitted, new key for child will be inserted
* in the end list of child, so we must say to any scans
* that page is changed beginning from 'child' offset
* if child was splitted, new key for child will be inserted in
* the end list of child, so we must say to any scans that page is
* changed beginning from 'child' offset
*/
if ( ret & SPLITED )
if (ret & SPLITED)
gistadjscans(r, GISTOP_SPLIT, blkno, child);
}

@@ -615,7 +616,7 @@ gistwritebuffer(Relation r, Page page, IndexTuple *itup,
static int
gistnospace(Page page, IndexTuple *itvec, int len)
{
unsigned int size = 0;
unsigned int size = 0;
int i;

for (i = 0; i < len; i++)
@@ -679,7 +680,7 @@ gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate)

needfree = (bool *) palloc(((len == 1) ? 2 : len) * sizeof(bool));
/* workaround for 64-bit: ensure GISTENTRY array is maxaligned */
storage = (char*)palloc( ((len == 1) ? 2 : len) * sizeof(GISTENTRY) + MAXALIGN(VARHDRSZ));
storage = (char *) palloc(((len == 1) ? 2 : len) * sizeof(GISTENTRY) + MAXALIGN(VARHDRSZ));
evec = (bytea *) (storage + MAXALIGN(VARHDRSZ) - VARHDRSZ);

for (j = 0; j < r->rd_att->natts; j++)
@@ -786,7 +787,7 @@ gistgetadjusted(Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *gis
int j;

/* workaround for 64-bit: ensure GISTENTRY array is maxaligned */
storage = (char*) palloc( 2 * sizeof(GISTENTRY) + MAXALIGN(VARHDRSZ));
storage = (char *) palloc(2 * sizeof(GISTENTRY) + MAXALIGN(VARHDRSZ));
evec = (bytea *) (storage + MAXALIGN(VARHDRSZ) - VARHDRSZ);
VARATT_SIZEP(evec) = 2 * sizeof(GISTENTRY) + VARHDRSZ;
ev0p = &((GISTENTRY *) VARDATA(evec))[0];
@@ -911,7 +912,7 @@ gistunionsubkey(Relation r, GISTSTATE *giststate, IndexTuple *itvec, GIST_SPLITV

needfree = (bool *) palloc(((len == 1) ? 2 : len) * sizeof(bool));
/* workaround for 64-bit: ensure GISTENTRY array is maxaligned */
storage = (char*)palloc( ((len == 1) ? 2 : len) * sizeof(GISTENTRY) + MAXALIGN(VARHDRSZ));
storage = (char *) palloc(((len == 1) ? 2 : len) * sizeof(GISTENTRY) + MAXALIGN(VARHDRSZ));
evec = (bytea *) (storage + MAXALIGN(VARHDRSZ) - VARHDRSZ);

for (j = 1; j < r->rd_att->natts; j++)
@@ -1098,7 +1099,7 @@ gistadjsubkey(Relation r,
v->spl_nright = curlen;

/* workaround for 64-bit: ensure GISTENTRY array is maxaligned */
storage = (char*)palloc( 2 * sizeof(GISTENTRY) + MAXALIGN(VARHDRSZ));
storage = (char *) palloc(2 * sizeof(GISTENTRY) + MAXALIGN(VARHDRSZ));
evec = (bytea *) (storage + MAXALIGN(VARHDRSZ) - VARHDRSZ);
VARATT_SIZEP(evec) = 2 * sizeof(GISTENTRY) + VARHDRSZ;
ev0p = &((GISTENTRY *) VARDATA(evec))[0];
@@ -1276,7 +1277,7 @@ gistSplit(Relation r,
/* workaround for 64-bit: ensure GISTENTRY array is maxaligned */
storage = palloc(MAXALIGN(VARHDRSZ) + (*len + 1) * sizeof(GISTENTRY));
entryvec = (bytea *) (storage + MAXALIGN(VARHDRSZ) - VARHDRSZ);
decompvec = (bool *) palloc( (*len + 1) * sizeof(bool));
decompvec = (bool *) palloc((*len + 1) * sizeof(bool));
VARATT_SIZEP(entryvec) = (*len + 1) * sizeof(GISTENTRY) + VARHDRSZ;
for (i = 1; i <= *len; i++)
{

@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/gist/gistget.c,v 1.34 2002/06/20 20:29:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/gist/gistget.c,v 1.35 2002/09/04 20:31:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,7 +33,7 @@ gistgettuple(PG_FUNCTION_ARGS)
{
IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
bool res;
bool res;

/* if we have it cached in the scan desc, just return the value */
if (gistscancache(s, dir))

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.59 2002/06/20 20:29:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.60 2002/09/04 20:31:09 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@@ -164,6 +164,7 @@ hashinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);

#ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4);
bool checkUnique = PG_GETARG_BOOL(5);
@@ -213,7 +214,7 @@ hashgettuple(PG_FUNCTION_ARGS)
HashScanOpaque so = (HashScanOpaque) scan->opaque;
Page page;
OffsetNumber offnum;
bool res;
bool res;

/*
* If we've already initialized this scan, we can just advance it in
@@ -228,18 +229,21 @@ hashgettuple(PG_FUNCTION_ARGS)
if (scan->kill_prior_tuple)
{
/*
* Yes, so mark it by setting the LP_DELETE bit in the item flags.
* Yes, so mark it by setting the LP_DELETE bit in the item
* flags.
*/
offnum = ItemPointerGetOffsetNumber(&(scan->currentItemData));
page = BufferGetPage(so->hashso_curbuf);
PageGetItemId(page, offnum)->lp_flags |= LP_DELETE;

/*
* Since this can be redone later if needed, it's treated the
* same as a commit-hint-bit status update for heap tuples:
* we mark the buffer dirty but don't make a WAL log entry.
* same as a commit-hint-bit status update for heap tuples: we
* mark the buffer dirty but don't make a WAL log entry.
*/
SetBufferCommitInfoNeedsSave(so->hashso_curbuf);
}

/*
* Now continue the scan.
*/

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.34 2002/06/20 20:29:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.35 2002/09/04 20:31:09 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@@ -96,7 +96,8 @@ hashname(PG_FUNCTION_ARGS)
char *key = NameStr(*PG_GETARG_NAME(0));
int keylen = strlen(key);

Assert(keylen < NAMEDATALEN); /* else it's not truncated correctly */
Assert(keylen < NAMEDATALEN); /* else it's not truncated
* correctly */

return hash_any((unsigned char *) key, keylen);
}
@@ -134,9 +135,9 @@ hashvarlena(PG_FUNCTION_ARGS)
* high bits or all three low bits, whether the original value of a,b,c
* is almost all zero or is uniformly distributed,
* - If mix() is run forward or backward, at least 32 bits in a,b,c
* have at least 1/4 probability of changing.
* have at least 1/4 probability of changing.
* - If mix() is run forward, every bit of c will change between 1/3 and
* 2/3 of the time. (Well, 22/100 and 78/100 for some 2-bit deltas.)
* 2/3 of the time. (Well, 22/100 and 78/100 for some 2-bit deltas.)
*----------
*/
#define mix(a,b,c) \
@@ -147,17 +148,17 @@ hashvarlena(PG_FUNCTION_ARGS)
a -= b; a -= c; a ^= (c>>12); \
b -= c; b -= a; b ^= (a<<16); \
c -= a; c -= b; c ^= (b>>5); \
a -= b; a -= c; a ^= (c>>3); \
a -= b; a -= c; a ^= (c>>3); \
b -= c; b -= a; b ^= (a<<10); \
c -= a; c -= b; c ^= (b>>15); \
}

/*
* hash_any() -- hash a variable-length key into a 32-bit value
* k : the key (the unaligned variable-length array of bytes)
* len : the length of the key, counting by bytes
* k : the key (the unaligned variable-length array of bytes)
* len : the length of the key, counting by bytes
*
* Returns a uint32 value. Every bit of the key affects every bit of
* Returns a uint32 value. Every bit of the key affects every bit of
* the return value. Every 1-bit and 2-bit delta achieves avalanche.
* About 6*len+35 instructions. The best hash table sizes are powers
* of 2. There is no need to do mod a prime (mod is sooo slow!).
@@ -166,7 +167,10 @@ hashvarlena(PG_FUNCTION_ARGS)
Datum
hash_any(register const unsigned char *k, register int keylen)
{
register uint32 a,b,c,len;
register uint32 a,
b,
c,
len;

/* Set up the internal state */
len = keylen;
@@ -176,32 +180,44 @@ hash_any(register const unsigned char *k, register int keylen)
/* handle most of the key */
while (len >= 12)
{
a += (k[0] +((uint32)k[1]<<8) +((uint32)k[2]<<16) +((uint32)k[3]<<24));
b += (k[4] +((uint32)k[5]<<8) +((uint32)k[6]<<16) +((uint32)k[7]<<24));
c += (k[8] +((uint32)k[9]<<8) +((uint32)k[10]<<16)+((uint32)k[11]<<24));
mix(a,b,c);
k += 12; len -= 12;
a += (k[0] + ((uint32) k[1] << 8) + ((uint32) k[2] << 16) + ((uint32) k[3] << 24));
b += (k[4] + ((uint32) k[5] << 8) + ((uint32) k[6] << 16) + ((uint32) k[7] << 24));
c += (k[8] + ((uint32) k[9] << 8) + ((uint32) k[10] << 16) + ((uint32) k[11] << 24));
mix(a, b, c);
k += 12;
len -= 12;
}

/* handle the last 11 bytes */
c += keylen;
switch (len) /* all the case statements fall through */
{
case 11: c+=((uint32)k[10]<<24);
case 10: c+=((uint32)k[9]<<16);
case 9 : c+=((uint32)k[8]<<8);
case 11:
c += ((uint32) k[10] << 24);
case 10:
c += ((uint32) k[9] << 16);
case 9:
c += ((uint32) k[8] << 8);
/* the first byte of c is reserved for the length */
case 8 : b+=((uint32)k[7]<<24);
case 7 : b+=((uint32)k[6]<<16);
case 6 : b+=((uint32)k[5]<<8);
case 5 : b+=k[4];
case 4 : a+=((uint32)k[3]<<24);
case 3 : a+=((uint32)k[2]<<16);
case 2 : a+=((uint32)k[1]<<8);
case 1 : a+=k[0];
case 8:
b += ((uint32) k[7] << 24);
case 7:
b += ((uint32) k[6] << 16);
case 6:
b += ((uint32) k[5] << 8);
case 5:
b += k[4];
case 4:
a += ((uint32) k[3] << 24);
case 3:
a += ((uint32) k[2] << 16);
case 2:
a += ((uint32) k[1] << 8);
case 1:
a += k[0];
/* case 0: nothing left to add */
}
mix(a,b,c);
mix(a, b, c);
/* report the result */
return UInt32GetDatum(c);
}

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashscan.c,v 1.28 2002/06/20 20:29:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashscan.c,v 1.29 2002/09/04 20:31:09 momjian Exp $
*
* NOTES
* Because we can be doing an index scan on a relation while we
@@ -45,7 +45,7 @@ static HashScanList HashScans = (HashScanList) NULL;


static void _hash_scandel(IndexScanDesc scan,
BlockNumber blkno, OffsetNumber offno);
BlockNumber blkno, OffsetNumber offno);


/*
@@ -158,7 +158,7 @@ _hash_scandel(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno)
* then step backwards (affecting current), then exchange again.
*/
ItemPointerData tmpitem;
Buffer tmpbuf;
Buffer tmpbuf;

tmpitem = *mark;
*mark = *current;

@@ -8,12 +8,12 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.147 2002/09/02 01:05:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.148 2002/09/04 20:31:09 momjian Exp $
*
*
* INTERFACE ROUTINES
* relation_open - open any relation by relation OID
* relation_openrv - open any relation specified by a RangeVar
* relation_openrv - open any relation specified by a RangeVar
* relation_openr - open a system relation by name
* relation_close - close any relation
* heap_open - open a heap relation by relation OID
@@ -306,7 +306,7 @@ heapgettup(Relation relation,
{
if (ItemIdIsUsed(lpp))
{
bool valid;
bool valid;

tuple->t_datamcxt = NULL;
tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
@@ -985,8 +985,8 @@ heap_fetch(Relation relation,
*userbuf = buffer;

/*
* Count the successful fetch in *pgstat_info if given,
* otherwise in the relation's default statistics area.
* Count the successful fetch in *pgstat_info if given, otherwise
* in the relation's default statistics area.
*/
if (pgstat_info != NULL)
pgstat_count_heap_fetch(pgstat_info);
@@ -1120,6 +1120,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
/* this is redundant with an Assert in HeapTupleSetOid */
Assert(tup->t_data->t_infomask & HEAP_HASOID);
#endif

/*
* If the object id of this tuple has already been assigned, trust
* the caller. There are a couple of ways this can happen. At
@@ -1224,10 +1225,10 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
WriteBuffer(buffer);

/*
* If tuple is cachable, mark it for invalidation from the caches in case
* we abort. Note it is OK to do this after WriteBuffer releases the
* buffer, because the "tup" data structure is all in local memory,
* not in the shared buffer.
* If tuple is cachable, mark it for invalidation from the caches in
* case we abort. Note it is OK to do this after WriteBuffer releases
* the buffer, because the "tup" data structure is all in local
* memory, not in the shared buffer.
*/
CacheInvalidateHeapTuple(relation, tup);

@@ -1379,6 +1380,7 @@ l1:
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

#ifdef TUPLE_TOASTER_ACTIVE

/*
* If the relation has toastable attributes, we need to delete no
* longer needed items there too. We have to do this before
@@ -1728,10 +1730,10 @@ l2:
WriteBuffer(buffer);

/*
* If new tuple is cachable, mark it for invalidation from the caches in
* case we abort. Note it is OK to do this after WriteBuffer releases
* the buffer, because the "newtup" data structure is all in local
* memory, not in the shared buffer.
* If new tuple is cachable, mark it for invalidation from the caches
* in case we abort. Note it is OK to do this after WriteBuffer
* releases the buffer, because the "newtup" data structure is all in
* local memory, not in the shared buffer.
*/
CacheInvalidateHeapTuple(relation, newtup);

@@ -2045,16 +2047,16 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
xlhdr.hdr.mask = newtup->t_data->t_infomask;
if (move) /* remember xmin & xmax */
{
TransactionId xid[2]; /* xmax, xmin */
TransactionId xid[2]; /* xmax, xmin */

if (newtup->t_data->t_infomask & (HEAP_XMAX_INVALID |
HEAP_MARKED_FOR_UPDATE))
HEAP_MARKED_FOR_UPDATE))
xid[0] = InvalidTransactionId;
else
xid[0] = HeapTupleHeaderGetXmax(newtup->t_data);
xid[1] = HeapTupleHeaderGetXmin(newtup->t_data);
memcpy((char *) &xlhdr + hsize,
(char *) xid,
(char *) xid,
2 * sizeof(TransactionId));
hsize += 2 * sizeof(TransactionId);
}
@@ -2143,7 +2145,7 @@ heap_xlog_clean(bool redo, XLogRecPtr lsn, XLogRecord *record)
PageRepairFragmentation(page, NULL);

PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID); /* prev sui */
PageSetSUI(page, ThisStartUpID); /* prev sui */
UnlockAndWriteBuffer(buffer);
}

@@ -2463,11 +2465,11 @@ newsame:;

if (move)
{
TransactionId xid[2]; /* xmax, xmin */

TransactionId xid[2]; /* xmax, xmin */

hsize = SizeOfHeapUpdate + SizeOfHeapHeader;
memcpy((char *) xid,
(char *) xlrec + hsize, 2 * sizeof(TransactionId));
(char *) xlrec + hsize, 2 * sizeof(TransactionId));
htup->t_infomask = xlhdr.mask;
htup->t_infomask &= ~(HEAP_XMIN_COMMITTED |
HEAP_XMIN_INVALID |

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.35 2002/09/02 01:05:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.36 2002/09/04 20:31:09 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -46,7 +46,7 @@ static void toast_insert_or_update(Relation rel, HeapTuple newtup,
static Datum toast_save_datum(Relation rel, Datum value);
static varattrib *toast_fetch_datum(varattrib *attr);
static varattrib *toast_fetch_datum_slice(varattrib *attr,
int32 sliceoffset, int32 length);
int32 sliceoffset, int32 length);


/* ----------
@@ -165,73 +165,68 @@ heap_tuple_untoast_attr(varattrib *attr)
/* ----------
* heap_tuple_untoast_attr_slice -
*
* Public entry point to get back part of a toasted value
* from compression or external storage.
* Public entry point to get back part of a toasted value
* from compression or external storage.
* ----------
*/
varattrib *
varattrib *
heap_tuple_untoast_attr_slice(varattrib *attr, int32 sliceoffset, int32 slicelength)
{
varattrib *preslice;
varattrib *result;
int32 attrsize;

int32 attrsize;

if (VARATT_IS_COMPRESSED(attr))
{
varattrib *tmp;

varattrib *tmp;

if (VARATT_IS_EXTERNAL(attr))
{
tmp = toast_fetch_datum(attr);
}
else
{
tmp = attr; /* compressed in main tuple */
tmp = attr; /* compressed in main tuple */
}


preslice = (varattrib *) palloc(attr->va_content.va_external.va_rawsize
+ VARHDRSZ);
VARATT_SIZEP(preslice) = attr->va_content.va_external.va_rawsize + VARHDRSZ;
pglz_decompress((PGLZ_Header *) tmp, VARATT_DATA(preslice));

if (tmp != attr)

if (tmp != attr)
pfree(tmp);
}
else
else
{
/* Plain value */
if (VARATT_IS_EXTERNAL(attr))
{
{
/* fast path */
return (toast_fetch_datum_slice(attr, sliceoffset, slicelength));
}
else
{
preslice = attr;
}
}


/* slicing of datum for compressed cases and plain value */


attrsize = VARSIZE(preslice) - VARHDRSZ;
if (sliceoffset >= attrsize)
if (sliceoffset >= attrsize)
{
sliceoffset = 0;
slicelength = 0;
}


if (((sliceoffset + slicelength) > attrsize) || slicelength < 0)
{
slicelength = attrsize - sliceoffset;
}


result = (varattrib *) palloc(slicelength + VARHDRSZ);
VARATT_SIZEP(result) = slicelength + VARHDRSZ;


memcpy(VARDATA(result), VARDATA(preslice) + sliceoffset, slicelength);

if (preslice != attr) pfree(preslice);


if (preslice != attr)
pfree(preslice);

return result;
}

@@ -1053,9 +1048,9 @@ toast_fetch_datum(varattrib *attr)
/*
* Read the chunks by index
*
* Note that because the index is actually on (valueid, chunkidx)
* we will see the chunks in chunkidx order, even though we didn't
* explicitly ask for it.
* Note that because the index is actually on (valueid, chunkidx) we will
* see the chunks in chunkidx order, even though we didn't explicitly
* ask for it.
*/
nextidx = 0;

@@ -1146,45 +1141,44 @@ toast_fetch_datum_slice(varattrib *attr, int32 sliceoffset, int32 length)
varattrib *result;
int32 attrsize;
int32 residx;
int32 nextidx;
int numchunks;
int startchunk;
int endchunk;
int32 nextidx;
int numchunks;
int startchunk;
int endchunk;
int32 startoffset;
int32 endoffset;
int totalchunks;
int totalchunks;
Pointer chunk;
bool isnull;
int32 chunksize;
int32 chcpystrt;
int32 chcpyend;
int32 chcpystrt;
int32 chcpyend;

attrsize = attr->va_content.va_external.va_extsize;
totalchunks = ((attrsize - 1) / TOAST_MAX_CHUNK_SIZE) + 1;

if (sliceoffset >= attrsize)
if (sliceoffset >= attrsize)
{
sliceoffset = 0;
length = 0;
sliceoffset = 0;
length = 0;
}

if (((sliceoffset + length) > attrsize) || length < 0)
{
length = attrsize - sliceoffset;
}
length = attrsize - sliceoffset;

result = (varattrib *) palloc(length + VARHDRSZ);
VARATT_SIZEP(result) = length + VARHDRSZ;

if (VARATT_IS_COMPRESSED(attr))
VARATT_SIZEP(result) |= VARATT_FLAG_COMPRESSED;

if (length == 0) return (result); /* Can save a lot of work at this point! */

if (length == 0)
return (result); /* Can save a lot of work at this point! */

startchunk = sliceoffset / TOAST_MAX_CHUNK_SIZE;
endchunk = (sliceoffset + length - 1) / TOAST_MAX_CHUNK_SIZE;
numchunks = (endchunk - startchunk ) + 1;

numchunks = (endchunk - startchunk) + 1;

startoffset = sliceoffset % TOAST_MAX_CHUNK_SIZE;
endoffset = (sliceoffset + length - 1) % TOAST_MAX_CHUNK_SIZE;

@@ -1204,33 +1198,34 @@ toast_fetch_datum_slice(varattrib *attr, int32 sliceoffset, int32 length)
(bits16) 0,
(AttrNumber) 1,
(RegProcedure) F_OIDEQ,
ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
ObjectIdGetDatum(attr->va_content.va_external.va_valueid));

/*
* Now dependent on number of chunks:
*/

if (numchunks == 1)

if (numchunks == 1)
{
ScanKeyEntryInitialize(&toastkey[1],
ScanKeyEntryInitialize(&toastkey[1],
(bits16) 0,
(AttrNumber) 2,
(RegProcedure) F_INT4EQ,
Int32GetDatum(startchunk));
nscankeys = 2;
nscankeys = 2;
}
else
{
ScanKeyEntryInitialize(&toastkey[1],
ScanKeyEntryInitialize(&toastkey[1],
(bits16) 0,
(AttrNumber) 2,
(RegProcedure) F_INT4GE,
Int32GetDatum(startchunk));
ScanKeyEntryInitialize(&toastkey[2],
ScanKeyEntryInitialize(&toastkey[2],
(bits16) 0,
(AttrNumber) 2,
(RegProcedure) F_INT4LE,
Int32GetDatum(endchunk));
nscankeys = 3;
nscankeys = 3;
}

/*
@@ -1279,21 +1274,23 @@ toast_fetch_datum_slice(varattrib *attr, int32 sliceoffset, int32 length)
*/
chcpystrt = 0;
chcpyend = chunksize - 1;
if (residx == startchunk) chcpystrt = startoffset;
if (residx == endchunk) chcpyend = endoffset;

memcpy(((char *) VARATT_DATA(result)) +
(residx * TOAST_MAX_CHUNK_SIZE - sliceoffset) +chcpystrt,
if (residx == startchunk)
chcpystrt = startoffset;
if (residx == endchunk)
chcpyend = endoffset;

memcpy(((char *) VARATT_DATA(result)) +
(residx * TOAST_MAX_CHUNK_SIZE - sliceoffset) + chcpystrt,
VARATT_DATA(chunk) + chcpystrt,
(chcpyend - chcpystrt) + 1);


nextidx++;
}

/*
* Final checks that we successfully fetched the datum
*/
if ( nextidx != (endchunk + 1))
if (nextidx != (endchunk + 1))
elog(ERROR, "missing chunk number %d for toast value %u",
nextidx,
attr->va_content.va_external.va_valueid);

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.35 2002/06/20 20:29:25 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.36 2002/09/04 20:31:09 momjian Exp $
*
* NOTES
* many of the old access method routines have been turned into
@@ -77,7 +77,7 @@ RelationGetIndexScan(Relation indexRelation,

scan->heapRelation = NULL; /* may be set later */
scan->indexRelation = indexRelation;
scan->xs_snapshot = SnapshotNow; /* may be set later */
scan->xs_snapshot = SnapshotNow; /* may be set later */
scan->numberOfKeys = nkeys;

/*
@@ -90,8 +90,8 @@ RelationGetIndexScan(Relation indexRelation,
scan->keyData = NULL;

scan->kill_prior_tuple = false;
scan->ignore_killed_tuples = true; /* default setting */
scan->keys_are_unique = false; /* may be set by amrescan */
scan->ignore_killed_tuples = true; /* default setting */
scan->keys_are_unique = false; /* may be set by amrescan */
scan->got_tuple = false;

scan->opaque = NULL;
@@ -201,6 +201,7 @@ systable_beginscan(Relation heapRelation,

/* We assume it's a system index, so index_openr is OK */
sysscan->irel = irel = index_openr(indexRelname);

/*
* Change attribute numbers to be index column numbers.
*
@@ -210,7 +211,7 @@ systable_beginscan(Relation heapRelation,
for (i = 0; i < nkeys; i++)
{
Assert(key[i].sk_attno == irel->rd_index->indkey[i]);
key[i].sk_attno = i+1;
key[i].sk_attno = i + 1;
}
sysscan->iscan = index_beginscan(heapRelation, irel, snapshot,
nkeys, key);

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.61 2002/06/20 20:29:25 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.62 2002/09/04 20:31:09 momjian Exp $
*
* INTERFACE ROUTINES
* index_open - open an index relation by relation OID
@@ -272,8 +272,8 @@ index_beginscan(Relation heapRelation,
PointerGetDatum(key)));

/*
* Save additional parameters into the scandesc. Everything else
* was set up by RelationGetIndexScan.
* Save additional parameters into the scandesc. Everything else was
* set up by RelationGetIndexScan.
*/
scan->heapRelation = heapRelation;
scan->xs_snapshot = snapshot;
@@ -293,7 +293,7 @@ index_beginscan(Relation heapRelation,
* index_rescan - (re)start a scan of an index
*
* The caller may specify a new set of scankeys (but the number of keys
* cannot change). Note that this is also called when first starting
* cannot change). Note that this is also called when first starting
* an indexscan; see RelationGetIndexScan.
* ----------------
*/
@@ -305,8 +305,8 @@ index_rescan(IndexScanDesc scan, ScanKey key)
SCAN_CHECKS;
GET_SCAN_PROCEDURE(rescan, amrescan);

scan->kill_prior_tuple = false; /* for safety */
scan->keys_are_unique = false; /* may be set by amrescan */
scan->kill_prior_tuple = false; /* for safety */
scan->keys_are_unique = false; /* may be set by amrescan */
scan->got_tuple = false;

OidFunctionCall2(procedure,
@@ -375,7 +375,7 @@ index_restrpos(IndexScanDesc scan)
SCAN_CHECKS;
GET_SCAN_PROCEDURE(restrpos, amrestrpos);

scan->kill_prior_tuple = false; /* for safety */
scan->kill_prior_tuple = false; /* for safety */
scan->got_tuple = false;

OidFunctionCall1(procedure, PointerGetDatum(scan));
@@ -385,7 +385,7 @@ index_restrpos(IndexScanDesc scan)
* index_getnext - get the next heap tuple from a scan
*
* The result is the next heap tuple satisfying the scan keys and the
* snapshot, or NULL if no more matching tuples exist. On success,
* snapshot, or NULL if no more matching tuples exist. On success,
* the buffer containing the heap tuple is pinned (the pin will be dropped
* at the next index_getnext or index_endscan). The index TID corresponding
* to the heap tuple can be obtained if needed from scan->currentItemData.
@@ -409,8 +409,8 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
scan->kill_prior_tuple = false;

/*
* Can skip entering the index AM if we already got a tuple
* and it must be unique.
* Can skip entering the index AM if we already got a tuple and it
* must be unique.
*/
if (scan->keys_are_unique && scan->got_tuple)
return NULL;
@@ -454,9 +454,9 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
* index AM to not return it on future indexscans.
*
* We told heap_fetch to keep a pin on the buffer, so we can
* re-access the tuple here. But we must re-lock the buffer first.
* Also, it's just barely possible for an update of hint bits to
* occur here.
* re-access the tuple here. But we must re-lock the buffer
* first. Also, it's just barely possible for an update of hint
* bits to occur here.
*/
LockBuffer(scan->xs_cbuf, BUFFER_LOCK_SHARE);
sv_infomask = heapTuple->t_data->t_infomask;
@@ -497,7 +497,7 @@ bool
index_getnext_indexitem(IndexScanDesc scan,
ScanDirection direction)
{
bool found;
bool found;

SCAN_CHECKS;

@@ -642,10 +642,11 @@ index_getprocinfo(Relation irel,
procId = loc[procindex];

/*
* Complain if function was not found during IndexSupportInitialize.
* This should not happen unless the system tables contain bogus
* entries for the index opclass. (If an AM wants to allow a
* support function to be optional, it can use index_getprocid.)
* Complain if function was not found during
* IndexSupportInitialize. This should not happen unless the
* system tables contain bogus entries for the index opclass. (If
* an AM wants to allow a support function to be optional, it can
* use index_getprocid.)
*/
if (!RegProcedureIsValid(procId))
elog(ERROR, "Missing support function %d for attribute %d of index %s",

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.95 2002/08/06 02:36:33 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.96 2002/09/04 20:31:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -119,14 +119,14 @@ top:
*
* NOTE: obviously, _bt_check_unique can only detect keys that are
* already in the index; so it cannot defend against concurrent
* insertions of the same key. We protect against that by means
* of holding a write lock on the target page. Any other would-be
* insertions of the same key. We protect against that by means of
* holding a write lock on the target page. Any other would-be
* inserter of the same key must acquire a write lock on the same
* target page, so only one would-be inserter can be making the check
* at one time. Furthermore, once we are past the check we hold
* write locks continuously until we have performed our insertion,
* so no later inserter can fail to see our insertion. (This
* requires some care in _bt_insertonpg.)
* at one time. Furthermore, once we are past the check we hold write
* locks continuously until we have performed our insertion, so no
* later inserter can fail to see our insertion. (This requires some
* care in _bt_insertonpg.)
*
* If we must wait for another xact, we release the lock while waiting,
* and then must start over completely.
@@ -205,15 +205,16 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
if (offset <= maxoff)
{
/*
* _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's how we
* handling NULLs - and so we must not use _bt_compare in real
* comparison, but only for ordering/finding items on pages. -
* vadim 03/24/97
* _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's
* how we handling NULLs - and so we must not use _bt_compare
* in real comparison, but only for ordering/finding items on
* pages. - vadim 03/24/97
*/
if (!_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
break; /* we're past all the equal tuples */

curitemid = PageGetItemId(page, offset);

/*
* We can skip the heap fetch if the item is marked killed.
*/
@@ -226,10 +227,11 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
{
/* it is a duplicate */
TransactionId xwait =
(TransactionIdIsValid(SnapshotDirty->xmin)) ?
SnapshotDirty->xmin : SnapshotDirty->xmax;
(TransactionIdIsValid(SnapshotDirty->xmin)) ?
SnapshotDirty->xmin : SnapshotDirty->xmax;

ReleaseBuffer(hbuffer);

/*
* If this tuple is being updated by other transaction
* then we have to wait for its commit/abort.
@@ -252,8 +254,8 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
{
/*
* Hmm, if we can't see the tuple, maybe it can be
* marked killed. This logic should match index_getnext
* and btgettuple.
* marked killed. This logic should match
* index_getnext and btgettuple.
*/
uint16 sv_infomask;

@@ -421,7 +423,7 @@ _bt_insertonpg(Relation rel,
{
/* step right one page */
BlockNumber rblkno = lpageop->btpo_next;
Buffer rbuf;
Buffer rbuf;

/*
* must write-lock next page before releasing write lock on

@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.91 2002/06/20 20:29:25 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.92 2002/09/04 20:31:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -315,24 +315,28 @@ btgettuple(PG_FUNCTION_ARGS)
* buffer, too.
*/
_bt_restscan(scan);

/*
* Check to see if we should kill the previously-fetched tuple.
*/
if (scan->kill_prior_tuple)
{
/*
* Yes, so mark it by setting the LP_DELETE bit in the item flags.
* Yes, so mark it by setting the LP_DELETE bit in the item
* flags.
*/
offnum = ItemPointerGetOffsetNumber(&(scan->currentItemData));
page = BufferGetPage(so->btso_curbuf);
PageGetItemId(page, offnum)->lp_flags |= LP_DELETE;

/*
* Since this can be redone later if needed, it's treated the
* same as a commit-hint-bit status update for heap tuples:
* we mark the buffer dirty but don't make a WAL log entry.
* same as a commit-hint-bit status update for heap tuples: we
* mark the buffer dirty but don't make a WAL log entry.
*/
SetBufferCommitInfoNeedsSave(so->btso_curbuf);
}

/*
* Now continue the scan.
*/
@@ -645,15 +649,15 @@ btbulkdelete(PG_FUNCTION_ARGS)
/*
* If this is first deletion on this page, trade in read
* lock for a really-exclusive write lock. Then, step
* back one and re-examine the item, because other backends
* might have inserted item(s) while we weren't holding
* the lock!
* back one and re-examine the item, because other
* backends might have inserted item(s) while we weren't
* holding the lock!
*
* We assume that only concurrent insertions, not deletions,
* can occur while we're not holding the page lock (the caller
* should hold a suitable relation lock to ensure this).
* Therefore, the item we want to delete is either in the
* same slot as before, or some slot to its right.
* can occur while we're not holding the page lock (the
* caller should hold a suitable relation lock to ensure
* this). Therefore, the item we want to delete is either
* in the same slot as before, or some slot to its right.
* Rechecking the same slot is necessary and sufficient to
* get back in sync after any insertions.
*/
@@ -675,19 +679,19 @@ btbulkdelete(PG_FUNCTION_ARGS)
}

/*
* In either case, we now need to back up the scan one item,
* so that the next cycle will re-examine the same offnum on
* this page.
* In either case, we now need to back up the scan one
* item, so that the next cycle will re-examine the same
* offnum on this page.
*
* For now, just hack the current-item index. Will need to
* be smarter when deletion includes removal of empty
* index pages.
*
* We must decrement ip_posid in all cases but one: if the
* page was formerly rightmost but was split while we didn't
* hold the lock, and ip_posid is pointing to item 1, then
* ip_posid now points at the high key not a valid data item.
* In this case we do want to step forward.
* page was formerly rightmost but was split while we
* didn't hold the lock, and ip_posid is pointing to item
* 1, then ip_posid now points at the high key not a valid
* data item. In this case we do want to step forward.
*/
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
if (current->ip_posid >= P_FIRSTDATAKEY(opaque))

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.50 2002/06/20 20:29:25 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.51 2002/09/04 20:31:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -22,7 +22,7 @@
#include "executor/execdebug.h"


static int _bt_getstrategynumber(RegProcedure sk_procedure, StrategyMap map);
static int _bt_getstrategynumber(RegProcedure sk_procedure, StrategyMap map);


/*
@@ -178,7 +178,7 @@ _bt_formitem(IndexTuple itup)
* example.
*
* Furthermore, we detect the case where the index is unique and we have
* equality quals for all columns. In this case there can be at most one
* equality quals for all columns. In this case there can be at most one
* (visible) matching tuple. index_getnext uses this to avoid uselessly
* continuing the scan after finding one match.
*
@@ -439,8 +439,8 @@ _bt_orderkeys(IndexScanDesc scan)
so->numberOfKeys = new_numberOfKeys;

/*
* If unique index and we have equality keys for all columns,
* set keys_are_unique flag for higher levels.
* If unique index and we have equality keys for all columns, set
* keys_are_unique flag for higher levels.
*/
if (allEqualSoFar && relation->rd_index->indisunique &&
relation->rd_rel->relnatts == new_numberOfKeys)

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.27 2002/06/20 20:29:25 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.28 2002/09/04 20:31:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,7 +31,7 @@ rtgettuple(PG_FUNCTION_ARGS)
{
IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
bool res;
bool res;

/* if we have it cached in the scan desc, just return the value */
if (rtscancache(s, dir))

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.74 2002/06/25 17:26:11 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.75 2002/09/04 20:31:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -223,6 +223,7 @@ rtinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);

#ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4);
bool checkUnique = PG_GETARG_BOOL(5);

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.53 2002/06/20 20:29:25 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.54 2002/09/04 20:31:13 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@@ -27,7 +27,7 @@
* Flag indicating that we are bootstrapping.
*
* Transaction ID generation is disabled during bootstrap; we just use
* BootstrapTransactionId. Also, the transaction ID status-check routines
* BootstrapTransactionId. Also, the transaction ID status-check routines
* are short-circuited; they claim that BootstrapTransactionId has already
* committed, allowing tuples already inserted to be seen immediately.
* ----------------

@@ -6,7 +6,7 @@
* Copyright (c) 2000, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.50 2002/06/11 13:40:50 wieck Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.51 2002/09/04 20:31:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -68,12 +68,12 @@ GetNewTransactionId(void)
* might see a partially-set xid here. But holding both locks at once
* would be a nasty concurrency hit (and in fact could cause a
* deadlock against GetSnapshotData). So for now, assume atomicity.
* Note that readers of PGPROC xid field should be careful to fetch the
* value only once, rather than assume they can read it multiple times
* and get the same answer each time.
* Note that readers of PGPROC xid field should be careful to fetch
* the value only once, rather than assume they can read it multiple
* times and get the same answer each time.
*
* A solution to the atomic-store problem would be to give each PGPROC its
* own spinlock used only for fetching/storing that PGPROC's xid.
* A solution to the atomic-store problem would be to give each PGPROC
* its own spinlock used only for fetching/storing that PGPROC's xid.
* (SInvalLock would then mean primarily that PROCs couldn't be added/
* removed while holding the lock.)
*/

@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.131 2002/08/30 22:18:05 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.132 2002/09/04 20:31:13 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* Transaction aborts can now occur two ways:
|
||||
@@ -265,7 +265,6 @@ SetTransactionFlushEnabled(bool state)
|
||||
{
|
||||
TransactionFlushState = (state == true);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
@@ -421,7 +420,7 @@ CommandCounterIncrement(void)
|
||||
TransactionState s = CurrentTransactionState;
|
||||
|
||||
s->commandId += 1;
|
||||
if (s->commandId == FirstCommandId) /* check for overflow */
|
||||
if (s->commandId == FirstCommandId) /* check for overflow */
|
||||
elog(ERROR, "You may only have 2^32-1 commands per transaction");
|
||||
|
||||
/* Propagate new command ID into query snapshots, if set */
|
||||
@@ -517,8 +516,8 @@ void
|
||||
RecordTransactionCommit(void)
|
||||
{
|
||||
/*
|
||||
* If we made neither any XLOG entries nor any temp-rel updates,
|
||||
* we can omit recording the transaction commit at all.
|
||||
* If we made neither any XLOG entries nor any temp-rel updates, we
|
||||
* can omit recording the transaction commit at all.
|
||||
*/
|
||||
if (MyXactMadeXLogEntry || MyXactMadeTempRelUpdate)
|
||||
{
|
||||
@@ -531,10 +530,10 @@ RecordTransactionCommit(void)
|
||||
START_CRIT_SECTION();
|
||||
|
||||
/*
|
||||
* We only need to log the commit in xlog if the transaction made any
|
||||
* transaction-controlled XLOG entries. (Otherwise, its XID appears
|
||||
* nowhere in permanent storage, so no one else will ever care if it
|
||||
* committed.)
|
||||
* We only need to log the commit in xlog if the transaction made
|
||||
* any transaction-controlled XLOG entries. (Otherwise, its XID
|
||||
* appears nowhere in permanent storage, so no one else will ever
|
||||
* care if it committed.)
|
||||
*/
|
||||
if (MyLastRecPtr.xrecoff != 0)
|
||||
{
|
||||
@@ -560,20 +559,20 @@ RecordTransactionCommit(void)
|
||||
}
|
||||
|
||||
/*
|
||||
* We must flush our XLOG entries to disk if we made any XLOG entries,
|
||||
* whether in or out of transaction control. For example, if we
|
||||
* reported a nextval() result to the client, this ensures that any
|
||||
* XLOG record generated by nextval will hit the disk before we report
|
||||
* the transaction committed.
|
||||
* We must flush our XLOG entries to disk if we made any XLOG
|
||||
* entries, whether in or out of transaction control. For
|
||||
* example, if we reported a nextval() result to the client, this
|
||||
* ensures that any XLOG record generated by nextval will hit the
|
||||
* disk before we report the transaction committed.
|
||||
*/
|
||||
if (MyXactMadeXLogEntry)
|
||||
{
|
||||
/*
|
||||
* Sleep before flush! So we can flush more than one commit
|
||||
* records per single fsync. (The idea is some other backend may
|
||||
* do the XLogFlush while we're sleeping. This needs work still,
|
||||
* because on most Unixen, the minimum select() delay is 10msec or
|
||||
* more, which is way too long.)
|
||||
* records per single fsync. (The idea is some other backend
|
||||
* may do the XLogFlush while we're sleeping. This needs work
|
||||
* still, because on most Unixen, the minimum select() delay
|
||||
* is 10msec or more, which is way too long.)
|
||||
*
|
||||
* We do not sleep if enableFsync is not turned on, nor if there
|
||||
* are fewer than CommitSiblings other backends with active
|
||||
@@ -593,13 +592,14 @@ RecordTransactionCommit(void)
|
||||
}
|
||||
|
||||
/*
|
||||
* We must mark the transaction committed in clog if its XID appears
|
||||
* either in permanent rels or in local temporary rels. We test
|
||||
* this by seeing if we made transaction-controlled entries *OR*
|
||||
* local-rel tuple updates. Note that if we made only the latter,
|
||||
* we have not emitted an XLOG record for our commit, and so in the
|
||||
* event of a crash the clog update might be lost. This is okay
|
||||
* because no one else will ever care whether we committed.
|
||||
* We must mark the transaction committed in clog if its XID
|
||||
* appears either in permanent rels or in local temporary rels.
|
||||
* We test this by seeing if we made transaction-controlled
|
||||
* entries *OR* local-rel tuple updates. Note that if we made
|
||||
* only the latter, we have not emitted an XLOG record for our
|
||||
* commit, and so in the event of a crash the clog update might be
|
||||
* lost. This is okay because no one else will ever care whether
|
||||
* we committed.
|
||||
*/
|
||||
if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate)
|
||||
TransactionIdCommit(xid);
|
||||
@@ -628,6 +628,7 @@ AtCommit_Cache(void)
* Clean up the relation cache.
*/
AtEOXact_RelationCache(true);

/*
* Make catalog changes visible to all backends.
*/
@@ -698,8 +699,8 @@ RecordTransactionAbort(void)
{
/*
* If we made neither any transaction-controlled XLOG entries nor any
* temp-rel updates, we can omit recording the transaction abort at all.
* No one will ever care that it aborted.
* temp-rel updates, we can omit recording the transaction abort at
* all. No one will ever care that it aborted.
*/
if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate)
{
@@ -716,11 +717,12 @@ RecordTransactionAbort(void)
START_CRIT_SECTION();

/*
* We only need to log the abort in XLOG if the transaction made any
* transaction-controlled XLOG entries. (Otherwise, its XID appears
* nowhere in permanent storage, so no one else will ever care if it
* committed.) We do not flush XLOG to disk in any case, since the
* default assumption after a crash would be that we aborted, anyway.
* We only need to log the abort in XLOG if the transaction made
* any transaction-controlled XLOG entries. (Otherwise, its XID
* appears nowhere in permanent storage, so no one else will ever
* care if it committed.) We do not flush XLOG to disk in any
* case, since the default assumption after a crash would be that
* we aborted, anyway.
*/
if (MyLastRecPtr.xrecoff != 0)
{
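The abort-side hunks state the same rules with one asymmetry: the abort record is never flushed, because after a crash the default assumption is that the transaction aborted anyway. A hedged sketch of that shape (illustrative only; TransactionIdAbort is assumed as the clog counterpart of TransactionIdCommit and is not part of this hunk):

	if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate)
	{
		START_CRIT_SECTION();
		if (MyLastRecPtr.xrecoff != 0)
		{
			/* emit an abort WAL record here, but do NOT XLogFlush it */
		}
		TransactionIdAbort(xid);	/* assumed clog counterpart */
		END_CRIT_SECTION();
	}
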
@@ -1165,8 +1167,8 @@ StartTransactionCommand(bool preventChain)
TransactionState s = CurrentTransactionState;

/*
* Remember if caller wants to prevent autocommit-off chaining.
* This is only allowed if not already in a transaction block.
* Remember if caller wants to prevent autocommit-off chaining. This
* is only allowed if not already in a transaction block.
*/
suppressChain = preventChain;
if (preventChain && s->blockState != TBLOCK_DEFAULT)
@@ -1260,16 +1262,18 @@ CommitTransactionCommand(bool forceCommit)
{
/*
* If we aren't in a transaction block, and we are doing
* autocommit, just do our usual transaction commit. But
* if we aren't doing autocommit, start a transaction block
* automatically by switching to INPROGRESS state. (We handle
* this choice here, and not earlier, so that an explicit BEGIN
* issued in autocommit-off mode won't issue strange warnings.)
* autocommit, just do our usual transaction commit. But if
* we aren't doing autocommit, start a transaction block
* automatically by switching to INPROGRESS state. (We handle
* this choice here, and not earlier, so that an explicit
* BEGIN issued in autocommit-off mode won't issue strange
* warnings.)
*
* Autocommit mode is forced by either a true forceCommit parameter
* to me, or a true preventChain parameter to the preceding
* StartTransactionCommand call. This is needed so that commands
* like VACUUM can ensure that the right things happen.
* Autocommit mode is forced by either a true forceCommit
* parameter to me, or a true preventChain parameter to the
* preceding StartTransactionCommand call. This is needed so
* that commands like VACUUM can ensure that the right things
* happen.
*/
case TBLOCK_DEFAULT:
if (autocommit || forceCommit || suppressChain)
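The reflowed comment covers only the TBLOCK_DEFAULT case; the branch bodies lie outside this hunk. A sketch of the choice it describes, with the two branch actions filled in as assumptions (CommitTransaction and the INPROGRESS assignment are inferred from the comment, not shown in the diff):

	case TBLOCK_DEFAULT:
		if (autocommit || forceCommit || suppressChain)
			CommitTransaction();			/* ordinary autocommit */
		else
		{
			/* autocommit off: silently start a transaction block */
			s->blockState = TBLOCK_INPROGRESS;
		}
		break;
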
@@ -1442,9 +1446,9 @@ BeginTransactionBlock(void)
s->blockState = TBLOCK_BEGIN;

/*
* do begin processing. NOTE: if you put anything here, check that
* it behaves properly in both autocommit-on and autocommit-off modes.
* In the latter case we will already have done some work in the new
* do begin processing. NOTE: if you put anything here, check that it
* behaves properly in both autocommit-on and autocommit-off modes. In
* the latter case we will already have done some work in the new
* transaction.
*/


@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.105 2002/09/02 02:47:01 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.106 2002/09/04 20:31:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -131,13 +131,13 @@ bool InRecovery = false;
*
* Note that XLOG records inserted outside transaction control are not
* reflected into MyLastRecPtr. They do, however, cause MyXactMadeXLogEntry
* to be set true. The latter can be used to test whether the current xact
* to be set true. The latter can be used to test whether the current xact
* made any loggable changes (including out-of-xact changes, such as
* sequence updates).
*
* When we insert/update/delete a tuple in a temporary relation, we do not
* make any XLOG record, since we don't care about recovering the state of
* the temp rel after a crash. However, we will still need to remember
* the temp rel after a crash. However, we will still need to remember
* whether our transaction committed or aborted in that case. So, we must
* set MyXactMadeTempRelUpdate true to indicate that the XID will be of
* interest later.
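A hypothetical caller makes the flag rules above concrete; rel_is_temp, info, rdata, and recptr are placeholders rather than names taken from this diff:

	if (rel_is_temp)
		MyXactMadeTempRelUpdate = true;		/* no WAL record, but the XID still matters */
	else
		recptr = XLogInsert(RM_HEAP_ID, info, rdata);	/* sets MyXactMadeXLogEntry, and
								 * MyLastRecPtr when the record is
								 * transaction-controlled */
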
@@ -151,7 +151,7 @@ bool MyXactMadeTempRelUpdate = false;
/*
* ProcLastRecPtr points to the start of the last XLOG record inserted by the
* current backend. It is updated for all inserts, transaction-controlled
* or not. ProcLastRecEnd is similar but points to end+1 of last record.
* or not. ProcLastRecEnd is similar but points to end+1 of last record.
*/
static XLogRecPtr ProcLastRecPtr = {0, 0};

@@ -162,7 +162,7 @@ XLogRecPtr ProcLastRecEnd = {0, 0};
* (which is almost but not quite the same as a pointer to the most recent
* CHECKPOINT record). We update this from the shared-memory copy,
* XLogCtl->Insert.RedoRecPtr, whenever we can safely do so (ie, when we
* hold the Insert lock). See XLogInsert for details. We are also allowed
* hold the Insert lock). See XLogInsert for details. We are also allowed
* to update from XLogCtl->Insert.RedoRecPtr if we hold the info_lck;
* see GetRedoRecPtr.
*/
@@ -766,7 +766,7 @@ begin:;
/*
* We do not acquire SInvalLock here because of possible deadlock.
* Anyone who wants to inspect other procs' logRec must acquire
* WALInsertLock, instead. A better solution would be a per-PROC
* WALInsertLock, instead. A better solution would be a per-PROC
* spinlock, but no time for that before 7.2 --- tgl 12/19/01.
*/
MyProc->logRec = RecPtr;
@@ -1283,26 +1283,27 @@ XLogFlush(XLogRecPtr record)

/*
* If we still haven't flushed to the request point then we have a
* problem; most likely, the requested flush point is past end of XLOG.
* This has been seen to occur when a disk page has a corrupted LSN.
* problem; most likely, the requested flush point is past end of
* XLOG. This has been seen to occur when a disk page has a corrupted
* LSN.
*
* Formerly we treated this as a PANIC condition, but that hurts the
* system's robustness rather than helping it: we do not want to take
* down the whole system due to corruption on one data page. In
* particular, if the bad page is encountered again during recovery then
* we would be unable to restart the database at all! (This scenario
* has actually happened in the field several times with 7.1 releases.
* Note that we cannot get here while InRedo is true, but if the bad
* page is brought in and marked dirty during recovery then
* particular, if the bad page is encountered again during recovery
* then we would be unable to restart the database at all! (This
* scenario has actually happened in the field several times with 7.1
* releases. Note that we cannot get here while InRedo is true, but if
* the bad page is brought in and marked dirty during recovery then
* CreateCheckpoint will try to flush it at the end of recovery.)
*
* The current approach is to ERROR under normal conditions, but only
* WARNING during recovery, so that the system can be brought up even if
* there's a corrupt LSN. Note that for calls from xact.c, the ERROR
* will be promoted to PANIC since xact.c calls this routine inside a
* critical section. However, calls from bufmgr.c are not within
* critical sections and so we will not force a restart for a bad LSN
* on a data page.
* WARNING during recovery, so that the system can be brought up even
* if there's a corrupt LSN. Note that for calls from xact.c, the
* ERROR will be promoted to PANIC since xact.c calls this routine
* inside a critical section. However, calls from bufmgr.c are not
* within critical sections and so we will not force a restart for a
* bad LSN on a data page.
*/
if (XLByteLT(LogwrtResult.Flush, record))
elog(InRecovery ? WARNING : ERROR,
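The promotion mentioned above comes from error handling inside critical sections rather than from XLogFlush itself. A sketch of the xact.c-style call pattern (recptr is a placeholder, not a variable from this hunk):

	START_CRIT_SECTION();
	XLogFlush(recptr);	/* a corrupt-LSN elog(ERROR) here escalates to PANIC */
	END_CRIT_SECTION();

A call from bufmgr.c, made outside any critical section, fails with an ordinary ERROR instead.
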
@@ -1565,7 +1566,7 @@ PreallocXlogFiles(XLogRecPtr endptr)

XLByteToPrevSeg(endptr, _logId, _logSeg);
if ((endptr.xrecoff - 1) % XLogSegSize >=
(uint32) (0.75 * XLogSegSize))
(uint32) (0.75 * XLogSegSize))
{
NextLogSeg(_logId, _logSeg);
use_existent = true;
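In words: pre-create the next segment only once the end pointer is at least three-quarters of the way into the current one. The same test with the arithmetic spelled out (assuming the classic 16MB segment size, so the threshold is 12MB; the "- 1" makes an end pointer sitting exactly on a segment boundary count as part of the previous segment):

	XLByteToPrevSeg(endptr, _logId, _logSeg);
	if ((endptr.xrecoff - 1) % XLogSegSize >= (uint32) (0.75 * XLogSegSize))
	{
		NextLogSeg(_logId, _logSeg);	/* the segment we may pre-create */
		use_existent = true;		/* reuse a recycled file if one already exists */
	}
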
@@ -1618,8 +1619,8 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
/*
* Before deleting the file, see if it can be recycled as
* a future log segment. We allow recycling segments up
* to XLOGfileslop segments beyond the current
* XLOG location.
* to XLOGfileslop segments beyond the current XLOG
* location.
*/
if (InstallXLogFileSegment(endlogId, endlogSeg, path,
true, XLOGfileslop,
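The recycle-or-delete shape this comment describes, sketched end-to-end; the trailing argument of InstallXLogFileSegment and the unlink() fallback are assumptions, since the hunk cuts off mid-call:

	if (InstallXLogFileSegment(endlogId, endlogSeg, path,
							   true, XLOGfileslop, true))
	{
		/* recycled: the old file is now a ready-made future segment */
	}
	else
		unlink(path);	/* too far ahead of the current XLOG location */
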
@@ -2196,7 +2197,7 @@ ReadControlFile(void)
if (ControlFile->catalog_version_no != CATALOG_VERSION_NO)
elog(PANIC,
"The database cluster was initialized with CATALOG_VERSION_NO %d,\n"
"\tbut the backend was compiled with CATALOG_VERSION_NO %d.\n"
"\tbut the backend was compiled with CATALOG_VERSION_NO %d.\n"
"\tIt looks like you need to initdb.",
ControlFile->catalog_version_no, CATALOG_VERSION_NO);
if (ControlFile->blcksz != BLCKSZ)
@@ -2221,7 +2222,7 @@ ReadControlFile(void)

if (ControlFile->funcMaxArgs != FUNC_MAX_ARGS)
elog(PANIC,
"The database cluster was initialized with FUNC_MAX_ARGS %d,\n"
"The database cluster was initialized with FUNC_MAX_ARGS %d,\n"
"\tbut the backend was compiled with FUNC_MAX_ARGS %d.\n"
"\tIt looks like you need to recompile or initdb.",
ControlFile->funcMaxArgs, FUNC_MAX_ARGS);
@@ -2235,21 +2236,21 @@ ReadControlFile(void)
#else
if (ControlFile->enableIntTimes != FALSE)
elog(PANIC,
"The database cluster was initialized with HAVE_INT64_TIMESTAMP\n"
"\tbut the backend was compiled without HAVE_INT64_TIMESTAMP.\n"
"The database cluster was initialized with HAVE_INT64_TIMESTAMP\n"
"\tbut the backend was compiled without HAVE_INT64_TIMESTAMP.\n"
"\tIt looks like you need to recompile or initdb.");
#endif

if (ControlFile->localeBuflen != LOCALE_NAME_BUFLEN)
elog(PANIC,
"The database cluster was initialized with LOCALE_NAME_BUFLEN %d,\n"
"\tbut the backend was compiled with LOCALE_NAME_BUFLEN %d.\n"
"\tbut the backend was compiled with LOCALE_NAME_BUFLEN %d.\n"
"\tIt looks like you need to initdb.",
ControlFile->localeBuflen, LOCALE_NAME_BUFLEN);

if (setlocale(LC_COLLATE, ControlFile->lc_collate) == NULL)
elog(PANIC,
"The database cluster was initialized with LC_COLLATE '%s',\n"
"The database cluster was initialized with LC_COLLATE '%s',\n"
"\twhich is not recognized by setlocale().\n"
"\tIt looks like you need to initdb.",
ControlFile->lc_collate);
@@ -3019,19 +3020,19 @@ CreateCheckPoint(bool shutdown)
}

/*
* Get UNDO record ptr - this is oldest of PGPROC->logRec values. We do
* this while holding insert lock to ensure that we won't miss any
* Get UNDO record ptr - this is oldest of PGPROC->logRec values. We
* do this while holding insert lock to ensure that we won't miss any
* about-to-commit transactions (UNDO must include all xacts that have
* commits after REDO point).
*
* XXX temporarily ifdef'd out to avoid three-way deadlock condition:
* GetUndoRecPtr needs to grab SInvalLock to ensure that it is looking
* at a stable set of proc records, but grabbing SInvalLock while holding
* WALInsertLock is no good. GetNewTransactionId may cause a WAL record
* to be written while holding XidGenLock, and GetSnapshotData needs to
* get XidGenLock while holding SInvalLock, so there's a risk of deadlock.
* Need to find a better solution. See pgsql-hackers discussion of
* 17-Dec-01.
* at a stable set of proc records, but grabbing SInvalLock while
* holding WALInsertLock is no good. GetNewTransactionId may cause a
* WAL record to be written while holding XidGenLock, and
* GetSnapshotData needs to get XidGenLock while holding SInvalLock,
* so there's a risk of deadlock. Need to find a better solution. See
* pgsql-hackers discussion of 17-Dec-01.
*/
#ifdef NOT_USED
checkPoint.undo = GetUndoRecPtr();
@@ -3298,9 +3299,7 @@ assign_xlog_sync_method(const char *method, bool doit, bool interactive)
}
#endif
else
{
return NULL;
}

if (!doit)
return method;