
pgindent run over code.

commit 07842084fe
parent 4b04b01aaa
Author: Bruce Momjian
Date: 1999-05-25 16:15:34 +00:00

413 changed files with 11723 additions and 10769 deletions
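
The hunks below are mechanical whitespace and layout changes. As a rough illustration of the conventions pgindent enforces (a sketch written for this note, not code taken from the commit; count_set_bits is a made-up helper), a small C function formatted in that style looks like this: return type on its own line, the function name and braces starting in column one, variable declarations aligned into a column, and a space after every cast.

/*
 * Illustrative sketch only -- not part of this commit.  count_set_bits()
 * is a hypothetical helper laid out the way pgindent formats the code in
 * the hunks below.
 */
#include <stdio.h>

static int
count_set_bits(const unsigned char *bytes, int nbytes)
{
	int			i;
	int			count = 0;

	for (i = 0; i < nbytes; i++)
	{
		unsigned char b = bytes[i];

		/* peel off the low bit until the byte is exhausted */
		while (b != 0)
		{
			count += (int) (b & 1);
			b >>= 1;
		}
	}
	return count;
}

int
main(void)
{
	const unsigned char data[] = {0xFF, 0x0F, 0x01};

	printf("%d\n", count_set_bits(data, (int) sizeof(data)));
	return 0;
}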


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.50 1999/03/14 20:17:20 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.51 1999/05/25 16:06:35 momjian Exp $
*
* NOTES
* The old interface functions have been converted to macros
@ -124,7 +124,7 @@ DataFill(char *data,
*bitP |= bitmask;
}
data = (char *)att_align((long)data, att[i]->attlen, att[i]->attalign);
data = (char *) att_align((long) data, att[i]->attlen, att[i]->attalign);
switch (att[i]->attlen)
{
case -1:
@ -151,7 +151,7 @@ DataFill(char *data,
att[i]->attlen);
break;
}
data = (char *)att_addlength((long)data, att[i]->attlen, value[i]);
data = (char *) att_addlength((long) data, att[i]->attlen, value[i]);
}
}
@ -210,7 +210,7 @@ heap_attisnull(HeapTuple tup, int attnum)
int
heap_sysattrlen(AttrNumber attno)
{
HeapTupleHeader f = NULL;
HeapTupleHeader f = NULL;
switch (attno)
{
@ -301,6 +301,7 @@ heap_getsysattr(HeapTuple tup, Buffer b, int attnum)
}
return (Datum) NULL;
}
#endif
/* ----------------
@ -328,11 +329,11 @@ nocachegetattr(HeapTuple tuple,
TupleDesc tupleDesc,
bool *isnull)
{
char *tp; /* ptr to att in tuple */
HeapTupleHeader tup = tuple->t_data;
bits8 *bp = tup->t_bits; /* ptr to att in tuple */
Form_pg_attribute *att = tupleDesc->attrs;
int slow = 0; /* do we have to walk nulls? */
char *tp; /* ptr to att in tuple */
HeapTupleHeader tup = tuple->t_data;
bits8 *bp = tup->t_bits; /* ptr to att in tuple */
Form_pg_attribute *att = tupleDesc->attrs;
int slow = 0; /* do we have to walk nulls? */
#if IN_MACRO
@ -376,6 +377,7 @@ nocachegetattr(HeapTuple tuple,
}
else
{
/*
* there's a null somewhere in the tuple
*/
@ -404,12 +406,13 @@ nocachegetattr(HeapTuple tuple,
int finalbit = attnum & 0x07;
/* check for nulls "before" final bit of last byte */
if ((~ bp[byte]) & ((1 << finalbit) - 1))
if ((~bp[byte]) & ((1 << finalbit) - 1))
slow = 1;
else
{
/* check for nulls in any "earlier" bytes */
int i;
int i;
for (i = 0; i < byte; i++)
{
if (bp[i] != 0xFF)
@ -439,6 +442,7 @@ nocachegetattr(HeapTuple tuple,
else if (!HeapTupleAllFixed(tuple))
{
int j;
/*
* In for(), we make this <= and not < because we want to test
* if we can go past it in initializing offsets.
@ -456,9 +460,9 @@ nocachegetattr(HeapTuple tuple,
/*
* If slow is zero, and we got here, we know that we have a tuple with
* no nulls or varlenas before the target attribute.
* If possible, we also want to initialize the remainder of the
* attribute cached offset values.
* no nulls or varlenas before the target attribute. If possible, we
* also want to initialize the remainder of the attribute cached
* offset values.
*/
if (!slow)
{
@ -570,7 +574,7 @@ heap_copytuple(HeapTuple tuple)
newTuple->t_len = tuple->t_len;
newTuple->t_self = tuple->t_self;
newTuple->t_data = (HeapTupleHeader) ((char *) newTuple + HEAPTUPLESIZE);
memmove((char *) newTuple->t_data,
memmove((char *) newTuple->t_data,
(char *) tuple->t_data, (int) tuple->t_len);
return newTuple;
}
@ -589,11 +593,11 @@ heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest)
dest->t_data = NULL;
return;
}
dest->t_len = src->t_len;
dest->t_self = src->t_self;
dest->t_data = (HeapTupleHeader) palloc(src->t_len);
memmove((char *) dest->t_data,
memmove((char *) dest->t_data,
(char *) src->t_data, (int) src->t_len);
return;
}
@ -657,14 +661,14 @@ heap_formtuple(TupleDesc tupleDescriptor,
Datum *value,
char *nulls)
{
HeapTuple tuple; /* return tuple */
HeapTupleHeader td; /* tuple data */
int bitmaplen;
long len;
int hoff;
bool hasnull = false;
int i;
int numberOfAttributes = tupleDescriptor->natts;
HeapTuple tuple; /* return tuple */
HeapTupleHeader td; /* tuple data */
int bitmaplen;
long len;
int hoff;
bool hasnull = false;
int i;
int numberOfAttributes = tupleDescriptor->natts;
len = offsetof(HeapTupleHeaderData, t_bits);
@ -760,9 +764,9 @@ heap_modifytuple(HeapTuple tuple,
if (repl[attoff] == ' ')
{
value[attoff] = heap_getattr(tuple,
AttrOffsetGetAttrNumber(attoff),
RelationGetDescr(relation),
&isNull);
AttrOffsetGetAttrNumber(attoff),
RelationGetDescr(relation),
&isNull);
nulls[attoff] = (isNull) ? 'n' : ' ';
}
@ -790,12 +794,12 @@ heap_modifytuple(HeapTuple tuple,
infomask = newTuple->t_data->t_infomask;
memmove((char *) &newTuple->t_data->t_oid, /* XXX */
(char *) &tuple->t_data->t_oid,
((char *) &tuple->t_data->t_hoff -
(char *) &tuple->t_data->t_oid)); /* XXX */
((char *) &tuple->t_data->t_hoff -
(char *) &tuple->t_data->t_oid)); /* XXX */
newTuple->t_data->t_infomask = infomask;
newTuple->t_data->t_natts = numberOfAttributes;
newTuple->t_self = tuple->t_self;
return newTuple;
}
@ -809,10 +813,10 @@ heap_addheader(uint32 natts, /* max domain index */
int structlen, /* its length */
char *structure) /* pointer to the struct */
{
HeapTuple tuple;
HeapTupleHeader td; /* tuple data */
long len;
int hoff;
HeapTuple tuple;
HeapTupleHeader td; /* tuple data */
long len;
int hoff;
AssertArg(natts > 0);


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.45 1999/05/10 00:44:50 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.46 1999/05/25 16:06:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -25,9 +25,9 @@
#include "libpq/pqformat.h"
#include "utils/syscache.h"
static void printtup_setup(DestReceiver* self, TupleDesc typeinfo);
static void printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self);
static void printtup_cleanup(DestReceiver* self);
static void printtup_setup(DestReceiver * self, TupleDesc typeinfo);
static void printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver * self);
static void printtup_cleanup(DestReceiver * self);
/* ----------------------------------------------------------------
* printtup / debugtup support
@ -43,7 +43,7 @@ static void printtup_cleanup(DestReceiver* self);
* ----------------
*/
int
getTypeOutAndElem(Oid type, Oid* typOutput, Oid* typElem)
getTypeOutAndElem(Oid type, Oid *typOutput, Oid *typElem)
{
HeapTuple typeTuple;
@ -54,6 +54,7 @@ getTypeOutAndElem(Oid type, Oid* typOutput, Oid* typElem)
if (HeapTupleIsValid(typeTuple))
{
Form_pg_type pt = (Form_pg_type) GETSTRUCT(typeTuple);
*typOutput = (Oid) pt->typoutput;
*typElem = (Oid) pt->typelem;
return OidIsValid(*typOutput);
@ -70,27 +71,29 @@ getTypeOutAndElem(Oid type, Oid* typOutput, Oid* typElem)
* Private state for a printtup destination object
* ----------------
*/
typedef struct { /* Per-attribute information */
typedef struct
{ /* Per-attribute information */
Oid typoutput; /* Oid for the attribute's type output fn */
Oid typelem; /* typelem value to pass to the output fn */
FmgrInfo finfo; /* Precomputed call info for typoutput */
} PrinttupAttrInfo;
} PrinttupAttrInfo;
typedef struct {
DestReceiver pub; /* publicly-known function pointers */
TupleDesc attrinfo; /* The attr info we are set up for */
int nattrs;
PrinttupAttrInfo *myinfo; /* Cached info about each attr */
} DR_printtup;
typedef struct
{
DestReceiver pub; /* publicly-known function pointers */
TupleDesc attrinfo; /* The attr info we are set up for */
int nattrs;
PrinttupAttrInfo *myinfo; /* Cached info about each attr */
} DR_printtup;
/* ----------------
* Initialize: create a DestReceiver for printtup
* ----------------
*/
DestReceiver*
DestReceiver *
printtup_create_DR()
{
DR_printtup* self = (DR_printtup*) palloc(sizeof(DR_printtup));
DR_printtup *self = (DR_printtup *) palloc(sizeof(DR_printtup));
self->pub.receiveTuple = printtup;
self->pub.setup = printtup_setup;
@ -100,42 +103,43 @@ printtup_create_DR()
self->nattrs = 0;
self->myinfo = NULL;
return (DestReceiver*) self;
return (DestReceiver *) self;
}
static void
printtup_setup(DestReceiver* self, TupleDesc typeinfo)
printtup_setup(DestReceiver * self, TupleDesc typeinfo)
{
/* ----------------
* We could set up the derived attr info at this time, but we postpone it
* until the first call of printtup, for 3 reasons:
* 1. We don't waste time (compared to the old way) if there are no
* tuples at all to output.
* tuples at all to output.
* 2. Checking in printtup allows us to handle the case that the tuples
* change type midway through (although this probably can't happen in
* the current executor).
* change type midway through (although this probably can't happen in
* the current executor).
* 3. Right now, ExecutorRun passes a NULL for typeinfo anyway :-(
* ----------------
*/
}
static void
printtup_prepare_info(DR_printtup* myState, TupleDesc typeinfo, int numAttrs)
printtup_prepare_info(DR_printtup * myState, TupleDesc typeinfo, int numAttrs)
{
int i;
int i;
if (myState->myinfo)
pfree(myState->myinfo); /* get rid of any old data */
pfree(myState->myinfo); /* get rid of any old data */
myState->myinfo = NULL;
myState->attrinfo = typeinfo;
myState->nattrs = numAttrs;
if (numAttrs <= 0)
return;
myState->myinfo = (PrinttupAttrInfo*)
myState->myinfo = (PrinttupAttrInfo *)
palloc(numAttrs * sizeof(PrinttupAttrInfo));
for (i = 0; i < numAttrs; i++)
{
PrinttupAttrInfo* thisState = myState->myinfo + i;
PrinttupAttrInfo *thisState = myState->myinfo + i;
if (getTypeOutAndElem((Oid) typeinfo->attrs[i]->atttypid,
&thisState->typoutput, &thisState->typelem))
fmgr_info(thisState->typoutput, &thisState->finfo);
@ -147,9 +151,9 @@ printtup_prepare_info(DR_printtup* myState, TupleDesc typeinfo, int numAttrs)
* ----------------
*/
static void
printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver * self)
{
DR_printtup *myState = (DR_printtup*) self;
DR_printtup *myState = (DR_printtup *) self;
StringInfoData buf;
int i,
j,
@ -178,7 +182,7 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
k = 1 << 7;
for (i = 0; i < tuple->t_data->t_natts; ++i)
{
if (! heap_attisnull(tuple, i + 1))
if (!heap_attisnull(tuple, i + 1))
j |= k; /* set bit if not null */
k >>= 1;
if (k == 0) /* end of byte? */
@ -197,7 +201,8 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
*/
for (i = 0; i < tuple->t_data->t_natts; ++i)
{
PrinttupAttrInfo* thisState = myState->myinfo + i;
PrinttupAttrInfo *thisState = myState->myinfo + i;
attr = heap_getattr(tuple, i + 1, typeinfo, &isnull);
if (isnull)
continue;
@ -223,9 +228,10 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
* ----------------
*/
static void
printtup_cleanup(DestReceiver* self)
printtup_cleanup(DestReceiver * self)
{
DR_printtup* myState = (DR_printtup*) self;
DR_printtup *myState = (DR_printtup *) self;
if (myState->myinfo)
pfree(myState->myinfo);
pfree(myState);
@ -274,7 +280,7 @@ showatts(char *name, TupleDesc tupleDesc)
* ----------------
*/
void
debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver * self)
{
int i;
Datum attr;
@ -310,7 +316,7 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
* ----------------
*/
void
printtup_internal(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
printtup_internal(HeapTuple tuple, TupleDesc typeinfo, DestReceiver * self)
{
StringInfoData buf;
int i,
@ -334,7 +340,7 @@ printtup_internal(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
k = 1 << 7;
for (i = 0; i < tuple->t_data->t_natts; ++i)
{
if (! heap_attisnull(tuple, i + 1))
if (!heap_attisnull(tuple, i + 1))
j |= k; /* set bit if not null */
k >>= 1;
if (k == 0) /* end of byte? */


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/scankey.c,v 1.13 1999/02/13 23:14:13 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/scankey.c,v 1.14 1999/05/25 16:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -18,7 +18,7 @@
#include <access/skey.h>
/*
* ScanKeyEntryIsLegal
* ScanKeyEntryIsLegal
* True iff the scan key entry is legal.
*/
#define ScanKeyEntryIsLegal(entry) \
@ -28,7 +28,7 @@
)
/*
* ScanKeyEntrySetIllegal
* ScanKeyEntrySetIllegal
* Marks a scan key entry as illegal.
*/
void
@ -43,7 +43,7 @@ ScanKeyEntrySetIllegal(ScanKey entry)
}
/*
* ScanKeyEntryInitialize
* ScanKeyEntryInitialize
* Initializes an scan key entry.
*
* Note:


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.48 1999/02/13 23:14:14 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.49 1999/05/25 16:06:42 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
@ -487,7 +487,7 @@ BuildDescForRelation(List *schema, char *relname)
{
/* array of XXX is _XXX */
snprintf(typename, NAMEDATALEN,
"_%.*s", NAMEDATALEN - 2, entry->typename->name);
"_%.*s", NAMEDATALEN - 2, entry->typename->name);
attdim = length(arry);
}
else


@ -344,7 +344,7 @@ gistinsert(Relation r, Datum *datum, char *nulls, ItemPointer ht_ctid, Relation
/*
* Notes in ExecUtils:ExecOpenIndices()
*
RelationSetLockForWrite(r);
* RelationSetLockForWrite(r);
*/
res = gistdoinsert(r, itup, &giststate);
@ -1106,10 +1106,10 @@ gistdelete(Relation r, ItemPointer tid)
Page page;
/*
* Notes in ExecUtils:ExecOpenIndices()
* Also note that only vacuum deletes index tuples now...
* Notes in ExecUtils:ExecOpenIndices() Also note that only vacuum
* deletes index tuples now...
*
RelationSetLockForWrite(r);
* RelationSetLockForWrite(r);
*/
blkno = ItemPointerGetBlockNumber(tid);


@ -68,7 +68,7 @@ gistbeginscan(Relation r,
/*
* Let index_beginscan does its work...
*
RelationSetLockForRead(r);
* RelationSetLockForRead(r);
*/
s = RelationGetIndexScan(r, fromEnd, nkeys, key);


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.25 1999/02/13 23:14:17 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.26 1999/05/25 16:06:54 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@ -485,9 +485,9 @@ hashrestrpos(IndexScanDesc scan)
/* bump lock on currentMarkData and copy to currentItemData */
if (ItemPointerIsValid(&(scan->currentMarkData)))
{
so->hashso_curbuf =_hash_getbuf(scan->relation,
BufferGetBlockNumber(so->hashso_mrkbuf),
HASH_READ);
so->hashso_curbuf = _hash_getbuf(scan->relation,
BufferGetBlockNumber(so->hashso_mrkbuf),
HASH_READ);
scan->currentItemData = scan->currentMarkData;
}


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.16 1999/03/14 16:27:59 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.17 1999/05/25 16:06:56 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@ -34,9 +34,9 @@ hashint4(uint32 key)
}
uint32
hashint8(int64 *key)
hashint8(int64 * key)
{
return ~((uint32)*key);
return ~((uint32) *key);
}
/* Hash function from Chris Torek. */


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.19 1999/02/13 23:14:20 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.20 1999/05/25 16:06:58 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
@ -321,7 +321,7 @@ _hash_setpagelock(Relation rel,
{
switch (access)
{
case HASH_WRITE:
case HASH_WRITE:
LockPage(rel, blkno, ExclusiveLock);
break;
case HASH_READ:
@ -345,7 +345,7 @@ _hash_unsetpagelock(Relation rel,
{
switch (access)
{
case HASH_WRITE:
case HASH_WRITE:
UnlockPage(rel, blkno, ExclusiveLock);
break;
case HASH_READ:


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.42 1999/03/28 20:31:56 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.43 1999/05/25 16:07:04 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -117,7 +117,7 @@ initscan(HeapScanDesc scan,
* relation is empty
* ----------------
*/
scan->rs_ntup.t_data = scan->rs_ctup.t_data =
scan->rs_ntup.t_data = scan->rs_ctup.t_data =
scan->rs_ptup.t_data = NULL;
scan->rs_nbuf = scan->rs_cbuf = scan->rs_pbuf = InvalidBuffer;
}
@ -216,15 +216,15 @@ heapgettup(Relation relation,
int nkeys,
ScanKey key)
{
ItemId lpp;
Page dp;
int page;
int pages;
int lines;
OffsetNumber lineoff;
int linesleft;
ItemPointer tid = (tuple->t_data == NULL) ?
(ItemPointer) NULL : &(tuple->t_self);
ItemId lpp;
Page dp;
int page;
int pages;
int lines;
OffsetNumber lineoff;
int linesleft;
ItemPointer tid = (tuple->t_data == NULL) ?
(ItemPointer) NULL : &(tuple->t_self);
/* ----------------
* increment access statistics
@ -290,8 +290,8 @@ heapgettup(Relation relation,
return;
}
*buffer = RelationGetBufferWithBuffer(relation,
ItemPointerGetBlockNumber(tid),
*buffer);
ItemPointerGetBlockNumber(tid),
*buffer);
if (!BufferIsValid(*buffer))
elog(ERROR, "heapgettup: failed ReadBuffer");
@ -439,7 +439,8 @@ heapgettup(Relation relation,
}
else
{
++lpp; /* move forward in this page's ItemId array */
++lpp; /* move forward in this page's ItemId
* array */
++lineoff;
}
}
@ -816,6 +817,7 @@ heap_getnext(HeapScanDesc scandesc, int backw)
}
else
{ /* NONTUP */
/*
* Don't release scan->rs_cbuf at this point, because
* heapgettup doesn't increase PrivateRefCount if it is
@ -897,6 +899,7 @@ heap_getnext(HeapScanDesc scandesc, int backw)
}
else
{ /* NONTUP */
/*
* Don't release scan->rs_cbuf at this point, because
* heapgettup doesn't increase PrivateRefCount if it is
@ -966,11 +969,11 @@ heap_fetch(Relation relation,
HeapTuple tuple,
Buffer *userbuf)
{
ItemId lp;
Buffer buffer;
PageHeader dp;
ItemPointer tid = &(tuple->t_self);
OffsetNumber offnum;
ItemId lp;
Buffer buffer;
PageHeader dp;
ItemPointer tid = &(tuple->t_self);
OffsetNumber offnum;
AssertMacro(PointerIsValid(userbuf)); /* see comments above */
@ -1093,9 +1096,7 @@ heap_insert(Relation relation, HeapTuple tup)
RelationPutHeapTupleAtEnd(relation, tup);
if (IsSystemRelationName(RelationGetRelationName(relation)->data))
{
RelationInvalidateHeapTuple(relation, tup);
}
return tup->t_data->t_oid;
}
@ -1106,11 +1107,11 @@ heap_insert(Relation relation, HeapTuple tup)
int
heap_delete(Relation relation, ItemPointer tid, ItemPointer ctid)
{
ItemId lp;
HeapTupleData tp;
PageHeader dp;
Buffer buffer;
int result;
ItemId lp;
HeapTupleData tp;
PageHeader dp;
Buffer buffer;
int result;
/* increment access statistics */
IncrHeapAccessStat(local_delete);
@ -1130,10 +1131,10 @@ heap_delete(Relation relation, ItemPointer tid, ItemPointer ctid)
tp.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
tp.t_len = ItemIdGetLength(lp);
tp.t_self = *tid;
l1:
result = HeapTupleSatisfiesUpdate(&tp);
if (result == HeapTupleInvisible)
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
@ -1142,7 +1143,7 @@ l1:
}
else if (result == HeapTupleBeingUpdated)
{
TransactionId xwait = tp.t_data->t_xmax;
TransactionId xwait = tp.t_data->t_xmax;
/* sleep untill concurrent transaction ends */
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
@ -1177,8 +1178,8 @@ l1:
/* store transaction information of xact deleting the tuple */
TransactionIdStore(GetCurrentTransactionId(), &(tp.t_data->t_xmax));
tp.t_data->t_cmax = GetCurrentCommandId();
tp.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
tp.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
@ -1194,14 +1195,14 @@ l1:
* heap_replace - replace a tuple
*/
int
heap_replace(Relation relation, ItemPointer otid, HeapTuple newtup,
ItemPointer ctid)
heap_replace(Relation relation, ItemPointer otid, HeapTuple newtup,
ItemPointer ctid)
{
ItemId lp;
HeapTupleData oldtup;
PageHeader dp;
Buffer buffer;
int result;
ItemId lp;
HeapTupleData oldtup;
PageHeader dp;
Buffer buffer;
int result;
/* increment access statistics */
IncrHeapAccessStat(local_replace);
@ -1223,7 +1224,7 @@ heap_replace(Relation relation, ItemPointer otid, HeapTuple newtup,
l2:
result = HeapTupleSatisfiesUpdate(&oldtup);
if (result == HeapTupleInvisible)
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
@ -1232,7 +1233,7 @@ l2:
}
else if (result == HeapTupleBeingUpdated)
{
TransactionId xwait = oldtup.t_data->t_xmax;
TransactionId xwait = oldtup.t_data->t_xmax;
/* sleep untill concurrent transaction ends */
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
@ -1275,19 +1276,20 @@ l2:
/* logically delete old item */
TransactionIdStore(GetCurrentTransactionId(), &(oldtup.t_data->t_xmax));
oldtup.t_data->t_cmax = GetCurrentCommandId();
oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
/* insert new item */
if ((unsigned) DOUBLEALIGN(newtup->t_len) <= PageGetFreeSpace((Page) dp))
RelationPutHeapTuple(relation, buffer, newtup);
else
{
/*
* New item won't fit on same page as old item, have to look
* for a new place to put it. Note that we have to unlock
* current buffer context - not good but RelationPutHeapTupleAtEnd
* uses extend lock.
* New item won't fit on same page as old item, have to look for a
* new place to put it. Note that we have to unlock current buffer
* context - not good but RelationPutHeapTupleAtEnd uses extend
* lock.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
RelationPutHeapTupleAtEnd(relation, newtup);
@ -1295,8 +1297,8 @@ l2:
}
/*
* New item in place, now record address of new tuple in
* t_ctid of old one.
* New item in place, now record address of new tuple in t_ctid of old
* one.
*/
oldtup.t_data->t_ctid = newtup->t_self;
@ -1316,10 +1318,10 @@ l2:
int
heap_mark4update(Relation relation, HeapTuple tuple, Buffer *buffer)
{
ItemPointer tid = &(tuple->t_self);
ItemId lp;
PageHeader dp;
int result;
ItemPointer tid = &(tuple->t_self);
ItemId lp;
PageHeader dp;
int result;
/* increment access statistics */
IncrHeapAccessStat(local_mark4update);
@ -1336,10 +1338,10 @@ heap_mark4update(Relation relation, HeapTuple tuple, Buffer *buffer)
lp = PageGetItemId(dp, ItemPointerGetOffsetNumber(tid));
tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
tuple->t_len = ItemIdGetLength(lp);
l3:
result = HeapTupleSatisfiesUpdate(tuple);
if (result == HeapTupleInvisible)
{
LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
@ -1348,7 +1350,7 @@ l3:
}
else if (result == HeapTupleBeingUpdated)
{
TransactionId xwait = tuple->t_data->t_xmax;
TransactionId xwait = tuple->t_data->t_xmax;
/* sleep untill concurrent transaction ends */
LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Id: hio.c,v 1.19 1999/05/07 01:22:53 vadim Exp $
* $Id: hio.c,v 1.20 1999/05/25 16:07:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -39,11 +39,11 @@ RelationPutHeapTuple(Relation relation,
Buffer buffer,
HeapTuple tuple)
{
Page pageHeader;
OffsetNumber offnum;
unsigned int len;
ItemId itemId;
Item item;
Page pageHeader;
OffsetNumber offnum;
unsigned int len;
ItemId itemId;
Item item;
/* ----------------
* increment access statistics
@ -62,13 +62,13 @@ RelationPutHeapTuple(Relation relation,
itemId = PageGetItemId((Page) pageHeader, offnum);
item = PageGetItem((Page) pageHeader, itemId);
ItemPointerSet(&((HeapTupleHeader) item)->t_ctid,
BufferGetBlockNumber(buffer), offnum);
ItemPointerSet(&((HeapTupleHeader) item)->t_ctid,
BufferGetBlockNumber(buffer), offnum);
/*
* Let the caller do this!
*
WriteBuffer(buffer);
* WriteBuffer(buffer);
*/
/* return an accurate tuple */
@ -111,8 +111,8 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
Item item;
/*
* Lock relation for extention. We can use LockPage here as long as
* in all other places we use page-level locking for indices only.
* Lock relation for extention. We can use LockPage here as long as in
* all other places we use page-level locking for indices only.
* Alternatevely, we could define pseudo-table as we do for
* transactions with XactLockTable.
*/
@ -132,6 +132,7 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
{
buffer = ReadBuffer(relation, lastblock);
pageHeader = (Page) BufferGetPage(buffer);
/*
* There was IF instead of ASSERT here ?!
*/


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.16 1999/02/13 23:14:29 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.17 1999/05/25 16:07:12 momjian Exp $
*
* NOTES
* many of the old access method routines have been turned into
@ -270,5 +270,5 @@ IndexScanRestorePosition(IndexScanDesc scan)
scan->flags = 0x0; /* XXX should have a symbolic name */
}
#endif
#endif


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.31 1999/02/13 23:14:30 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.32 1999/05/25 16:07:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -57,7 +57,7 @@ static bool StrategyTermIsValid(StrategyTerm term,
*/
/*
* StrategyMapGetScanKeyEntry
* StrategyMapGetScanKeyEntry
* Returns a scan key entry of a index strategy mapping member.
*
* Note:
@ -75,7 +75,7 @@ StrategyMapGetScanKeyEntry(StrategyMap map,
}
/*
* IndexStrategyGetStrategyMap
* IndexStrategyGetStrategyMap
* Returns an index strategy mapping of an index strategy.
*
* Note:
@ -97,7 +97,7 @@ IndexStrategyGetStrategyMap(IndexStrategy indexStrategy,
}
/*
* AttributeNumberGetIndexStrategySize
* AttributeNumberGetIndexStrategySize
* Computes the size of an index strategy.
*/
Size
@ -294,8 +294,8 @@ RelationGetStrategy(Relation relation,
Assert(RegProcedureIsValid(procedure));
strategyMap = IndexStrategyGetStrategyMap(RelationGetIndexStrategy(relation),
evaluation->maxStrategy,
attributeNumber);
evaluation->maxStrategy,
attributeNumber);
/* get a strategy number for the procedure ignoring flags for now */
for (index = 0; index < evaluation->maxStrategy; index += 1)
@ -526,7 +526,7 @@ OperatorRelationFillScanKeyEntry(Relation operatorRelation,
/*
* IndexSupportInitialize
* IndexSupportInitialize
* Initializes an index strategy and associated support procedures.
*/
void


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.22 1999/03/14 05:08:56 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.23 1999/05/25 16:07:21 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@ -40,7 +40,7 @@ btint4cmp(int32 a, int32 b)
}
int32
btint8cmp(int64 *a, int64 *b)
btint8cmp(int64 * a, int64 * b)
{
if (*a > *b)
return 1;


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.39 1999/05/01 16:09:45 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.40 1999/05/25 16:07:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -99,13 +99,13 @@ l1:
/* key on the page before trying to compare it */
if (!PageIsEmpty(page) && offset <= maxoff)
{
TupleDesc itupdesc;
BTItem cbti;
HeapTupleData htup;
BTPageOpaque opaque;
Buffer nbuf;
BlockNumber blkno;
bool chtup = true;
TupleDesc itupdesc;
BTItem cbti;
HeapTupleData htup;
BTPageOpaque opaque;
Buffer nbuf;
BlockNumber blkno;
bool chtup = true;
itupdesc = RelationGetDescr(rel);
nbuf = InvalidBuffer;
@ -122,15 +122,16 @@ l1:
*/
while (_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
{ /* they're equal */
/*
* Have to check is inserted heap tuple deleted one
* (i.e. just moved to another place by vacuum)!
* Have to check is inserted heap tuple deleted one (i.e.
* just moved to another place by vacuum)!
*/
if (chtup)
{
htup.t_self = btitem->bti_itup.t_tid;
heap_fetch(heapRel, SnapshotDirty, &htup, &buffer);
if (htup.t_data == NULL) /* YES! */
if (htup.t_data == NULL) /* YES! */
break;
/* Live tuple was inserted */
ReleaseBuffer(buffer);
@ -139,11 +140,11 @@ l1:
cbti = (BTItem) PageGetItem(page, PageGetItemId(page, offset));
htup.t_self = cbti->bti_itup.t_tid;
heap_fetch(heapRel, SnapshotDirty, &htup, &buffer);
if (htup.t_data != NULL) /* it is a duplicate */
if (htup.t_data != NULL) /* it is a duplicate */
{
TransactionId xwait =
(TransactionIdIsValid(SnapshotDirty->xmin)) ?
SnapshotDirty->xmin : SnapshotDirty->xmax;
TransactionId xwait =
(TransactionIdIsValid(SnapshotDirty->xmin)) ?
SnapshotDirty->xmin : SnapshotDirty->xmax;
/*
* If this tuple is being updated by other transaction
@ -156,7 +157,7 @@ l1:
_bt_relbuf(rel, nbuf, BT_READ);
_bt_relbuf(rel, buf, BT_WRITE);
XactLockTableWait(xwait);
goto l1; /* continue from the begin */
goto l1;/* continue from the begin */
}
elog(ERROR, "Cannot insert a duplicate key into a unique index");
}
@ -571,10 +572,10 @@ _bt_insertonpg(Relation rel,
* reasoning).
*/
l_spl:;
l_spl: ;
if (stack == (BTStack) NULL)
{
if (!is_root) /* if this page was not root page */
if (!is_root) /* if this page was not root page */
{
elog(DEBUG, "btree: concurrent ROOT page split");
stack = (BTStack) palloc(sizeof(BTStackData));
@ -1144,8 +1145,8 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
lpage = BufferGetPage(lbuf);
rpage = BufferGetPage(rbuf);
((BTPageOpaque) PageGetSpecialPointer(lpage))->btpo_parent =
((BTPageOpaque) PageGetSpecialPointer(rpage))->btpo_parent =
((BTPageOpaque) PageGetSpecialPointer(lpage))->btpo_parent =
((BTPageOpaque) PageGetSpecialPointer(rpage))->btpo_parent =
rootbknum;
/*


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.20 1999/04/22 08:19:59 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.21 1999/05/25 16:07:26 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@ -421,7 +421,7 @@ _bt_pageinit(Page page, Size size)
MemSet(page, 0, size);
PageInit(page, size, sizeof(BTPageOpaqueData));
((BTPageOpaque) PageGetSpecialPointer(page))->btpo_parent =
((BTPageOpaque) PageGetSpecialPointer(page))->btpo_parent =
InvalidBlockNumber;
}
@ -494,17 +494,16 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
maxoff = PageGetMaxOffsetNumber(page);
if (stack->bts_offset == InvalidOffsetNumber ||
if (stack->bts_offset == InvalidOffsetNumber ||
maxoff >= stack->bts_offset)
{
/*
* _bt_insertonpg set bts_offset to InvalidOffsetNumber
* in the case of concurrent ROOT page split
* _bt_insertonpg set bts_offset to InvalidOffsetNumber in the
* case of concurrent ROOT page split
*/
if (stack->bts_offset == InvalidOffsetNumber)
{
i = P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY;
}
else
{
itemid = PageGetItemId(page, stack->bts_offset);
@ -524,7 +523,7 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
}
/* if the item has just moved right on this page, we're done */
for ( ;
for (;
i <= maxoff;
i = OffsetNumberNext(i))
{


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.37 1999/03/28 20:31:58 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.38 1999/05/25 16:07:27 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@ -367,7 +367,7 @@ btinsert(Relation rel, Datum *datum, char *nulls, ItemPointer ht_ctid, Relation
btitem = _bt_formitem(itup);
res = _bt_doinsert(rel, btitem,
IndexIsUnique(RelationGetRelid(rel)), heapRel);
IndexIsUnique(RelationGetRelid(rel)), heapRel);
pfree(btitem);
pfree(itup);
@ -391,9 +391,10 @@ btgettuple(IndexScanDesc scan, ScanDirection dir)
if (ItemPointerIsValid(&(scan->currentItemData)))
{
/*
* Restore scan position using heap TID returned
* by previous call to btgettuple().
* Restore scan position using heap TID returned by previous call
* to btgettuple().
*/
_bt_restscan(scan);
res = _bt_next(scan, dir);
@ -623,16 +624,15 @@ _bt_restscan(IndexScanDesc scan)
BlockNumber blkno;
/*
* We use this as flag when first index tuple on page
* is deleted but we do not move left (this would
* slowdown vacuum) - so we set current->ip_posid
* before first index tuple on the current page
* We use this as flag when first index tuple on page is deleted but
* we do not move left (this would slowdown vacuum) - so we set
* current->ip_posid before first index tuple on the current page
* (_bt_step will move it right)...
*/
if (!ItemPointerIsValid(&target))
{
ItemPointerSetOffsetNumber(&(scan->currentItemData),
OffsetNumberPrev(P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY));
ItemPointerSetOffsetNumber(&(scan->currentItemData),
OffsetNumberPrev(P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY));
return;
}


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtscan.c,v 1.20 1999/03/28 20:31:58 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtscan.c,v 1.21 1999/05/25 16:07:29 momjian Exp $
*
*
* NOTES
@ -112,12 +112,12 @@ _bt_adjscans(Relation rel, ItemPointer tid)
static void
_bt_scandel(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno)
{
ItemPointer current;
Buffer buf;
BTScanOpaque so;
OffsetNumber start;
Page page;
BTPageOpaque opaque;
ItemPointer current;
Buffer buf;
BTScanOpaque so;
OffsetNumber start;
Page page;
BTPageOpaque opaque;
so = (BTScanOpaque) scan->opaque;
buf = so->btso_curbuf;
@ -140,7 +140,7 @@ _bt_scandel(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno)
{
Page pg = BufferGetPage(buf);
BTItem btitem = (BTItem) PageGetItem(pg,
PageGetItemId(pg, ItemPointerGetOffsetNumber(current)));
PageGetItemId(pg, ItemPointerGetOffsetNumber(current)));
so->curHeapIptr = btitem->bti_itup.t_tid;
}
@ -181,7 +181,7 @@ _bt_scandel(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno)
{
Page pg = BufferGetPage(buf);
BTItem btitem = (BTItem) PageGetItem(pg,
PageGetItemId(pg, ItemPointerGetOffsetNumber(current)));
PageGetItemId(pg, ItemPointerGetOffsetNumber(current)));
so->mrkHeapIptr = btitem->bti_itup.t_tid;
}


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.43 1999/04/13 17:18:28 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.44 1999/05/25 16:07:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -706,7 +706,7 @@ _bt_next(IndexScanDesc scan, ScanDirection dir)
so = (BTScanOpaque) scan->opaque;
current = &(scan->currentItemData);
Assert (BufferIsValid(so->btso_curbuf));
Assert(BufferIsValid(so->btso_curbuf));
/* we still have the buffer pinned and locked */
buf = so->btso_curbuf;
@ -733,8 +733,8 @@ _bt_next(IndexScanDesc scan, ScanDirection dir)
return res;
}
} while (keysok >= so->numberOfFirstKeys ||
(keysok == -1 && ScanDirectionIsBackward(dir)));
} while (keysok >= so->numberOfFirstKeys ||
(keysok == -1 && ScanDirectionIsBackward(dir)));
ItemPointerSetInvalid(current);
so->btso_curbuf = InvalidBuffer;
@ -776,8 +776,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
BTScanOpaque so;
ScanKeyData skdata;
Size keysok;
int i;
int nKeyIndex = -1;
int i;
int nKeyIndex = -1;
rel = scan->relation;
so = (BTScanOpaque) scan->opaque;
@ -795,27 +795,27 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
if (ScanDirectionIsBackward(dir))
{
for (i=0; i<so->numberOfKeys; i++)
for (i = 0; i < so->numberOfKeys; i++)
{
if (so->keyData[i].sk_attno != 1)
break;
strat = _bt_getstrat(rel, so->keyData[i].sk_attno,
so->keyData[i].sk_procedure);
strat = _bt_getstrat(rel, so->keyData[i].sk_attno,
so->keyData[i].sk_procedure);
if (strat == BTLessStrategyNumber ||
strat == BTLessEqualStrategyNumber||
strat == BTEqualStrategyNumber)
strat == BTLessEqualStrategyNumber ||
strat == BTEqualStrategyNumber)
{
nKeyIndex = i;
break;
}
}
}
else
else
{
strat = _bt_getstrat(rel, 1, so->keyData[0].sk_procedure);
if (strat == BTLessStrategyNumber ||
strat == BTLessEqualStrategyNumber)
strat == BTLessEqualStrategyNumber)
;
else
nKeyIndex = 0;
@ -850,7 +850,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
}
proc = index_getprocid(rel, 1, BTORDER_PROC);
ScanKeyEntryInitialize(&skdata, so->keyData[nKeyIndex].sk_flags,
1, proc, so->keyData[nKeyIndex].sk_argument);
1, proc, so->keyData[nKeyIndex].sk_argument);
stack = _bt_search(rel, 1, &skdata, &buf);
_bt_freestack(stack);
@ -1104,9 +1104,10 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
rel = scan->relation;
current = &(scan->currentItemData);
/*
* Don't use ItemPointerGetOffsetNumber or you risk to get
* assertion due to ability of ip_posid to be equal 0.
* Don't use ItemPointerGetOffsetNumber or you risk to get assertion
* due to ability of ip_posid to be equal 0.
*/
offnum = current->ip_posid;
page = BufferGetPage(*bufP);


@ -5,7 +5,7 @@
*
*
* IDENTIFICATION
* $Id: nbtsort.c,v 1.38 1999/05/09 00:53:19 tgl Exp $
* $Id: nbtsort.c,v 1.39 1999/05/25 16:07:34 momjian Exp $
*
* NOTES
*
@ -552,16 +552,16 @@ _bt_spoolinit(Relation index, int ntapes, bool isunique)
btspool->bts_tape = 0;
btspool->isunique = isunique;
btspool->bts_itape =(BTTapeBlock **) palloc(sizeof(BTTapeBlock *) * ntapes);
btspool->bts_otape =(BTTapeBlock **) palloc(sizeof(BTTapeBlock *) * ntapes);
btspool->bts_itape = (BTTapeBlock **) palloc(sizeof(BTTapeBlock *) * ntapes);
btspool->bts_otape = (BTTapeBlock **) palloc(sizeof(BTTapeBlock *) * ntapes);
if (btspool->bts_itape == (BTTapeBlock **) NULL ||
btspool->bts_otape == (BTTapeBlock **) NULL)
elog(ERROR, "_bt_spoolinit: out of memory");
for (i = 0; i < ntapes; ++i)
{
btspool->bts_itape[i] = _bt_tapecreate();
btspool->bts_otape[i] = _bt_tapecreate();
btspool->bts_itape[i] = _bt_tapecreate();
btspool->bts_otape[i] = _bt_tapecreate();
}
_bt_isortcmpinit(index, btspool);


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.31 1999/02/13 23:14:42 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.32 1999/05/25 16:07:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -307,7 +307,7 @@ rtinsert(Relation r, Datum *datum, char *nulls, ItemPointer ht_ctid, Relation he
/*
* Notes in ExecUtils:ExecOpenIndices()
*
RelationSetLockForWrite(r);
* RelationSetLockForWrite(r);
*/
res = rtdoinsert(r, itup, &rtState);
@ -947,10 +947,10 @@ rtdelete(Relation r, ItemPointer tid)
Page page;
/*
* Notes in ExecUtils:ExecOpenIndices()
* Also note that only vacuum deletes index tuples now...
* Notes in ExecUtils:ExecOpenIndices() Also note that only vacuum
* deletes index tuples now...
*
RelationSetLockForWrite(r);
* RelationSetLockForWrite(r);
*/
blkno = ItemPointerGetBlockNumber(tid);


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.22 1999/02/13 23:14:43 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.23 1999/05/25 16:07:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -69,7 +69,7 @@ rtbeginscan(Relation r,
/*
* Let index_beginscan does its work...
*
RelationSetLockForRead(r);
* RelationSetLockForRead(r);
*/
s = RelationGetIndexScan(r, fromEnd, nkeys, key);


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.25 1999/03/30 01:37:21 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.26 1999/05/25 16:07:45 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@ -221,7 +221,7 @@ TransactionLogUpdate(TransactionId transactionId, /* trans id to update */
/*
* update (invalidate) our single item TransactionLogTest cache.
*
if (status != XID_COMMIT)
* if (status != XID_COMMIT)
*
* What's the hell ?! Why != XID_COMMIT ?!
*/
@ -374,7 +374,7 @@ TransRecover(Relation logRelation)
*/
/*
* InitializeTransactionLog
* InitializeTransactionLog
* Initializes transaction logging.
*/
void
@ -484,7 +484,7 @@ InitializeTransactionLog(void)
*/
/*
* TransactionIdDidCommit
* TransactionIdDidCommit
* True iff transaction associated with the identifier did commit.
*
* Note:
@ -500,7 +500,7 @@ TransactionIdDidCommit(TransactionId transactionId)
}
/*
* TransactionIdDidAborted
* TransactionIdDidAborted
* True iff transaction associated with the identifier did abort.
*
* Note:
@ -541,7 +541,7 @@ TransactionIdIsInProgress(TransactionId transactionId)
*/
/*
* TransactionIdCommit
* TransactionIdCommit
* Commits the transaction associated with the identifier.
*
* Note:
@ -557,7 +557,7 @@ TransactionIdCommit(TransactionId transactionId)
}
/*
* TransactionIdAbort
* TransactionIdAbort
* Aborts the transaction associated with the identifier.
*
* Note:


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.19 1999/02/13 23:14:48 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.20 1999/05/25 16:07:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -260,7 +260,7 @@ VariableRelationPutNextOid(Oid *oidP)
* In the version 2 transaction system, transaction id's are
* restricted in several ways.
*
* -- Old comments removed
* -- Old comments removed
*
* Second, since we may someday preform compression of the data
* in the log and time relations, we cause the numbering of the


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.35 1999/05/13 00:34:57 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.36 1999/05/25 16:07:50 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
@ -194,8 +194,8 @@ TransactionStateData CurrentTransactionStateData = {
TransactionState CurrentTransactionState = &CurrentTransactionStateData;
int DefaultXactIsoLevel = XACT_READ_COMMITTED;
int XactIsoLevel;
int DefaultXactIsoLevel = XACT_READ_COMMITTED;
int XactIsoLevel;
/* ----------------
* info returned when the system is disabled
@ -299,6 +299,7 @@ IsTransactionState(void)
*/
return false;
}
#endif
/* --------------------------------
@ -516,7 +517,7 @@ CommandCounterIncrement()
AtStart_Cache();
TransactionIdFlushCache();
}
void
@ -695,9 +696,9 @@ AtCommit_Memory()
/* ----------------
* Release memory in the blank portal.
* Since EndPortalAllocMode implicitly works on the current context,
* first make real sure that the blank portal is the selected context.
* (This is probably not necessary, but seems like a good idea...)
* Since EndPortalAllocMode implicitly works on the current context,
* first make real sure that the blank portal is the selected context.
* (This is probably not necessary, but seems like a good idea...)
* ----------------
*/
portal = GetPortalByName(NULL);
@ -789,9 +790,9 @@ AtAbort_Memory()
/* ----------------
* Release memory in the blank portal.
* Since EndPortalAllocMode implicitly works on the current context,
* first make real sure that the blank portal is the selected context.
* (This is ESSENTIAL in case we aborted from someplace where it wasn't.)
* Since EndPortalAllocMode implicitly works on the current context,
* first make real sure that the blank portal is the selected context.
* (This is ESSENTIAL in case we aborted from someplace where it wasn't.)
* ----------------
*/
portal = GetPortalByName(NULL);
@ -1074,7 +1075,7 @@ StartTransactionCommand()
break;
/* ----------------
* As with BEGIN, we should never experience this
* As with BEGIN, we should never experience this
* if we do it means the END state was not changed in the
* previous CommitTransactionCommand(). If we get it, we
* print a warning, commit the transaction, start a new
@ -1509,6 +1510,7 @@ AbortOutOfAnyTransaction()
*/
if (s->state != TRANS_DEFAULT)
AbortTransaction();
/*
* Now reset the high-level state
*/


@ -5,7 +5,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: xid.c,v 1.21 1999/02/13 23:14:49 momjian Exp $
* $Id: xid.c,v 1.22 1999/05/25 16:07:52 momjian Exp $
*
* OLD COMMENTS
* XXX WARNING


@ -7,7 +7,7 @@
* Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.59 1999/05/10 00:44:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.60 1999/05/25 16:07:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -182,7 +182,7 @@ static char *relname; /* current relation name */
Form_pg_attribute attrtypes[MAXATTR]; /* points to attribute info */
static char *values[MAXATTR]; /* cooresponding attribute values */
int numattr; /* number of attributes for cur. rel */
extern bool disableFsync; /* do not fsync the database */
extern bool disableFsync; /* do not fsync the database */
int DebugMode;
static GlobalMemory nogc = (GlobalMemory) NULL; /* special no-gc mem
@ -587,7 +587,9 @@ DefineAttr(char *name, char *type, int attnum)
printf("<%s %s> ", attrtypes[attnum]->attname.data, type);
attrtypes[attnum]->attnum = 1 + attnum; /* fillatt */
attlen = attrtypes[attnum]->attlen = Procid[typeoid].len;
/* Cheat like mad to fill in these items from the length only.
/*
* Cheat like mad to fill in these items from the length only.
* This only has to work for types used in the system catalogs...
*/
switch (attlen)


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.20 1999/02/13 23:14:55 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.21 1999/05/25 16:08:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -29,8 +29,8 @@
char *
relpath(char *relname)
{
char *path;
int bufsize = 0;
char *path;
int bufsize = 0;
if (IsSharedSystemRelationName(relname))
{
@ -43,7 +43,7 @@ relpath(char *relname)
}
/*
* IsSystemRelationName
* IsSystemRelationName
* True iff name is the name of a system catalog relation.
*
* We now make a new requirement where system catalog relns must begin
@ -64,7 +64,7 @@ IsSystemRelationName(char *relname)
}
/*
* IsSharedSystemRelationName
* IsSharedSystemRelationName
* True iff name is the name of a shared system catalog relation.
*/
bool


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.84 1999/05/22 04:12:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.85 1999/05/25 16:08:03 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -70,8 +70,8 @@
#endif
static void AddNewRelationTuple(Relation pg_class_desc,
Relation new_rel_desc, Oid new_rel_oid, unsigned natts,
char relkind, char *temp_relname);
Relation new_rel_desc, Oid new_rel_oid, unsigned natts,
char relkind, char *temp_relname);
static void AddToNoNameRelList(Relation r);
static void DeleteAttributeTuples(Relation rel);
static void DeleteRelationTuple(Relation rel);
@ -185,7 +185,7 @@ heap_create(char *relname,
bool nailme = false;
int natts = tupDesc->natts;
static unsigned int uniqueId = 0;
extern GlobalMemory CacheCxt;
MemoryContext oldcxt;
@ -240,23 +240,21 @@ heap_create(char *relname,
nailme = true;
}
else
{
relid = newoid();
}
if (isnoname)
{
Assert(!relname);
relname = palloc(NAMEDATALEN);
snprintf(relname, NAMEDATALEN, "pg_noname.%d.%u",
(int) MyProcPid, uniqueId++);
(int) MyProcPid, uniqueId++);
}
if (istemp)
{
/* replace relname of caller */
snprintf(relname, NAMEDATALEN, "pg_temp.%d.%u",
(int) MyProcPid, uniqueId++);
(int) MyProcPid, uniqueId++);
}
/* ----------------
@ -272,7 +270,7 @@ heap_create(char *relname,
/*
* create a new tuple descriptor from the one passed in
*/
*/
rel->rd_att = CreateTupleDescCopyConstr(tupDesc);
/* ----------------
@ -321,7 +319,7 @@ heap_create(char *relname,
* ----------------
*/
rel->rd_nonameunlinked = TRUE; /* change once table is created */
rel->rd_nonameunlinked = TRUE; /* change once table is created */
rel->rd_fd = (File) smgrcreate(DEFAULT_SMGR, rel);
rel->rd_nonameunlinked = FALSE;
@ -479,8 +477,8 @@ RelnameFindRelid(char *relname)
if (!IsBootstrapProcessingMode())
{
tuple = SearchSysCacheTuple(RELNAME,
PointerGetDatum(relname),
0, 0, 0);
PointerGetDatum(relname),
0, 0, 0);
if (HeapTupleIsValid(tuple))
relid = tuple->t_data->t_oid;
else
@ -488,10 +486,10 @@ RelnameFindRelid(char *relname)
}
else
{
Relation pg_class_desc;
Relation pg_class_desc;
ScanKeyData key;
HeapScanDesc pg_class_scan;
pg_class_desc = heap_openr(RelationRelationName);
/* ----------------
@ -504,7 +502,7 @@ RelnameFindRelid(char *relname)
(AttrNumber) Anum_pg_class_relname,
(RegProcedure) F_NAMEEQ,
(Datum) relname);
/* ----------------
* begin the scan
* ----------------
@ -514,14 +512,14 @@ RelnameFindRelid(char *relname)
SnapshotNow,
1,
&key);
/* ----------------
* get a tuple. if the tuple is NULL then it means we
* didn't find an existing relation.
* ----------------
*/
tuple = heap_getnext(pg_class_scan, 0);
if (HeapTupleIsValid(tuple))
relid = tuple->t_data->t_oid;
else
@ -594,7 +592,7 @@ AddNewAttributeTuples(Oid new_rel_oid,
(char *) *dpp);
heap_insert(rel, tup);
if (hasindex)
CatalogIndexInsert(idescs, Num_pg_attr_indices, rel, tup);
@ -643,11 +641,11 @@ AddNewAttributeTuples(Oid new_rel_oid,
*/
static void
AddNewRelationTuple(Relation pg_class_desc,
Relation new_rel_desc,
Oid new_rel_oid,
unsigned natts,
char relkind,
char *temp_relname)
Relation new_rel_desc,
Oid new_rel_oid,
unsigned natts,
char relkind,
char *temp_relname)
{
Form_pg_class new_rel_reltup;
HeapTuple tup;
@ -678,12 +676,12 @@ AddNewRelationTuple(Relation pg_class_desc,
* the table has been proven to be small by VACUUM or CREATE INDEX.
* (NOTE: if user does CREATE TABLE, then CREATE INDEX, then loads
* the table, he still loses until he vacuums, because CREATE INDEX
* will set reltuples to zero. Can't win 'em all. Maintaining the
* will set reltuples to zero. Can't win 'em all. Maintaining the
* stats on-the-fly would solve the problem, but the overhead of that
* would likely cost more than it'd save.)
* ----------------
*/
new_rel_reltup->relpages = 10; /* bogus estimates */
new_rel_reltup->relpages = 10; /* bogus estimates */
new_rel_reltup->reltuples = 1000;
new_rel_reltup->relowner = GetUserId();
@ -716,9 +714,10 @@ AddNewRelationTuple(Relation pg_class_desc,
if (temp_relname)
create_temp_relation(temp_relname, tup);
if (!isBootstrap)
{
/*
* First, open the catalog indices and insert index tuples for the
* new relation.
@ -730,7 +729,7 @@ AddNewRelationTuple(Relation pg_class_desc,
/* now restore processing mode */
SetProcessingMode(NormalProcessing);
}
pfree(tup);
}
@ -788,8 +787,8 @@ heap_create_with_catalog(char *relname,
Relation new_rel_desc;
Oid new_rel_oid;
int natts = tupdesc->natts;
char *temp_relname = NULL;
char *temp_relname = NULL;
/* ----------------
* sanity checks
* ----------------
@ -804,33 +803,34 @@ heap_create_with_catalog(char *relname,
/* temp tables can mask non-temp tables */
if ((!istemp && RelnameFindRelid(relname)) ||
(istemp && get_temp_rel_by_name(relname) != NULL))
(istemp && get_temp_rel_by_name(relname) != NULL))
elog(ERROR, "Relation '%s' already exists", relname);
/* invalidate cache so non-temp table is masked by temp */
if (istemp)
{
Oid relid = RelnameFindRelid(relname);
Oid relid = RelnameFindRelid(relname);
if (relid != InvalidOid)
{
/*
* This is heavy-handed, but appears necessary bjm 1999/02/01
* SystemCacheRelationFlushed(relid) is not enough either.
* This is heavy-handed, but appears necessary bjm 1999/02/01
* SystemCacheRelationFlushed(relid) is not enough either.
*/
RelationForgetRelation(relid);
ResetSystemCache();
}
}
}
/* save user relation name because heap_create changes it */
if (istemp)
{
temp_relname = pstrdup(relname); /* save original value */
temp_relname = pstrdup(relname); /* save original value */
relname = palloc(NAMEDATALEN);
strcpy(relname, temp_relname); /* heap_create will change this */
strcpy(relname, temp_relname); /* heap_create will change this */
}
/* ----------------
* ok, relation does not already exist so now we
* create an uncataloged relation and pull its relation oid
@ -838,7 +838,7 @@ heap_create_with_catalog(char *relname,
*
* Note: The call to heap_create() does all the "real" work
* of creating the disk file for the relation.
* This changes relname for noname and temp tables.
* This changes relname for noname and temp tables.
* ----------------
*/
new_rel_desc = heap_create(relname, tupdesc, false, istemp);
@ -866,11 +866,11 @@ heap_create_with_catalog(char *relname,
pg_class_desc = heap_openr(RelationRelationName);
AddNewRelationTuple(pg_class_desc,
new_rel_desc,
new_rel_oid,
natts,
relkind,
temp_relname);
new_rel_desc,
new_rel_oid,
natts,
relkind,
temp_relname);
StoreConstraints(new_rel_desc);
@ -1320,7 +1320,7 @@ heap_destroy_with_catalog(char *relname)
if (istemp)
remove_temp_relation(rid);
/* ----------------
* delete type tuple. here we want to see the effects
* of the deletions we just did, so we use setheapoverride().
@ -1334,7 +1334,7 @@ heap_destroy_with_catalog(char *relname)
* delete relation tuple
* ----------------
*/
/* must delete fake tuple in cache */
/* must delete fake tuple in cache */
DeleteRelationTuple(rel);
/*
@ -1516,10 +1516,12 @@ StoreAttrDefault(Relation rel, AttrDefault *attrdef)
extern GlobalMemory CacheCxt;
start:
/* Surround table name with double quotes to allow mixed-case and
/*
* Surround table name with double quotes to allow mixed-case and
* whitespaces in names. - BGA 1998-11-14
*/
snprintf(str, MAX_PARSE_BUFFER,
snprintf(str, MAX_PARSE_BUFFER,
"select %s%s from \"%.*s\"", attrdef->adsrc, cast,
NAMEDATALEN, rel->rd_rel->relname.data);
setheapoverride(true);
@ -1539,16 +1541,16 @@ start:
if (type != atp->atttypid)
{
if (IS_BINARY_COMPATIBLE(type, atp->atttypid))
; /* use without change */
; /* use without change */
else if (can_coerce_type(1, &(type), &(atp->atttypid)))
expr = coerce_type(NULL, (Node *)expr, type, atp->atttypid,
atp->atttypmod);
expr = coerce_type(NULL, (Node *) expr, type, atp->atttypid,
atp->atttypmod);
else if (IsA(expr, Const))
{
if (*cast != 0)
elog(ERROR, "DEFAULT clause const type '%s' mismatched with column type '%s'",
typeidTypeName(type), typeidTypeName(atp->atttypid));
snprintf(cast, 2*NAMEDATALEN, ":: %s", typeidTypeName(atp->atttypid));
snprintf(cast, 2 * NAMEDATALEN, ":: %s", typeidTypeName(atp->atttypid));
goto start;
}
else
@ -1598,12 +1600,13 @@ StoreRelCheck(Relation rel, ConstrCheck *check)
char nulls[4] = {' ', ' ', ' ', ' '};
extern GlobalMemory CacheCxt;
/* Check for table's existance. Surround table name with double-quotes
/*
* Check for table's existance. Surround table name with double-quotes
* to allow mixed-case and whitespace names. - thomas 1998-11-12
*/
snprintf(str, MAX_PARSE_BUFFER,
"select 1 from \"%.*s\" where %s",
NAMEDATALEN, rel->rd_rel->relname.data, check->ccsrc);
snprintf(str, MAX_PARSE_BUFFER,
"select 1 from \"%.*s\" where %s",
NAMEDATALEN, rel->rd_rel->relname.data, check->ccsrc);
setheapoverride(true);
planTree_list = pg_parse_and_plan(str, NULL, 0,
&queryTree_list, None, FALSE);


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.74 1999/05/17 00:27:45 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.75 1999/05/25 16:08:06 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -60,8 +60,8 @@
#define NTUPLES_PER_PAGE(natts) (BLCKSZ/((natts)*AVG_TUPLE_SIZE))
/* non-export function prototypes */
static Oid GetHeapRelationOid(char *heapRelationName, char *indexRelationName,
bool istemp);
static Oid GetHeapRelationOid(char *heapRelationName, char *indexRelationName,
bool istemp);
static TupleDesc BuildFuncTupleDesc(FuncIndexInfo *funcInfo);
static TupleDesc ConstructTupleDescriptor(Oid heapoid, Relation heapRelation,
List *attributeList,
@ -77,7 +77,7 @@ static void
static void UpdateIndexRelation(Oid indexoid, Oid heapoid,
FuncIndexInfo *funcInfo, int natts,
AttrNumber *attNums, Oid *classOids, Node *predicate,
List *attributeList, bool islossy, bool unique, bool primary);
List *attributeList, bool islossy, bool unique, bool primary);
static void DefaultBuild(Relation heapRelation, Relation indexRelation,
int numberOfAttributes, AttrNumber *attributeNumber,
IndexStrategy indexStrategy, uint16 parameterCount,
@ -126,11 +126,11 @@ GetHeapRelationOid(char *heapRelationName, char *indexRelationName, bool istemp)
Oid indoid;
Oid heapoid;
indoid = RelnameFindRelid(indexRelationName);
if ((!istemp && OidIsValid(indoid)) ||
(istemp && get_temp_rel_by_name(indexRelationName) != NULL))
(istemp && get_temp_rel_by_name(indexRelationName) != NULL))
elog(ERROR, "Cannot create index: '%s' already exists",
indexRelationName);
@ -139,7 +139,7 @@ GetHeapRelationOid(char *heapRelationName, char *indexRelationName, bool istemp)
if (!OidIsValid(heapoid))
elog(ERROR, "Cannot create index on '%s': relation does not exist",
heapRelationName);
return heapoid;
}
@ -356,7 +356,7 @@ ConstructTupleDescriptor(Oid heapoid,
}
/* ----------------------------------------------------------------
* AccessMethodObjectIdGetForm
* AccessMethodObjectIdGetForm
* Returns the formatted access method tuple given its object identifier.
*
* XXX ADD INDEXING
@ -482,7 +482,7 @@ UpdateRelationRelation(Relation indexRelation, char *temp_relname)
if (temp_relname)
create_temp_relation(temp_relname, tuple);
/*
* During normal processing, we need to make sure that the system
* catalog indices are correct. Bootstrap (initdb) time doesn't
@ -571,7 +571,7 @@ AppendAttributeTuples(Relation indexRelation, int numatts)
value[Anum_pg_attribute_attcacheoff - 1] = Int32GetDatum(-1);
init_tuple = heap_addheader(Natts_pg_attribute,
ATTRIBUTE_TUPLE_SIZE,
ATTRIBUTE_TUPLE_SIZE,
(char *) (indexRelation->rd_att->attrs[0]));
hasind = false;
@ -611,7 +611,7 @@ AppendAttributeTuples(Relation indexRelation, int numatts)
*/
memmove(GETSTRUCT(cur_tuple),
(char *) indexTupDesc->attrs[i],
ATTRIBUTE_TUPLE_SIZE);
ATTRIBUTE_TUPLE_SIZE);
value[Anum_pg_attribute_attnum - 1] = Int16GetDatum(i + 1);
@ -657,7 +657,7 @@ UpdateIndexRelation(Oid indexoid,
List *attributeList,
bool islossy,
bool unique,
bool primary)
bool primary)
{
Form_pg_index indexForm;
IndexElem *IndexKey;
@ -686,7 +686,7 @@ UpdateIndexRelation(Oid indexoid,
predLen = VARSIZE(predText);
itupLen = predLen + sizeof(FormData_pg_index);
indexForm = (Form_pg_index) palloc(itupLen);
memset (indexForm, 0, sizeof(FormData_pg_index));
memset(indexForm, 0, sizeof(FormData_pg_index));
memmove((char *) &indexForm->indpred, (char *) predText, predLen);
@ -939,7 +939,7 @@ index_create(char *heapRelationName,
Node *predicate,
bool islossy,
bool unique,
bool primary)
bool primary)
{
Relation heapRelation;
Relation indexRelation;
@ -948,15 +948,15 @@ index_create(char *heapRelationName,
Oid indexoid;
PredInfo *predInfo;
bool istemp = (get_temp_rel_by_name(heapRelationName) != NULL);
char *temp_relname = NULL;
char *temp_relname = NULL;
/* ----------------
* check parameters
* ----------------
*/
if (numatts < 1)
elog(ERROR, "must index at least one attribute");
/* ----------------
* get heap relation oid and open the heap relation
* XXX ADD INDEXING
@ -987,25 +987,27 @@ index_create(char *heapRelationName,
/* invalidate cache so possible non-temp index is masked by temp */
if (istemp)
{
Oid relid = RelnameFindRelid(indexRelationName);
Oid relid = RelnameFindRelid(indexRelationName);
if (relid != InvalidOid)
{
/*
* This is heavy-handed, but appears necessary bjm 1999/02/01
* SystemCacheRelationFlushed(relid) is not enough either.
* This is heavy-handed, but appears necessary bjm 1999/02/01
* SystemCacheRelationFlushed(relid) is not enough either.
*/
RelationForgetRelation(relid);
ResetSystemCache();
}
}
/* save user relation name because heap_create changes it */
if (istemp)
{
temp_relname = pstrdup(indexRelationName); /* save original value */
temp_relname = pstrdup(indexRelationName); /* save original value */
indexRelationName = palloc(NAMEDATALEN);
strcpy(indexRelationName, temp_relname); /* heap_create will change this */
strcpy(indexRelationName, temp_relname); /* heap_create will
* change this */
}
/* ----------------
@ -1122,8 +1124,8 @@ index_destroy(Oid indexId)
Relation relationRelation;
Relation attributeRelation;
HeapTuple tuple;
int16 attnum;
int16 attnum;
Assert(OidIsValid(indexId));
/* Open now to obtain lock by referencing table? bjm */
@ -1166,7 +1168,7 @@ index_destroy(Oid indexId)
/* does something only if it is a temp index */
remove_temp_relation(indexId);
/* ----------------
* fix INDEX relation
* ----------------


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.37 1999/05/10 00:44:55 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.38 1999/05/25 16:08:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -46,15 +46,15 @@
*/
char *Name_pg_attr_indices[Num_pg_attr_indices] = {AttributeNameIndex,
AttributeNumIndex,
AttributeRelidIndex};
AttributeNumIndex,
AttributeRelidIndex};
char *Name_pg_proc_indices[Num_pg_proc_indices] = {ProcedureNameIndex,
ProcedureOidIndex,
ProcedureSrcIndex};
ProcedureOidIndex,
ProcedureSrcIndex};
char *Name_pg_type_indices[Num_pg_type_indices] = {TypeNameIndex,
TypeOidIndex};
TypeOidIndex};
char *Name_pg_class_indices[Num_pg_class_indices] = {ClassNameIndex,
ClassOidIndex};
ClassOidIndex};
char *Name_pg_attrdef_indices[Num_pg_attrdef_indices] = {AttrDefaultIndex};
char *Name_pg_relcheck_indices[Num_pg_relcheck_indices] = {RelCheckIndex};
@ -63,9 +63,9 @@ char *Name_pg_trigger_indices[Num_pg_trigger_indices] = {TriggerRelidIndex};
static HeapTuple CatalogIndexFetchTuple(Relation heapRelation,
Relation idesc,
ScanKey skey,
int16 num_keys);
Relation idesc,
ScanKey skey,
int16 num_keys);
/*
@ -126,13 +126,13 @@ CatalogIndexInsert(Relation *idescs,
index_tup = SearchSysCacheTupleCopy(INDEXRELID,
ObjectIdGetDatum(idescs[i]->rd_id),
0, 0, 0);
0, 0, 0);
Assert(index_tup);
index_form = (Form_pg_index) GETSTRUCT(index_tup);
if (index_form->indproc != InvalidOid)
{
int fatts;
int fatts;
/*
* Compute the number of attributes we are indexing upon.
@ -152,7 +152,7 @@ CatalogIndexInsert(Relation *idescs,
natts = RelationGetDescr(idescs[i])->natts;
finfoP = (FuncIndexInfo *) NULL;
}
FormIndexDatum(natts,
(AttrNumber *) index_form->indkey,
heapTuple,
@ -229,11 +229,11 @@ CatalogIndexFetchTuple(Relation heapRelation,
ScanKey skey,
int16 num_keys)
{
IndexScanDesc sd;
IndexScanDesc sd;
RetrieveIndexResult indexRes;
HeapTupleData tuple;
HeapTuple result = NULL;
Buffer buffer;
HeapTupleData tuple;
HeapTuple result = NULL;
Buffer buffer;
sd = index_beginscan(idesc, false, num_keys, skey);
tuple.t_data = NULL;
@ -462,7 +462,7 @@ ClassNameIndexScan(Relation heapRelation, char *relName)
*/
if ((tuple = get_temp_rel_by_name(relName)) != NULL)
return heap_copytuple(tuple);
ScanKeyEntryInitialize(&skey[0],
(bits16) 0x0,
(AttrNumber) 1,


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.36 1999/05/10 00:44:56 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.37 1999/05/25 16:08:09 momjian Exp $
*
* NOTES
* these routines moved here from commands/define.c and somewhat cleaned up.
@ -36,15 +36,15 @@
#endif
static Oid OperatorGetWithOpenRelation(Relation pg_operator_desc,
const char *operatorName,
Oid leftObjectId,
Oid rightObjectId,
bool *defined);
const char *operatorName,
Oid leftObjectId,
Oid rightObjectId,
bool *defined);
static Oid OperatorGet(char *operatorName,
char *leftTypeName,
char *rightTypeName,
bool *defined);
char *leftTypeName,
char *rightTypeName,
bool *defined);
static Oid OperatorShellMakeWithOpenRelation(Relation pg_operator_desc,
char *operatorName,
@ -135,6 +135,7 @@ OperatorGetWithOpenRelation(Relation pg_operator_desc,
if (HeapTupleIsValid(tup))
{
regproc oprcode = ((Form_pg_operator) GETSTRUCT(tup))->oprcode;
operatorObjectId = tup->t_data->t_oid;
*defined = RegProcedureIsValid(oprcode);
}
@ -259,7 +260,7 @@ OperatorShellMakeWithOpenRelation(Relation pg_operator_desc,
/* ----------------
* initialize *values with the operator name and input data types.
* Note that oprcode is set to InvalidOid, indicating it's a shell.
* Note that oprcode is set to InvalidOid, indicating it's a shell.
* ----------------
*/
i = 0;
@ -356,9 +357,9 @@ OperatorShellMake(char *operatorName,
* ----------------
*/
operatorObjectId = OperatorShellMakeWithOpenRelation(pg_operator_desc,
operatorName,
leftObjectId,
rightObjectId);
operatorName,
leftObjectId,
rightObjectId);
/* ----------------
* close the operator relation and return the oid.
* ----------------
@ -506,8 +507,9 @@ OperatorDef(char *operatorName,
elog(ERROR, "OperatorDef: operator \"%s\" already defined",
operatorName);
/* At this point, if operatorObjectId is not InvalidOid then
* we are filling in a previously-created shell.
/*
* At this point, if operatorObjectId is not InvalidOid then we are
* filling in a previously-created shell.
*/
/* ----------------
@ -580,7 +582,7 @@ OperatorDef(char *operatorName,
values[Anum_pg_operator_oprcode - 1] = ObjectIdGetDatum(tup->t_data->t_oid);
values[Anum_pg_operator_oprresult - 1] = ObjectIdGetDatum(((Form_pg_proc)
GETSTRUCT(tup))->prorettype);
GETSTRUCT(tup))->prorettype);
/* ----------------
* find restriction
@ -648,7 +650,8 @@ OperatorDef(char *operatorName,
values[i++] = ObjectIdGetDatum(leftTypeId);
values[i++] = ObjectIdGetDatum(rightTypeId);
++i; /* Skip "oprresult", it was filled in above */
++i; /* Skip "oprresult", it was filled in
* above */
/*
* Set up the other operators. If they do not currently exist, create
@ -663,16 +666,16 @@ OperatorDef(char *operatorName,
{
if (name[j])
{
char *otherLeftTypeName = NULL;
char *otherRightTypeName = NULL;
Oid otherLeftTypeId = InvalidOid;
Oid otherRightTypeId = InvalidOid;
Oid other_oid = InvalidOid;
bool otherDefined = false;
char *otherLeftTypeName = NULL;
char *otherRightTypeName = NULL;
Oid otherLeftTypeId = InvalidOid;
Oid otherRightTypeId = InvalidOid;
Oid other_oid = InvalidOid;
bool otherDefined = false;
switch (j)
{
case 0: /* commutator has reversed arg types */
case 0: /* commutator has reversed arg types */
otherLeftTypeName = rightTypeName;
otherRightTypeName = leftTypeName;
otherLeftTypeId = rightTypeId;
@ -683,7 +686,7 @@ OperatorDef(char *operatorName,
&otherDefined);
commutatorId = other_oid;
break;
case 1: /* negator has same arg types */
case 1: /* negator has same arg types */
otherLeftTypeName = leftTypeName;
otherRightTypeName = rightTypeName;
otherLeftTypeId = leftTypeId;
@ -694,7 +697,7 @@ OperatorDef(char *operatorName,
&otherDefined);
negatorId = other_oid;
break;
case 2: /* left sort op takes left-side data type */
case 2: /* left sort op takes left-side data type */
otherLeftTypeName = leftTypeName;
otherRightTypeName = leftTypeName;
otherLeftTypeId = leftTypeId;
@ -704,7 +707,8 @@ OperatorDef(char *operatorName,
otherRightTypeName,
&otherDefined);
break;
case 3: /* right sort op takes right-side data type */
case 3: /* right sort op takes right-side data
* type */
otherLeftTypeName = rightTypeName;
otherRightTypeName = rightTypeName;
otherLeftTypeId = rightTypeId;
@ -737,8 +741,10 @@ OperatorDef(char *operatorName,
}
else
{
/* self-linkage to this operator; will fix below.
* Note that only self-linkage for commutation makes sense.
/*
* self-linkage to this operator; will fix below. Note
* that only self-linkage for commutation makes sense.
*/
if (j != 0)
elog(ERROR,
@ -804,15 +810,14 @@ OperatorDef(char *operatorName,
/*
* If a commutator and/or negator link is provided, update the other
* operator(s) to point at this one, if they don't already have a link.
* This supports an alternate style of operator definition wherein the
* user first defines one operator without giving negator or
* commutator, then defines the other operator of the pair with the
* proper commutator or negator attribute. That style doesn't require
* creation of a shell, and it's the only style that worked right before
* Postgres version 6.5.
* This code also takes care of the situation where the new operator
* is its own commutator.
* operator(s) to point at this one, if they don't already have a
* link. This supports an alternate style of operator definition
* wherein the user first defines one operator without giving negator
* or commutator, then defines the other operator of the pair with the
* proper commutator or negator attribute. That style doesn't require
* creation of a shell, and it's the only style that worked right
* before Postgres version 6.5. This code also takes care of the
* situation where the new operator is its own commutator.
*/
if (selfCommutator)
commutatorId = operatorObjectId;
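
The rewrapped comment above describes the pre-6.5 style of defining an operator pair: the second CREATE OPERATOR back-fills the first operator's commutator (or negator) link if it is still unset, and a self-commutating operator simply points at itself. A toy model of that back-filling rule, with invented OIDs and struct layout:

#include <stdio.h>

typedef struct
{
    int         oid;
    int         commutator;      /* 0 = no link yet */
} ToyOperator;

static void
link_commutator(ToyOperator *newop, ToyOperator *other)
{
    newop->commutator = other->oid;
    if (other->commutator == 0)  /* only fill in a missing link */
        other->commutator = newop->oid;
}

int
main(void)
{
    ToyOperator lt = {100, 0};   /* defined first, no commutator given */
    ToyOperator gt = {101, 0};   /* defined second, names lt as commutator */
    ToyOperator eq = {102, 0};   /* self-commutating operator */

    link_commutator(&gt, &lt);
    link_commutator(&eq, &eq);   /* self-linkage: both sides are eq */

    printf("lt->%d  gt->%d  eq->%d\n",
           lt.commutator, gt.commutator, eq.commutator);
    return 0;
}
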
@ -869,7 +874,8 @@ OperatorUpd(Oid baseId, Oid commId, Oid negId)
tup = heap_getnext(pg_operator_scan, 0);
/* if the commutator and negator are the same operator, do one update.
/*
* if the commutator and negator are the same operator, do one update.
* XXX this is probably useless code --- I doubt it ever makes sense
* for commutator and negator to be the same thing...
*/
@ -1008,7 +1014,7 @@ OperatorCreate(char *operatorName,
if (!leftTypeName && !rightTypeName)
elog(ERROR, "OperatorCreate: at least one of leftarg or rightarg must be defined");
if (! (leftTypeName && rightTypeName))
if (!(leftTypeName && rightTypeName))
{
/* If it's not a binary op, these things mustn't be set: */
if (commutatorName)


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.28 1999/05/13 07:28:27 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.29 1999/05/25 16:08:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -226,11 +226,11 @@ ProcedureCreate(char *procedureName,
* function name (the 'prosrc' value) is a known builtin function.
*
* NOTE: in Postgres versions before 6.5, the SQL name of the created
* function could not be different from the internal name, and 'prosrc'
* wasn't used. So there is code out there that does CREATE FUNCTION
* xyz AS '' LANGUAGE 'internal'. To preserve some modicum of
* backwards compatibility, accept an empty 'prosrc' value as meaning
* the supplied SQL function name.
* function could not be different from the internal name, and
* 'prosrc' wasn't used. So there is code out there that does CREATE
* FUNCTION xyz AS '' LANGUAGE 'internal'. To preserve some modicum
* of backwards compatibility, accept an empty 'prosrc' value as
* meaning the supplied SQL function name.
*/
if (strcmp(languageName, "internal") == 0)
@ -239,7 +239,7 @@ ProcedureCreate(char *procedureName,
prosrc = procedureName;
if (fmgr_lookupByName(prosrc) == (func_ptr) NULL)
elog(ERROR,
"ProcedureCreate: there is no builtin function named \"%s\"",
"ProcedureCreate: there is no builtin function named \"%s\"",
prosrc);
}
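
The comment spells out the compatibility rule: for LANGUAGE 'internal', an empty 'prosrc' falls back to the SQL-level function name before the builtin lookup. A self-contained sketch of that rule, with a stubbed lookup standing in for fmgr_lookupByName():

#include <stdio.h>
#include <string.h>

typedef void (*func_ptr) (void);

static void dummy_builtin(void) {}

static func_ptr
lookup_builtin(const char *name)     /* stand-in for fmgr_lookupByName */
{
    return strcmp(name, "int4pl") == 0 ? dummy_builtin : NULL;
}

static const char *
resolve_prosrc(const char *procedureName, const char *prosrc)
{
    if (prosrc == NULL || prosrc[0] == '\0')
        prosrc = procedureName;      /* pre-6.5 style: AS '' means "same name" */
    if (lookup_builtin(prosrc) == NULL)
        fprintf(stderr, "no builtin function named \"%s\"\n", prosrc);
    return prosrc;
}

int
main(void)
{
    printf("%s\n", resolve_prosrc("int4pl", ""));   /* resolves to int4pl */
    return 0;
}
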


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.36 1999/04/20 03:51:14 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.37 1999/05/25 16:08:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -400,8 +400,8 @@ TypeCreate(char *typeName,
procname = procs[j];
/*
* First look for a 1-argument func with all argtypes 0.
* This is valid for all four kinds of procedure.
* First look for a 1-argument func with all argtypes 0. This is
* valid for all four kinds of procedure.
*/
MemSet(argList, 0, 8 * sizeof(Oid));
@ -413,20 +413,23 @@ TypeCreate(char *typeName,
if (!HeapTupleIsValid(tup))
{
/*
* For array types, the input procedures may take 3 args
* (data value, element OID, atttypmod); the pg_proc
* argtype signature is 0,0,INT4OID. The output procedures
* may take 2 args (data value, element OID).
* For array types, the input procedures may take 3 args (data
* value, element OID, atttypmod); the pg_proc argtype
* signature is 0,0,INT4OID. The output procedures may take 2
* args (data value, element OID).
*/
if (OidIsValid(elementObjectId))
{
int nargs;
int nargs;
if (j % 2)
{
/* output proc */
nargs = 2;
} else
}
else
{
/* input proc */
nargs = 3;
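
Per the reflowed comment above, the lookup first tries a one-argument signature with all argtypes zero; only for array types does it retry with three arguments for input procedures (value, element OID, atttypmod) or two for output procedures. The selection reduces to something like this sketch (names invented, error paths omitted):

#include <stdio.h>
#include <stdbool.h>

/* which proc: even index = input, odd index = output, as in the j % 2 test */
static int
io_proc_nargs(bool one_arg_form_found, bool is_array_type, int j)
{
    if (one_arg_form_found)
        return 1;                /* plain types stop at the 1-argument form */
    if (is_array_type)
        return (j % 2) ? 2 : 3;  /* output proc : input proc */
    return 1;
}

int
main(void)
{
    printf("array input proc:  %d args\n", io_proc_nargs(false, true, 0));
    printf("array output proc: %d args\n", io_proc_nargs(false, true, 1));
    return 0;
}
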


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/_deadcode/Attic/recipe.c,v 1.2 1999/03/16 04:25:46 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/_deadcode/Attic/recipe.c,v 1.3 1999/05/25 16:08:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -121,18 +121,18 @@ static QueryTreeList *tg_parseTeeNode(TgRecipe * r,
void
beginRecipe(RecipeStmt *stmt)
{
TgRecipe *r;
int i,
numTees;
TgRecipe *r;
int i,
numTees;
QueryTreeList *qList;
char portalName[1024];
char portalName[1024];
Plan *plan;
TupleDesc attinfo;
QueryDesc *queryDesc;
Query *parsetree;
Plan *plan;
TupleDesc attinfo;
QueryDesc *queryDesc;
Query *parsetree;
TeeInfo *teeInfo;
TeeInfo *teeInfo;
/*
* retrieveRecipe() reads the recipe from the database and returns a
@ -808,21 +808,21 @@ tg_parseTeeNode(TgRecipe * r,
static QueryTreeList *
tg_parseSubQuery(TgRecipe * r, TgNode * n, TeeInfo * teeInfo)
{
TgElement *elem;
char *funcName;
Oid typev[8], /* eight arguments maximum */
relid;
int i,
parameterCount;
TgElement *elem;
char *funcName;
Oid typev[8], /* eight arguments maximum */
relid;
int i,
parameterCount;
QueryTreeList *qList; /* the parse tree of the nodeElement */
QueryTreeList *inputQlist; /* the list of parse trees for the inputs
* to this node */
QueryTreeList *q;
TgNode *child;
Relation rel;
unsigned int len;
TupleDesc tupdesc;
TgNode *child;
Relation rel;
unsigned int len;
TupleDesc tupdesc;
qList = NULL;


@ -5,17 +5,17 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* The version stuff has not been tested under postgres95 and probably
* The version stuff has not been tested under postgres95 and probably
* doesn't work! - jolly 8/19/95
*
*
* $Id: version.c,v 1.18 1999/02/13 23:15:12 momjian Exp $
* $Id: version.c,v 1.19 1999/05/25 16:08:32 momjian Exp $
*
* NOTES
* At the point the version is defined, 2 physical relations are created
* <vname>_added and <vname>_deleted.
*
* In addition, 4 rules are defined which govern the semantics of
* In addition, 4 rules are defined which govern the semantics of
* versions w.r.t retrieves, appends, replaces and deletes.
*
*-------------------------------------------------------------------------


@ -6,7 +6,7 @@
* Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.46 1999/04/25 19:27:43 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.47 1999/05/25 16:08:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -31,17 +31,17 @@
* relname to a list of outstanding NOTIFY requests. Actual processing
* happens if and only if we reach transaction commit. At that time (in
* routine AtCommit_Notify) we scan pg_listener for matching relnames.
* If the listenerPID in a matching tuple is ours, we just send a notify
* If the listenerPID in a matching tuple is ours, we just send a notify
* message to our own front end. If it is not ours, and "notification"
* is not already nonzero, we set notification to our own PID and send a
* SIGUSR2 signal to the receiving process (indicated by listenerPID).
* BTW: if the signal operation fails, we presume that the listener backend
* crashed without removing this tuple, and remove the tuple for it.
* crashed without removing this tuple, and remove the tuple for it.
*
* 4. Upon receipt of a SIGUSR2 signal, the signal handler can call inbound-
* notify processing immediately if this backend is idle (ie, it is
* waiting for a frontend command and is not within a transaction block).
* Otherwise the handler may only set a flag, which will cause the
* Otherwise the handler may only set a flag, which will cause the
* processing to occur just before we next go idle.
*
* 5. Inbound-notify processing consists of scanning pg_listener for tuples
@ -53,7 +53,7 @@
*
* Note that the system's use of pg_listener is confined to very short
* intervals at the end of a transaction that contains NOTIFY statements,
* or during the transaction caused by an inbound SIGUSR2. So the fact that
* or during the transaction caused by an inbound SIGUSR2. So the fact that
* pg_listener is a global resource shouldn't cause too much performance
* problem. But application authors ought to be discouraged from doing
* LISTEN or UNLISTEN near the start of a long transaction --- that would
@ -109,8 +109,8 @@ extern CommandDest whereToSendOutput;
/*
* State for outbound notifies consists of a list of all relnames NOTIFYed
* in the current transaction. We do not actually perform a NOTIFY until
* and unless the transaction commits. pendingNotifies is NULL if no
* in the current transaction. We do not actually perform a NOTIFY until
* and unless the transaction commits. pendingNotifies is NULL if no
* NOTIFYs have been done in the current transaction.
*/
static Dllist *pendingNotifies = NULL;
@ -125,8 +125,8 @@ static Dllist *pendingNotifies = NULL;
* does not grok "volatile", you'd be best advised to compile this file
* with all optimization turned off.
*/
static volatile int notifyInterruptEnabled = 0;
static volatile int notifyInterruptOccurred = 0;
static volatile int notifyInterruptEnabled = 0;
static volatile int notifyInterruptOccurred = 0;
/* True if we've registered an on_shmem_exit cleanup (or at least tried to). */
static int unlistenExitRegistered = 0;
@ -142,7 +142,7 @@ static void ClearPendingNotifies(void);
/*
*--------------------------------------------------------------
* Async_Notify
* Async_Notify
*
* This is executed by the SQL notify command.
*
@ -164,28 +164,29 @@ Async_Notify(char *relname)
/*
* We allocate list memory from the global malloc pool to ensure that
* it will live until we want to use it. This is probably not necessary
* any longer, since we will use it before the end of the transaction.
* DLList only knows how to use malloc() anyway, but we could probably
* palloc() the strings...
* it will live until we want to use it. This is probably not
* necessary any longer, since we will use it before the end of the
* transaction. DLList only knows how to use malloc() anyway, but we
* could probably palloc() the strings...
*/
if (!pendingNotifies)
pendingNotifies = DLNewList();
notifyName = strdup(relname);
DLAddHead(pendingNotifies, DLNewElem(notifyName));
/*
* NOTE: we could check to see if pendingNotifies already has an entry
* for relname, and thus avoid making duplicate entries. However, most
* apps probably don't notify the same name multiple times per transaction,
* so we'd likely just be wasting cycles to make such a check.
* AsyncExistsPendingNotify() doesn't really care whether the list
* contains duplicates...
* for relname, and thus avoid making duplicate entries. However,
* most apps probably don't notify the same name multiple times per
* transaction, so we'd likely just be wasting cycles to make such a
* check. AsyncExistsPendingNotify() doesn't really care whether the
* list contains duplicates...
*/
}
/*
*--------------------------------------------------------------
* Async_Listen
* Async_Listen
*
* This is executed by the SQL listen command.
*
@ -274,7 +275,7 @@ Async_Listen(char *relname, int pid)
/*
* now that we are listening, make sure we will unlisten before dying.
*/
if (! unlistenExitRegistered)
if (!unlistenExitRegistered)
{
if (on_shmem_exit(Async_UnlistenOnExit, (caddr_t) NULL) < 0)
elog(NOTICE, "Async_Listen: out of shmem_exit slots");
@ -284,7 +285,7 @@ Async_Listen(char *relname, int pid)
/*
*--------------------------------------------------------------
* Async_Unlisten
* Async_Unlisten
*
* This is executed by the SQL unlisten command.
*
@ -326,14 +327,16 @@ Async_Unlisten(char *relname, int pid)
UnlockRelation(lRel, AccessExclusiveLock);
heap_close(lRel);
}
/* We do not complain about unlistening something not being listened;
/*
* We do not complain about unlistening something not being listened;
* should we?
*/
}
/*
*--------------------------------------------------------------
* Async_UnlistenAll
* Async_UnlistenAll
*
* Unlisten all relations for this backend.
*
@ -379,7 +382,7 @@ Async_UnlistenAll()
/*
*--------------------------------------------------------------
* Async_UnlistenOnExit
* Async_UnlistenOnExit
*
* Clean up the pg_listener table at backend exit.
*
@ -398,11 +401,12 @@ Async_UnlistenAll()
static void
Async_UnlistenOnExit()
{
/*
* We need to start/commit a transaction for the unlisten,
* but if there is already an active transaction we had better
* abort that one first. Otherwise we'd end up committing changes
* that probably ought to be discarded.
* We need to start/commit a transaction for the unlisten, but if
* there is already an active transaction we had better abort that one
* first. Otherwise we'd end up committing changes that probably
* ought to be discarded.
*/
AbortOutOfAnyTransaction();
/* Now we can do the unlisten */
@ -413,7 +417,7 @@ Async_UnlistenOnExit()
/*
*--------------------------------------------------------------
* AtCommit_Notify
* AtCommit_Notify
*
* This is called at transaction commit.
*
@ -450,12 +454,14 @@ AtCommit_Notify()
int32 listenerPID;
if (!pendingNotifies)
return; /* no NOTIFY statements in this transaction */
return; /* no NOTIFY statements in this
* transaction */
/* NOTIFY is disabled if not normal processing mode.
* This test used to be in xact.c, but it seems cleaner to do it here.
/*
* NOTIFY is disabled if not normal processing mode. This test used to
* be in xact.c, but it seems cleaner to do it here.
*/
if (! IsNormalProcessingMode())
if (!IsNormalProcessingMode())
{
ClearPendingNotifies();
return;
@ -487,10 +493,13 @@ AtCommit_Notify()
if (listenerPID == MyProcPid)
{
/* Self-notify: no need to bother with table update.
/*
* Self-notify: no need to bother with table update.
* Indeed, we *must not* clear the notification field in
* this path, or we could lose an outside notify, which'd be
* bad for applications that ignore self-notify messages.
* this path, or we could lose an outside notify, which'd
* be bad for applications that ignore self-notify
* messages.
*/
TPRINTF(TRACE_NOTIFY, "AtCommit_Notify: notifying self");
NotifyMyFrontEnd(relname, listenerPID);
@ -499,23 +508,27 @@ AtCommit_Notify()
{
TPRINTF(TRACE_NOTIFY, "AtCommit_Notify: notifying pid %d",
listenerPID);
/*
* If someone has already notified this listener,
* we don't bother modifying the table, but we do still send
* a SIGUSR2 signal, just in case that backend missed the
* earlier signal for some reason. It's OK to send the signal
* first, because the other guy can't read pg_listener until
* we unlock it.
* If someone has already notified this listener, we don't
* bother modifying the table, but we do still send a
* SIGUSR2 signal, just in case that backend missed the
* earlier signal for some reason. It's OK to send the
* signal first, because the other guy can't read
* pg_listener until we unlock it.
*/
#ifdef HAVE_KILL
if (kill(listenerPID, SIGUSR2) < 0)
{
/* Get rid of pg_listener entry if it refers to a PID
/*
* Get rid of pg_listener entry if it refers to a PID
* that no longer exists. Presumably, that backend
* crashed without deleting its pg_listener entries.
* This code used to only delete the entry if errno==ESRCH,
* but as far as I can see we should just do it for any
* failure (certainly at least for EPERM too...)
* This code used to only delete the entry if
* errno==ESRCH, but as far as I can see we should
* just do it for any failure (certainly at least for
* EPERM too...)
*/
heap_delete(lRel, &lTuple->t_self, NULL);
}
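
The comment block above gives the recovery rule for the outbound path: if the SIGUSR2 send fails for any reason, the recorded listener is presumed dead and its pg_listener row is deleted. A standalone illustration of that signal-and-forget step, with the catalog reduced to an array of PIDs and SIGUSR2 ignored so signalling ourselves is harmless:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

int
main(void)
{
    pid_t       listeners[2];
    int         i;

    signal(SIGUSR2, SIG_IGN);            /* so signalling ourselves is a no-op */
    listeners[0] = getpid();             /* a live listener */
    listeners[1] = (pid_t) 999999;       /* almost certainly gone (or not ours) */

    for (i = 0; i < 2; i++)
    {
        if (kill(listeners[i], SIGUSR2) < 0)
            printf("pid %ld unreachable (%s): would delete its entry\n",
                   (long) listeners[i], strerror(errno));
        else
            printf("pid %ld signalled\n", (long) listeners[i]);
    }
    return 0;
}
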
@ -536,6 +549,7 @@ AtCommit_Notify()
}
heap_endscan(sRel);
/*
* We do not do RelationUnsetLockForWrite(lRel) here, because the
* transaction is about to be committed anyway.
@ -549,7 +563,7 @@ AtCommit_Notify()
/*
*--------------------------------------------------------------
* AtAbort_Notify
* AtAbort_Notify
*
* This is called at transaction abort.
*
@ -569,7 +583,7 @@ AtAbort_Notify()
/*
*--------------------------------------------------------------
* Async_NotifyHandler
* Async_NotifyHandler
*
* This is the signal handler for SIGUSR2.
*
@ -588,25 +602,30 @@ AtAbort_Notify()
void
Async_NotifyHandler(SIGNAL_ARGS)
{
/*
* Note: this is a SIGNAL HANDLER. You must be very wary what you do here.
* Some helpful soul had this routine sprinkled with TPRINTFs, which would
* likely lead to corruption of stdio buffers if they were ever turned on.
* Note: this is a SIGNAL HANDLER. You must be very wary what you do
* here. Some helpful soul had this routine sprinkled with TPRINTFs,
* which would likely lead to corruption of stdio buffers if they were
* ever turned on.
*/
if (notifyInterruptEnabled)
{
/* I'm not sure whether some flavors of Unix might allow another
* SIGUSR2 occurrence to recursively interrupt this routine.
* To cope with the possibility, we do the same sort of dance that
* EnableNotifyInterrupt must do --- see that routine for comments.
/*
* I'm not sure whether some flavors of Unix might allow another
* SIGUSR2 occurrence to recursively interrupt this routine. To
* cope with the possibility, we do the same sort of dance that
* EnableNotifyInterrupt must do --- see that routine for
* comments.
*/
notifyInterruptEnabled = 0; /* disable any recursive signal */
notifyInterruptOccurred = 1; /* do at least one iteration */
for (;;)
{
notifyInterruptEnabled = 1;
if (! notifyInterruptOccurred)
if (!notifyInterruptOccurred)
break;
notifyInterruptEnabled = 0;
if (notifyInterruptOccurred)
@ -621,14 +640,18 @@ Async_NotifyHandler(SIGNAL_ARGS)
}
else
{
/* In this path it is NOT SAFE to do much of anything, except this: */
/*
* In this path it is NOT SAFE to do much of anything, except
* this:
*/
notifyInterruptOccurred = 1;
}
}
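
The reflowed comments in Async_NotifyHandler above and EnableNotifyInterrupt below revolve around the same two volatile flags: the handler only records an interrupt unless servicing is enabled, and the enable routine loops over set-enable / test-occurred so no signal can slip in between the two steps. A compilable miniature of that pattern outside the backend, with the servicing work reduced to a printf:

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t interruptEnabled = 0;
static volatile sig_atomic_t interruptOccurred = 0;

static void
service_notify(void)
{
    printf("servicing pending notify\n");
}

static void
handler(int signo)
{
    (void) signo;
    /* The real handler services the interrupt on the spot when enabled;
     * this sketch only records it, which is the "not idle" branch. */
    interruptOccurred = 1;
}

static void
enable_interrupt(void)
{
    for (;;)
    {
        interruptEnabled = 1;
        if (!interruptOccurred)
            break;               /* enabled and nothing pending: done */
        interruptEnabled = 0;    /* prevent re-entrant servicing */
        if (interruptOccurred)
        {
            interruptOccurred = 0;
            service_notify();
        }
    }
}

int
main(void)
{
    signal(SIGUSR2, handler);
    raise(SIGUSR2);              /* arrives while "busy": only sets the flag */
    enable_interrupt();          /* going idle: the loop notices and services it */
    return 0;
}
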
/*
* --------------------------------------------------------------
* EnableNotifyInterrupt
* EnableNotifyInterrupt
*
* This is called by the PostgresMain main loop just before waiting
* for a frontend command. If we are truly idle (ie, *not* inside
@ -652,26 +675,27 @@ EnableNotifyInterrupt(void)
* notifyInterruptOccurred and then set notifyInterruptEnabled, we
* could fail to respond promptly to a signal that happens in between
* those two steps. (A very small time window, perhaps, but Murphy's
* Law says you can hit it...) Instead, we first set the enable flag,
* then test the occurred flag. If we see an unserviced interrupt
* has occurred, we re-clear the enable flag before going off to do
* the service work. (That prevents re-entrant invocation of
* ProcessIncomingNotify() if another interrupt occurs.)
* If an interrupt comes in between the setting and clearing of
* notifyInterruptEnabled, then it will have done the service
* work and left notifyInterruptOccurred zero, so we have to check
* again after clearing enable. The whole thing has to be in a loop
* in case another interrupt occurs while we're servicing the first.
* Once we get out of the loop, enable is set and we know there is no
* Law says you can hit it...) Instead, we first set the enable flag,
* then test the occurred flag. If we see an unserviced interrupt has
* occurred, we re-clear the enable flag before going off to do the
* service work. (That prevents re-entrant invocation of
* ProcessIncomingNotify() if another interrupt occurs.) If an
* interrupt comes in between the setting and clearing of
* notifyInterruptEnabled, then it will have done the service work and
* left notifyInterruptOccurred zero, so we have to check again after
* clearing enable. The whole thing has to be in a loop in case
* another interrupt occurs while we're servicing the first. Once we
* get out of the loop, enable is set and we know there is no
* unserviced interrupt.
*
* NB: an overenthusiastic optimizing compiler could easily break this
* code. Hopefully, they all understand what "volatile" means these days.
* code. Hopefully, they all understand what "volatile" means these
* days.
*/
for (;;)
{
notifyInterruptEnabled = 1;
if (! notifyInterruptOccurred)
if (!notifyInterruptOccurred)
break;
notifyInterruptEnabled = 0;
if (notifyInterruptOccurred)
@ -686,7 +710,7 @@ EnableNotifyInterrupt(void)
/*
* --------------------------------------------------------------
* DisableNotifyInterrupt
* DisableNotifyInterrupt
*
* This is called by the PostgresMain main loop just after receiving
* a frontend command. Signal handler execution of inbound notifies
@ -702,7 +726,7 @@ DisableNotifyInterrupt(void)
/*
* --------------------------------------------------------------
* ProcessIncomingNotify
* ProcessIncomingNotify
*
* Deal with arriving NOTIFYs from other backends.
* This is called either directly from the SIGUSR2 signal handler,
@ -777,6 +801,7 @@ ProcessIncomingNotify(void)
}
}
heap_endscan(sRel);
/*
* We do not do RelationUnsetLockForWrite(lRel) here, because the
* transaction is about to be committed anyway.
@ -785,7 +810,10 @@ ProcessIncomingNotify(void)
CommitTransactionCommand();
/* Must flush the notify messages to ensure frontend gets them promptly. */
/*
* Must flush the notify messages to ensure frontend gets them
* promptly.
*/
pq_flush();
PS_SET_STATUS("idle");
@ -800,20 +828,22 @@ NotifyMyFrontEnd(char *relname, int32 listenerPID)
if (whereToSendOutput == Remote)
{
StringInfoData buf;
pq_beginmessage(&buf);
pq_sendbyte(&buf, 'A');
pq_sendint(&buf, listenerPID, sizeof(int32));
pq_sendstring(&buf, relname);
pq_endmessage(&buf);
/* NOTE: we do not do pq_flush() here. For a self-notify, it will
/*
* NOTE: we do not do pq_flush() here. For a self-notify, it will
* happen at the end of the transaction, and for incoming notifies
* ProcessIncomingNotify will do it after finding all the notifies.
* ProcessIncomingNotify will do it after finding all the
* notifies.
*/
}
else
{
elog(NOTICE, "NOTIFY for %s", relname);
}
}
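
NotifyMyFrontEnd above assembles the asynchronous-notify message for the frontend: a tag byte 'A', the listener's PID as a 32-bit integer, then the relation name as a null-terminated string. The sketch below lays the same fields into a flat buffer; the byte ordering and the lack of length framing are assumptions of the sketch, not a description of the wire protocol.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t
build_notify_message(char *buf, int32_t listenerPID, const char *relname)
{
    size_t      len = 0;
    uint32_t    pid_net = htonl((uint32_t) listenerPID);

    buf[len++] = 'A';                        /* message tag */
    memcpy(buf + len, &pid_net, sizeof(pid_net));
    len += sizeof(pid_net);
    strcpy(buf + len, relname);              /* null-terminated name */
    len += strlen(relname) + 1;
    return len;
}

int
main(void)
{
    char        buf[256];
    size_t      n = build_notify_message(buf, 4242, "mytable");

    printf("tag '%c', %zu bytes total\n", buf[0], n);
    return 0;
}
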
/* Does pendingNotifies include the given relname?
@ -847,10 +877,12 @@ ClearPendingNotifies()
if (pendingNotifies)
{
/* Since the referenced strings are malloc'd, we have to scan the
/*
* Since the referenced strings are malloc'd, we have to scan the
* list and delete them individually. If we used palloc for the
* strings then we could just do DLFreeList to get rid of both
* the list nodes and the list base...
* strings then we could just do DLFreeList to get rid of both the
* list nodes and the list base...
*/
while ((p = DLRemHead(pendingNotifies)) != NULL)
{


@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.38 1999/02/13 23:15:02 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.39 1999/05/25 16:08:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -236,17 +236,17 @@ copy_heap(Oid OIDOldHeap)
static void
copy_index(Oid OIDOldIndex, Oid OIDNewHeap)
{
Relation OldIndex,
NewHeap;
HeapTuple Old_pg_index_Tuple,
Old_pg_index_relation_Tuple,
pg_proc_Tuple;
Relation OldIndex,
NewHeap;
HeapTuple Old_pg_index_Tuple,
Old_pg_index_relation_Tuple,
pg_proc_Tuple;
Form_pg_index Old_pg_index_Form;
Form_pg_class Old_pg_index_relation_Form;
Form_pg_proc pg_proc_Form;
char *NewIndexName;
AttrNumber *attnumP;
int natts;
Form_pg_proc pg_proc_Form;
char *NewIndexName;
AttrNumber *attnumP;
int natts;
FuncIndexInfo *finfo;
NewHeap = heap_open(OIDNewHeap);
@ -259,14 +259,14 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap)
*/
Old_pg_index_Tuple = SearchSysCacheTuple(INDEXRELID,
ObjectIdGetDatum(RelationGetRelid(OldIndex)),
0, 0, 0);
0, 0, 0);
Assert(Old_pg_index_Tuple);
Old_pg_index_Form = (Form_pg_index) GETSTRUCT(Old_pg_index_Tuple);
Old_pg_index_relation_Tuple = SearchSysCacheTuple(RELOID,
ObjectIdGetDatum(RelationGetRelid(OldIndex)),
0, 0, 0);
0, 0, 0);
Assert(Old_pg_index_relation_Tuple);
Old_pg_index_relation_Form = (Form_pg_class) GETSTRUCT(Old_pg_index_relation_Tuple);
@ -296,7 +296,7 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap)
pg_proc_Tuple = SearchSysCacheTuple(PROOID,
ObjectIdGetDatum(Old_pg_index_Form->indproc),
0, 0, 0);
0, 0, 0);
Assert(pg_proc_Tuple);
pg_proc_Form = (Form_pg_proc) GETSTRUCT(pg_proc_Tuple);
@ -319,7 +319,7 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap)
(uint16) 0, (Datum) NULL, NULL,
Old_pg_index_Form->indislossy,
Old_pg_index_Form->indisunique,
Old_pg_index_Form->indisprimary);
Old_pg_index_Form->indisprimary);
heap_close(OldIndex);
heap_close(NewHeap);
@ -329,14 +329,14 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap)
static void
rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
{
Relation LocalNewHeap,
LocalOldHeap,
LocalOldIndex;
IndexScanDesc ScanDesc;
RetrieveIndexResult ScanResult;
HeapTupleData LocalHeapTuple;
Buffer LocalBuffer;
Oid OIDNewHeapInsert;
Relation LocalNewHeap,
LocalOldHeap,
LocalOldIndex;
IndexScanDesc ScanDesc;
RetrieveIndexResult ScanResult;
HeapTupleData LocalHeapTuple;
Buffer LocalBuffer;
Oid OIDNewHeapInsert;
/*
* Open the relations I need. Scan through the OldHeap on the OldIndex


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.44 1999/05/10 00:44:56 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.45 1999/05/25 16:08:17 momjian Exp $
*
* NOTES
* The PortalExecutorHeapMemory crap needs to be eliminated
@ -117,18 +117,18 @@ PerformPortalFetch(char *name,
}
/* ----------------
* Create a const node from the given count value
* Create a const node from the given count value
* ----------------
*/
memset(&limcount, 0, sizeof(limcount));
limcount.type = T_Const;
limcount.consttype = INT4OID;
limcount.constlen = sizeof(int4);
limcount.constvalue = (Datum)count;
limcount.constisnull = FALSE;
limcount.type = T_Const;
limcount.consttype = INT4OID;
limcount.constlen = sizeof(int4);
limcount.constvalue = (Datum) count;
limcount.constisnull = FALSE;
limcount.constbyval = TRUE;
limcount.constisset = FALSE;
limcount.constiscast = FALSE;
limcount.constiscast = FALSE;
/* ----------------
@ -193,8 +193,8 @@ PerformPortalFetch(char *name,
*/
PortalExecutorHeapMemory = (MemoryContext) PortalGetHeapMemory(portal);
ExecutorRun(queryDesc, PortalGetState(portal), feature,
(Node *)NULL, (Node *)&limcount);
ExecutorRun(queryDesc, PortalGetState(portal), feature,
(Node *) NULL, (Node *) &limcount);
if (dest == None) /* MOVE */
pfree(queryDesc);
@ -211,7 +211,7 @@ PerformPortalFetch(char *name,
* ----------------
*/
MemoryContextSwitchTo(
(MemoryContext) PortalGetHeapMemory(GetPortalByName(NULL)));
(MemoryContext) PortalGetHeapMemory(GetPortalByName(NULL)));
}
/* --------------------------------
@ -503,7 +503,7 @@ PerformAddAttribute(char *relationName,
heap_replace(rel, &reltup->t_self, reltup, NULL);
{
HeapTuple temptup;
HeapTuple temptup;
if ((temptup = get_temp_rel_by_name(relationName)) != NULL)
((Form_pg_class) GETSTRUCT(temptup))->relnatts = maxatts;
@ -519,7 +519,7 @@ PerformAddAttribute(char *relationName,
}
void
LockTableCommand(LockStmt *lockstmt)
LockTableCommand(LockStmt * lockstmt)
{
Relation rel;
int aclresult;


@ -6,7 +6,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.76 1999/05/10 00:44:58 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.77 1999/05/25 16:08:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -70,111 +70,138 @@ static int CountTuples(Relation relation);
static int lineno;
/*
/*
* Internal communications functions
*/
inline void CopySendData(void *databuf, int datasize, FILE *fp);
inline void CopySendString(char *str, FILE *fp);
inline void CopySendChar(char c, FILE *fp);
inline void CopyGetData(void *databuf, int datasize, FILE *fp);
inline int CopyGetChar(FILE *fp);
inline int CopyGetEof(FILE *fp);
inline int CopyPeekChar(FILE *fp);
inline int CopyGetChar(FILE *fp);
inline int CopyGetEof(FILE *fp);
inline int CopyPeekChar(FILE *fp);
inline void CopyDonePeek(FILE *fp, int c, int pickup);
/*
* CopySendData sends output data either to the file
* specified by fp or, if fp is NULL, using the standard
* backend->frontend functions
* specified by fp or, if fp is NULL, using the standard
* backend->frontend functions
*
* CopySendString does the same for null-terminated strings
* CopySendChar does the same for single characters
*
* NB: no data conversion is applied by these functions
*/
inline void CopySendData(void *databuf, int datasize, FILE *fp) {
if (!fp)
pq_putbytes((char*) databuf, datasize);
else
fwrite(databuf, datasize, 1, fp);
}
inline void CopySendString(char *str, FILE *fp) {
CopySendData(str,strlen(str),fp);
inline void
CopySendData(void *databuf, int datasize, FILE *fp)
{
if (!fp)
pq_putbytes((char *) databuf, datasize);
else
fwrite(databuf, datasize, 1, fp);
}
inline void CopySendChar(char c, FILE *fp) {
CopySendData(&c,1,fp);
inline void
CopySendString(char *str, FILE *fp)
{
CopySendData(str, strlen(str), fp);
}
inline void
CopySendChar(char c, FILE *fp)
{
CopySendData(&c, 1, fp);
}
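
The comment above is the whole contract of these helpers: write to the given FILE, or to the connected frontend when fp is NULL. A standalone imitation with the frontend path mocked as stdout (pq_putbytes is only imitated here), plus a usage that exercises both paths:

#include <stdio.h>
#include <string.h>

static void
send_to_frontend(const void *buf, size_t len)   /* stand-in for pq_putbytes */
{
    fwrite(buf, len, 1, stdout);
}

static void
copy_send_data(const void *databuf, size_t datasize, FILE *fp)
{
    if (!fp)
        send_to_frontend(databuf, datasize);
    else
        fwrite(databuf, datasize, 1, fp);
}

int
main(void)
{
    const char *row = "42\thello\n";

    copy_send_data(row, strlen(row), NULL);     /* "frontend" (stdout) path */
    copy_send_data("\\.\n", 3, stderr);         /* file-style path, end-of-copy marker */
    return 0;
}
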
/*
* CopyGetData reads output data either from the file
* specified by fp or, if fp is NULL, using the standard
* backend->frontend functions
* specified by fp or, if fp is NULL, using the standard
* backend->frontend functions
*
* CopyGetChar does the same for single characters
* CopyGetEof checks if it's EOF on the input
*
* NB: no data conversion is applied by these functions
*/
inline void CopyGetData(void *databuf, int datasize, FILE *fp) {
if (!fp)
pq_getbytes((char*) databuf, datasize);
else
fread(databuf, datasize, 1, fp);
inline void
CopyGetData(void *databuf, int datasize, FILE *fp)
{
if (!fp)
pq_getbytes((char *) databuf, datasize);
else
fread(databuf, datasize, 1, fp);
}
inline int CopyGetChar(FILE *fp) {
if (!fp)
{
unsigned char ch;
if (pq_getbytes((char*) &ch, 1))
return EOF;
return ch;
}
else
return getc(fp);
inline int
CopyGetChar(FILE *fp)
{
if (!fp)
{
unsigned char ch;
if (pq_getbytes((char *) &ch, 1))
return EOF;
return ch;
}
else
return getc(fp);
}
inline int CopyGetEof(FILE *fp) {
if (!fp)
return 0; /* Never return EOF when talking to frontend ? */
else
return feof(fp);
inline int
CopyGetEof(FILE *fp)
{
if (!fp)
return 0; /* Never return EOF when talking to
* frontend ? */
else
return feof(fp);
}
/*
* CopyPeekChar reads a byte in "peekable" mode.
* after each call to CopyPeekChar, a call to CopyDonePeek _must_
* follow.
* CopyDonePeek will either take the peeked char off the stream
* CopyDonePeek will either take the peeked char off the stream
* (if pickup is != 0) or leave it on the stream (if pickup == 0)
*/
inline int CopyPeekChar(FILE *fp) {
if (!fp)
return pq_peekbyte();
else
return getc(fp);
inline int
CopyPeekChar(FILE *fp)
{
if (!fp)
return pq_peekbyte();
else
return getc(fp);
}
inline void CopyDonePeek(FILE *fp, int c, int pickup) {
if (!fp) {
if (pickup) {
/* We want to pick it up - just receive again into dummy buffer */
char c;
pq_getbytes(&c, 1);
}
/* If we didn't want to pick it up, just leave it where it sits */
}
else {
if (!pickup) {
/* We don't want to pick it up - so put it back in there */
ungetc(c,fp);
}
/* If we wanted to pick it up, it's already there */
}
inline void
CopyDonePeek(FILE *fp, int c, int pickup)
{
if (!fp)
{
if (pickup)
{
/*
* We want to pick it up - just receive again into dummy
* buffer
*/
char c;
pq_getbytes(&c, 1);
}
/* If we didn't want to pick it up, just leave it where it sits */
}
else
{
if (!pickup)
{
/* We don't want to pick it up - so put it back in there */
ungetc(c, fp);
}
/* If we wanted to pick it up, it's already there */
}
}
/*
@ -317,7 +344,7 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
else if (!from)
{
if (!binary)
CopySendData("\\.\n",3,fp);
CopySendData("\\.\n", 3, fp);
if (IsUnderPostmaster)
pq_endcopyout(false);
}
@ -395,8 +422,8 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
if (oids && !binary)
{
CopySendString(oidout(tuple->t_data->t_oid),fp);
CopySendChar(delim[0],fp);
CopySendString(oidout(tuple->t_data->t_oid), fp);
CopySendChar(delim[0], fp);
}
for (i = 0; i < attr_count; i++)
@ -466,8 +493,8 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
}
}
}
CopySendData((char *) tuple->t_data + tuple->t_data->t_hoff,
length, fp);
CopySendData((char *) tuple->t_data + tuple->t_data->t_hoff,
length, fp);
}
}
@ -521,7 +548,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
Node **indexPred = NULL;
TupleDesc rtupdesc;
ExprContext *econtext = NULL;
EState *estate = makeNode(EState); /* for ExecConstraints() */
EState *estate = makeNode(EState); /* for ExecConstraints() */
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
@ -566,11 +593,11 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
itupdescArr[i] = RelationGetDescr(index_rels[i]);
pgIndexTup = SearchSysCacheTuple(INDEXRELID,
ObjectIdGetDatum(RelationGetRelid(index_rels[i])),
0, 0, 0);
0, 0, 0);
Assert(pgIndexTup);
pgIndexP[i] = (Form_pg_index) GETSTRUCT(pgIndexTup);
for (attnumP = &(pgIndexP[i]->indkey[0]), natts = 0;
natts < INDEX_MAX_KEYS && *attnumP != InvalidAttrNumber;
natts < INDEX_MAX_KEYS && *attnumP != InvalidAttrNumber;
attnumP++, natts++);
if (pgIndexP[i]->indproc != InvalidOid)
{
@ -777,7 +804,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
}
else if (nulls[i] != 'n')
{
ptr = (char *)att_align(ptr, attr[i]->attlen, attr[i]->attalign);
ptr = (char *) att_align(ptr, attr[i]->attlen, attr[i]->attalign);
values[i] = (Datum) ptr;
ptr = att_addlength(ptr, attr[i]->attlen, ptr);
}
@ -888,7 +915,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
pfree(index_nulls);
pfree(idatum);
pfree(byval);
if (!binary)
{
pfree(in_functions);
@ -903,7 +930,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
{
if (index_rels[i] == NULL)
continue;
if ((index_rels[i])->rd_rel->relam != BTREE_AM_OID &&
if ((index_rels[i])->rd_rel->relam != BTREE_AM_OID &&
(index_rels[i])->rd_rel->relam != HASH_AM_OID)
UnlockRelation(index_rels[i], AccessExclusiveLock);
index_close(index_rels[i]);
@ -1022,12 +1049,12 @@ GetIndexRelations(Oid main_relation_oid,
{
index_relation_oid = (Oid) DatumGetInt32(heap_getattr(tuple, 2,
tupDesc, &isnull));
tupDesc, &isnull));
if (index_relation_oid == main_relation_oid)
{
scan->index_rel_oid = (Oid) DatumGetInt32(heap_getattr(tuple,
Anum_pg_index_indexrelid,
tupDesc, &isnull));
Anum_pg_index_indexrelid,
tupDesc, &isnull));
(*n_indices)++;
scan->next = (RelationList *) palloc(sizeof(RelationList));
scan = scan->next;
@ -1047,7 +1074,7 @@ GetIndexRelations(Oid main_relation_oid,
{
(*index_rels)[i] = index_open(scan->index_rel_oid);
/* comments in execUtils.c */
if ((*index_rels)[i] != NULL &&
if ((*index_rels)[i] != NULL &&
((*index_rels)[i])->rd_rel->relam != BTREE_AM_OID &&
((*index_rels)[i])->rd_rel->relam != HASH_AM_OID)
LockRelation((*index_rels)[i], AccessExclusiveLock);
@ -1176,26 +1203,29 @@ CopyReadAttribute(FILE *fp, bool *isnull, char *delim)
if (ISOCTAL(c))
{
val = (val << 3) + VALUE(c);
CopyDonePeek(fp, c, 1); /* Pick up the character! */
CopyDonePeek(fp, c, 1); /* Pick up the
* character! */
c = CopyPeekChar(fp);
if (ISOCTAL(c)) {
CopyDonePeek(fp,c,1); /* pick up! */
if (ISOCTAL(c))
{
CopyDonePeek(fp, c, 1); /* pick up! */
val = (val << 3) + VALUE(c);
}
else
{
if (CopyGetEof(fp)) {
CopyDonePeek(fp,c,1); /* pick up */
if (CopyGetEof(fp))
{
CopyDonePeek(fp, c, 1); /* pick up */
return NULL;
}
CopyDonePeek(fp,c,0); /* Return to stream! */
CopyDonePeek(fp, c, 0); /* Return to stream! */
}
}
else
{
if (CopyGetEof(fp))
return NULL;
CopyDonePeek(fp,c,0); /* Return to stream! */
CopyDonePeek(fp, c, 0); /* Return to stream! */
}
c = val & 0377;
}
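
The hunk above is the \nnn handling inside CopyReadAttribute: after a backslash, up to three octal digits are folded into one byte, and the peek/done-peek pair leaves any non-octal character on the stream. The same logic in self-contained form, using getc/ungetc in place of the COPY helpers:

#include <stdio.h>

#define ISOCTAL(c)  ((c) >= '0' && (c) <= '7')
#define VALUE(c)    ((c) - '0')

static int
read_octal_escape(FILE *fp, int first)
{
    int         val = VALUE(first);
    int         i,
                c;

    for (i = 0; i < 2; i++)          /* at most two more octal digits */
    {
        c = getc(fp);
        if (ISOCTAL(c))
            val = (val << 3) + VALUE(c);
        else
        {
            if (c != EOF)
                ungetc(c, fp);       /* "return to stream" */
            break;
        }
    }
    return val & 0377;
}

int
main(void)
{
    FILE       *fp = tmpfile();
    int         first,
                decoded,
                next;

    if (fp == NULL)
        return 1;
    fputs("101x", fp);               /* as if "\101x" followed a backslash */
    rewind(fp);
    first = getc(fp);
    decoded = read_octal_escape(fp, first);
    next = getc(fp);
    printf("decoded byte: %c, next char: %c\n", decoded, next);
    return 0;
}
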


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.40 1999/02/13 23:15:05 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.41 1999/05/25 16:08:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -39,7 +39,7 @@ static List *MergeAttributes(List *schema, List *supers, List **supconstr);
static void StoreCatalogInheritance(Oid relationId, List *supers);
/* ----------------------------------------------------------------
* DefineRelation
* DefineRelation
* Creates a new relation.
* ----------------------------------------------------------------
*/
@ -90,10 +90,10 @@ DefineRelation(CreateStmt *stmt, char relkind)
if (constraints != NIL)
{
List *entry;
int nconstr = length(constraints),
ncheck = 0,
i;
List *entry;
int nconstr = length(constraints),
ncheck = 0,
i;
ConstrCheck *check = (ConstrCheck *) palloc(nconstr * sizeof(ConstrCheck));
foreach(entry, constraints)
@ -107,9 +107,9 @@ DefineRelation(CreateStmt *stmt, char relkind)
for (i = 0; i < ncheck; i++)
{
if (strcmp(check[i].ccname, cdef->name) == 0)
elog(ERROR,
"DefineRelation: name (%s) of CHECK constraint duplicated",
cdef->name);
elog(ERROR,
"DefineRelation: name (%s) of CHECK constraint duplicated",
cdef->name);
}
check[ncheck].ccname = cdef->name;
}
@ -145,7 +145,7 @@ DefineRelation(CreateStmt *stmt, char relkind)
}
/*
* RemoveRelation
* RemoveRelation
* Deletes a relation.
*
* Exceptions:
@ -164,7 +164,7 @@ RemoveRelation(char *name)
/*
* MergeAttributes
* MergeAttributes
* Returns new schema given initial schema and supers.
*
*
@ -276,8 +276,8 @@ MergeAttributes(List *schema, List *supers, List **supconstr)
*/
attributeName = (attribute->attname).data;
tuple = SearchSysCacheTuple(TYPOID,
ObjectIdGetDatum(attribute->atttypid),
0, 0, 0);
ObjectIdGetDatum(attribute->atttypid),
0, 0, 0);
Assert(HeapTupleIsValid(tuple));
attributeType = (((Form_pg_type) GETSTRUCT(tuple))->typname).data;
@ -365,7 +365,7 @@ MergeAttributes(List *schema, List *supers, List **supconstr)
}
/*
* StoreCatalogInheritance
* StoreCatalogInheritance
* Updates the system catalogs with proper inheritance information.
*/
static void
@ -411,9 +411,9 @@ StoreCatalogInheritance(Oid relationId, List *supers)
*/
idList = lappendi(idList, tuple->t_data->t_oid);
datum[0] = ObjectIdGetDatum(relationId); /* inhrel */
datum[1] = ObjectIdGetDatum(tuple->t_data->t_oid); /* inhparent */
datum[2] = Int16GetDatum(seqNumber); /* inhseqno */
datum[0] = ObjectIdGetDatum(relationId); /* inhrel */
datum[1] = ObjectIdGetDatum(tuple->t_data->t_oid); /* inhparent */
datum[2] = Int16GetDatum(seqNumber); /* inhseqno */
nullarr[0] = ' ';
nullarr[1] = ' ';
@ -467,8 +467,8 @@ StoreCatalogInheritance(Oid relationId, List *supers)
break;
lnext(current) = lconsi(((Form_pg_inherits)
GETSTRUCT(tuple))->inhparent,
NIL);
GETSTRUCT(tuple))->inhparent,
NIL);
current = lnext(current);
}


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.34 1999/05/10 00:44:59 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.35 1999/05/25 16:08:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -47,8 +47,8 @@ createdb(char *dbname, char *dbpath, int encoding, CommandDest dest)
Oid db_id;
int4 user_id;
char buf[512];
char *lp,
loc[512];
char *lp,
loc[512];
/*
* If this call returns, the database does not exist and we're allowed
@ -80,13 +80,13 @@ createdb(char *dbname, char *dbpath, int encoding, CommandDest dest)
elog(ERROR, "Unable to create database directory '%s'", lp);
snprintf(buf, 512, "%s %s%cbase%ctemplate1%c* %s",
COPY_CMD, DataDir, SEP_CHAR, SEP_CHAR, SEP_CHAR, lp);
COPY_CMD, DataDir, SEP_CHAR, SEP_CHAR, SEP_CHAR, lp);
system(buf);
snprintf(buf, 512,
"insert into pg_database (datname, datdba, encoding, datpath)"
" values ('%s', '%d', '%d', '%s');", dbname, user_id, encoding,
loc);
snprintf(buf, 512,
"insert into pg_database (datname, datdba, encoding, datpath)"
" values ('%s', '%d', '%d', '%s');", dbname, user_id, encoding,
loc);
pg_exec_query_dest(buf, dest, false);
}
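
createdb above shells out to copy the template1 directory and then inserts the new pg_database row through a generated SQL statement. This sketch only reconstructs the two command strings, with made-up values for DataDir, COPY_CMD and the rest, so the shape of what gets executed is visible:

#include <stdio.h>

#define SEP_CHAR '/'
#define COPY_CMD "cp"            /* assumed value for the sketch */

int
main(void)
{
    char        buf[512];
    const char *DataDir = "/usr/local/pgsql/data";          /* assumed */
    const char *lp = "/usr/local/pgsql/data/base/testdb";   /* target directory */
    const char *loc = "";        /* default datpath */
    const char *dbname = "testdb";
    int         user_id = 100;
    int         encoding = 0;

    snprintf(buf, 512, "%s %s%cbase%ctemplate1%c* %s",
             COPY_CMD, DataDir, SEP_CHAR, SEP_CHAR, SEP_CHAR, lp);
    printf("shell: %s\n", buf);  /* would be handed to system() */

    snprintf(buf, 512,
             "insert into pg_database (datname, datdba, encoding, datpath)"
             " values ('%s', '%d', '%d', '%s');", dbname, user_id, encoding,
             loc);
    printf("query: %s\n", buf);  /* would go to pg_exec_query_dest() */
    return 0;
}
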
@ -96,9 +96,9 @@ destroydb(char *dbname, CommandDest dest)
{
int4 user_id;
Oid db_id;
char *path,
dbpath[MAXPGPATH + 1],
buf[512];
char *path,
dbpath[MAXPGPATH + 1],
buf[512];
/*
* If this call returns, the database exists and we're allowed to
@ -122,9 +122,9 @@ destroydb(char *dbname, CommandDest dest)
* remove the pg_database tuple FIRST, this may fail due to
* permissions problems
*/
snprintf(buf, 512,
"delete from pg_database where pg_database.oid = \'%u\'::oid", db_id);
pg_exec_query_dest(buf ,dest, false);
snprintf(buf, 512,
"delete from pg_database where pg_database.oid = \'%u\'::oid", db_id);
pg_exec_query_dest(buf, dest, false);
/* drop pages for this database that are in the shared buffer cache */
DropBuffers(db_id);
@ -294,13 +294,13 @@ static void
stop_vacuum(char *dbpath, char *dbname)
{
char filename[256];
FILE *fp;
FILE *fp;
int pid;
if (strchr(dbpath, SEP_CHAR) != 0)
{
snprintf(filename, 256, "%s%cbase%c%s%c%s.vacuum",
DataDir, SEP_CHAR, SEP_CHAR, dbname, SEP_CHAR, dbname);
snprintf(filename, 256, "%s%cbase%c%s%c%s.vacuum",
DataDir, SEP_CHAR, SEP_CHAR, dbname, SEP_CHAR, dbname);
}
else
snprintf(filename, 256, "%s%c%s.vacuum", dbpath, SEP_CHAR, dbname);


@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.28 1999/04/09 22:35:41 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.29 1999/05/25 16:08:22 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@ -203,7 +203,7 @@ interpret_AS_clause(const char *languageName, const char *as,
/*
* CreateFunction
* CreateFunction
* Execute a CREATE FUNCTION utility statement.
*
*/
@ -574,7 +574,7 @@ DefineAggregate(char *aggName, List *parameters)
}
/*
* DefineType
* DefineType
* Registers a new type.
*
*/


@ -4,7 +4,7 @@
*
* Copyright (c) 1994-5, Regents of the University of California
*
* $Id: explain.c,v 1.36 1999/05/09 23:31:45 tgl Exp $
* $Id: explain.c,v 1.37 1999/05/25 16:08:23 momjian Exp $
*
*/
#include <stdio.h>
@ -34,7 +34,7 @@ typedef struct ExplainState
} ExplainState;
static char *Explain_PlanToString(Plan *plan, ExplainState *es);
static void printLongNotice(const char * header, const char * message);
static void printLongNotice(const char *header, const char *message);
static void ExplainOneQuery(Query *query, bool verbose, CommandDest dest);
@ -46,8 +46,8 @@ static void ExplainOneQuery(Query *query, bool verbose, CommandDest dest);
void
ExplainQuery(Query *query, bool verbose, CommandDest dest)
{
List *rewritten;
List *l;
List *rewritten;
List *l;
/* rewriter and planner may not work in aborted state? */
if (IsAbortedTransactionBlockState())
@ -145,10 +145,10 @@ ExplainOneQuery(Query *query, bool verbose, CommandDest dest)
static void
explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
{
List *l;
List *l;
Relation relation;
char *pname;
int i;
char *pname;
int i;
if (plan == NULL)
{
@ -208,15 +208,13 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
case T_IndexScan:
appendStringInfo(str, " using ");
i = 0;
foreach (l, ((IndexScan *) plan)->indxid)
foreach(l, ((IndexScan *) plan)->indxid)
{
relation = RelationIdCacheGetRelation((int) lfirst(l));
if (++i > 1)
{
appendStringInfo(str, ", ");
}
appendStringInfo(str,
stringStringInfo((RelationGetRelationName(relation))->data));
appendStringInfo(str,
stringStringInfo((RelationGetRelationName(relation))->data));
}
case T_SeqScan:
if (((Scan *) plan)->scanrelid > 0)
@ -227,7 +225,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
if (strcmp(rte->refname, rte->relname) != 0)
{
appendStringInfo(str, "%s ",
stringStringInfo(rte->relname));
stringStringInfo(rte->relname));
}
appendStringInfo(str, stringStringInfo(rte->refname));
}
@ -238,7 +236,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
if (es->printCost)
{
appendStringInfo(str, " (cost=%.2f rows=%d width=%d)",
plan->cost, plan->plan_size, plan->plan_width);
plan->cost, plan->plan_size, plan->plan_width);
}
appendStringInfo(str, "\n");
@ -248,18 +246,14 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
List *saved_rtable = es->rtable;
List *lst;
for (i = 0; i < indent; i++)
{
for (i = 0; i < indent; i++)
appendStringInfo(str, " ");
}
appendStringInfo(str, " InitPlan\n");
foreach(lst, plan->initPlan)
{
es->rtable = ((SubPlan *) lfirst(lst))->rtable;
for (i = 0; i < indent; i++)
{
appendStringInfo(str, " ");
}
appendStringInfo(str, " -> ");
explain_outNode(str, ((SubPlan *) lfirst(lst))->plan, indent + 2, es);
}
@ -270,9 +264,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
if (outerPlan(plan))
{
for (i = 0; i < indent; i++)
{
appendStringInfo(str, " ");
}
appendStringInfo(str, " -> ");
explain_outNode(str, outerPlan(plan), indent + 3, es);
}
@ -281,9 +273,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
if (innerPlan(plan))
{
for (i = 0; i < indent; i++)
{
appendStringInfo(str, " ");
}
appendStringInfo(str, " -> ");
explain_outNode(str, innerPlan(plan), indent + 3, es);
}
@ -295,17 +285,13 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
List *lst;
for (i = 0; i < indent; i++)
{
appendStringInfo(str, " ");
}
appendStringInfo(str, " SubPlan\n");
foreach(lst, plan->subPlan)
{
es->rtable = ((SubPlan *) lfirst(lst))->rtable;
for (i = 0; i < indent; i++)
{
appendStringInfo(str, " ");
}
appendStringInfo(str, " -> ");
explain_outNode(str, ((SubPlan *) lfirst(lst))->plan, indent + 4, es);
}
@ -336,9 +322,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
es->rtable = nth(whichplan, appendplan->unionrtables);
for (i = 0; i < indent; i++)
{
appendStringInfo(str, " ");
}
appendStringInfo(str, " -> ");
explain_outNode(str, subnode, indent + 4, es);
@ -353,7 +337,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
static char *
Explain_PlanToString(Plan *plan, ExplainState *es)
{
StringInfoData str;
StringInfoData str;
/* see stringinfo.h for an explanation of this maneuver */
initStringInfo(&str);
@ -367,9 +351,9 @@ Explain_PlanToString(Plan *plan, ExplainState *es)
* This is a crock ... there shouldn't be an upper limit to what you can elog().
*/
static void
printLongNotice(const char * header, const char * message)
printLongNotice(const char *header, const char *message)
{
int len = strlen(message);
int len = strlen(message);
elog(NOTICE, "%.20s%.*s", header, ELOG_MAXLEN - 64, message);
len -= ELOG_MAXLEN - 64;


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.3 1999/05/10 00:44:59 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.4 1999/05/25 16:08:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -53,7 +53,7 @@ static void NormIndexAttrs(List *attList, AttrNumber *attNumP,
static char *GetDefaultOpClass(Oid atttypid);
/*
* DefineIndex
* DefineIndex
* Creates a new index.
*
* 'attributeList' is a list of IndexElem specifying either a functional
@ -164,7 +164,7 @@ DefineIndex(char *heapRelationName,
if (nargs > INDEX_MAX_KEYS)
{
elog(ERROR,
"Too many args to function, limit of %d", INDEX_MAX_KEYS);
"Too many args to function, limit of %d", INDEX_MAX_KEYS);
}
FIsetnArgs(&fInfo, nargs);
@ -207,7 +207,7 @@ DefineIndex(char *heapRelationName,
/*
* ExtendIndex
* ExtendIndex
* Extends a partial index.
*
* Exceptions:
@ -304,7 +304,7 @@ ExtendIndex(char *indexRelationName, Expr *predicate, List *rangetable)
predInfo->oldPred = oldPred;
attributeNumberA = (AttrNumber *) palloc(numberOfAttributes *
sizeof attributeNumberA[0]);
sizeof attributeNumberA[0]);
classObjectId = (Oid *) palloc(numberOfAttributes * sizeof classObjectId[0]);
@ -501,7 +501,7 @@ NormIndexAttrs(List *attList, /* list of IndexElem's */
/* we just set the type name because that is all we need */
attribute->typename = makeNode(TypeName);
attribute->typename->name = nameout(&((Form_pg_type) GETSTRUCT(tuple))->typname);
/* we all need the typmod for the char and varchar types. */
attribute->typename->typmod = attform->atttypmod;
}
@ -547,7 +547,7 @@ GetDefaultOpClass(Oid atttypid)
}
/*
* RemoveIndex
* RemoveIndex
* Deletes an index.
*
* Exceptions:


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.32 1999/02/13 23:15:08 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.33 1999/05/25 16:08:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -36,7 +36,7 @@
#endif
/*
* RemoveOperator
* RemoveOperator
* Deletes an operator.
*
* Exceptions:
@ -288,7 +288,7 @@ RemoveType(char *typeName) /* type name to be removed */
}
/*
* RemoveFunction
* RemoveFunction
* Deletes a function.
*
* Exceptions:


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.24 1999/05/17 18:24:48 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.25 1999/05/25 16:08:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -142,7 +142,7 @@ renameatt(char *relname,
}
}
if ((relid = RelnameFindRelid(relname)) == InvalidOid)
elog(ERROR, "renameatt: relation \"%s\" nonexistent", relname);
@ -201,7 +201,7 @@ renameatt(char *relname,
void
renamerel(char *oldrelname, char *newrelname)
{
int i;
int i;
Relation relrelation; /* for RELATION relation */
HeapTuple oldreltup;
char oldpath[MAXPGPATH],
@ -237,7 +237,7 @@ renamerel(char *oldrelname, char *newrelname)
{
sprintf(toldpath, "%s.%d", oldpath, i);
sprintf(tnewpath, "%s.%d", newpath, i);
if(rename(toldpath, tnewpath) < 0)
if (rename(toldpath, tnewpath) < 0)
break;
}


@ -66,7 +66,7 @@ static void init_params(CreateSeqStmt *seq, Form_pg_sequence new);
static int get_param(DefElem *def);
/*
* DefineSequence
* DefineSequence
* Creates a new sequence relation
*/
void
@ -218,8 +218,8 @@ nextval(struct varlena * seqin)
return elm->last;
}
seq = read_info("nextval", elm, &buf); /* lock page' buffer and read
* tuple */
seq = read_info("nextval", elm, &buf); /* lock page' buffer and
* read tuple */
next = result = seq->last_value;
incby = seq->increment_by;
@ -327,8 +327,8 @@ setval(struct varlena * seqin, int4 next)
/* open and AccessShareLock sequence */
elm = init_sequence("setval", seqname);
seq = read_info("setval", elm, &buf); /* lock page' buffer and read
* tuple */
seq = read_info("setval", elm, &buf); /* lock page' buffer and
* read tuple */
if (seq->cache_value != 1)
{
@ -361,11 +361,11 @@ setval(struct varlena * seqin, int4 next)
static Form_pg_sequence
read_info(char *caller, SeqTable elm, Buffer *buf)
{
PageHeader page;
ItemId lp;
HeapTupleData tuple;
PageHeader page;
ItemId lp;
HeapTupleData tuple;
sequence_magic *sm;
Form_pg_sequence seq;
Form_pg_sequence seq;
if (RelationGetNumberOfBlocks(elm->rel) != 1)
elog(ERROR, "%s.%s: invalid number of blocks in sequence",
@ -464,7 +464,7 @@ init_sequence(char *caller, char *name)
/*
* CloseSequences
* CloseSequences
* is calling by xact mgr at commit/abort.
*/
void


@ -362,9 +362,9 @@ RelationBuildTriggers(Relation relation)
Form_pg_trigger pg_trigger;
Relation irel;
ScanKeyData skey;
HeapTupleData tuple;
IndexScanDesc sd;
RetrieveIndexResult indexRes;
HeapTupleData tuple;
IndexScanDesc sd;
RetrieveIndexResult indexRes;
Buffer buffer;
struct varlena *val;
bool isnull;
@ -659,14 +659,14 @@ ExecARInsertTriggers(Relation rel, HeapTuple trigtuple)
bool
ExecBRDeleteTriggers(EState *estate, ItemPointer tupleid)
{
Relation rel = estate->es_result_relation_info->ri_RelationDesc;
TriggerData *SaveTriggerData;
int ntrigs = rel->trigdesc->n_before_row[TRIGGER_EVENT_DELETE];
Trigger **trigger = rel->trigdesc->tg_before_row[TRIGGER_EVENT_DELETE];
HeapTuple trigtuple;
HeapTuple newtuple = NULL;
Relation rel = estate->es_result_relation_info->ri_RelationDesc;
TriggerData *SaveTriggerData;
int ntrigs = rel->trigdesc->n_before_row[TRIGGER_EVENT_DELETE];
Trigger **trigger = rel->trigdesc->tg_before_row[TRIGGER_EVENT_DELETE];
HeapTuple trigtuple;
HeapTuple newtuple = NULL;
TupleTableSlot *newSlot;
int i;
int i;
trigtuple = GetTupleForTrigger(estate, tupleid, &newSlot);
if (trigtuple == NULL)
@ -697,7 +697,7 @@ ExecBRDeleteTriggers(EState *estate, ItemPointer tupleid)
void
ExecARDeleteTriggers(EState *estate, ItemPointer tupleid)
{
Relation rel = estate->es_result_relation_info->ri_RelationDesc;
Relation rel = estate->es_result_relation_info->ri_RelationDesc;
TriggerData *SaveTriggerData;
int ntrigs = rel->trigdesc->n_after_row[TRIGGER_EVENT_DELETE];
Trigger **trigger = rel->trigdesc->tg_after_row[TRIGGER_EVENT_DELETE];
@ -727,23 +727,23 @@ ExecARDeleteTriggers(EState *estate, ItemPointer tupleid)
HeapTuple
ExecBRUpdateTriggers(EState *estate, ItemPointer tupleid, HeapTuple newtuple)
{
Relation rel = estate->es_result_relation_info->ri_RelationDesc;
TriggerData *SaveTriggerData;
int ntrigs = rel->trigdesc->n_before_row[TRIGGER_EVENT_UPDATE];
Trigger **trigger = rel->trigdesc->tg_before_row[TRIGGER_EVENT_UPDATE];
HeapTuple trigtuple;
HeapTuple oldtuple;
HeapTuple intuple = newtuple;
Relation rel = estate->es_result_relation_info->ri_RelationDesc;
TriggerData *SaveTriggerData;
int ntrigs = rel->trigdesc->n_before_row[TRIGGER_EVENT_UPDATE];
Trigger **trigger = rel->trigdesc->tg_before_row[TRIGGER_EVENT_UPDATE];
HeapTuple trigtuple;
HeapTuple oldtuple;
HeapTuple intuple = newtuple;
TupleTableSlot *newSlot;
int i;
int i;
trigtuple = GetTupleForTrigger(estate, tupleid, &newSlot);
if (trigtuple == NULL)
return NULL;
/*
* In READ COMMITTED isolevel it's possible that newtuple
* was changed due to concurrent update.
* In READ COMMITTED isolevel it's possible that newtuple was changed
* due to concurrent update.
*/
if (newSlot != NULL)
intuple = newtuple = ExecRemoveJunk(estate->es_junkFilter, newSlot);
@ -772,7 +772,7 @@ ExecBRUpdateTriggers(EState *estate, ItemPointer tupleid, HeapTuple newtuple)
void
ExecARUpdateTriggers(EState *estate, ItemPointer tupleid, HeapTuple newtuple)
{
Relation rel = estate->es_result_relation_info->ri_RelationDesc;
Relation rel = estate->es_result_relation_info->ri_RelationDesc;
TriggerData *SaveTriggerData;
int ntrigs = rel->trigdesc->n_after_row[TRIGGER_EVENT_UPDATE];
Trigger **trigger = rel->trigdesc->tg_after_row[TRIGGER_EVENT_UPDATE];
@ -799,22 +799,22 @@ ExecARUpdateTriggers(EState *estate, ItemPointer tupleid, HeapTuple newtuple)
return;
}
extern TupleTableSlot *EvalPlanQual(EState *estate, Index rti, ItemPointer tid);
extern TupleTableSlot *EvalPlanQual(EState *estate, Index rti, ItemPointer tid);
static HeapTuple
GetTupleForTrigger(EState *estate, ItemPointer tid, TupleTableSlot **newSlot)
{
Relation relation = estate->es_result_relation_info->ri_RelationDesc;
HeapTupleData tuple;
HeapTuple result;
Buffer buffer;
Relation relation = estate->es_result_relation_info->ri_RelationDesc;
HeapTupleData tuple;
HeapTuple result;
Buffer buffer;
if (newSlot != NULL)
{
int test;
int test;
/*
* mark tuple for update
* mark tuple for update
*/
*newSlot = NULL;
tuple.t_self = *tid;
@ -824,7 +824,7 @@ ltrmark:;
{
case HeapTupleSelfUpdated:
ReleaseBuffer(buffer);
return(NULL);
return (NULL);
case HeapTupleMayBeUpdated:
break;
@ -835,9 +835,9 @@ ltrmark:;
elog(ERROR, "Can't serialize access due to concurrent update");
else if (!(ItemPointerEquals(&(tuple.t_self), tid)))
{
TupleTableSlot *epqslot = EvalPlanQual(estate,
estate->es_result_relation_info->ri_RangeTableIndex,
&(tuple.t_self));
TupleTableSlot *epqslot = EvalPlanQual(estate,
estate->es_result_relation_info->ri_RangeTableIndex,
&(tuple.t_self));
if (!(TupIsNull(epqslot)))
{
@ -846,23 +846,23 @@ ltrmark:;
goto ltrmark;
}
}
/*
* if tuple was deleted or PlanQual failed
* for updated tuple - we have not process
* this tuple!
/*
* if tuple was deleted or PlanQual failed for updated
* tuple - we have not process this tuple!
*/
return(NULL);
return (NULL);
default:
ReleaseBuffer(buffer);
elog(ERROR, "Unknown status %u from heap_mark4update", test);
return(NULL);
return (NULL);
}
}
else
{
PageHeader dp;
ItemId lp;
PageHeader dp;
ItemId lp;
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));


@ -5,11 +5,11 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: user.c,v 1.27 1999/04/02 06:16:36 tgl Exp $
* $Id: user.c,v 1.28 1999/05/25 16:08:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include <stdio.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
@ -35,7 +35,7 @@
static void CheckPgUserAclNotNull(void);
#define SQL_LENGTH 512
#define SQL_LENGTH 512
/*---------------------------------------------------------------------
* UpdatePgPwdFile
@ -49,9 +49,9 @@ void
UpdatePgPwdFile(char *sql, CommandDest dest)
{
char *filename,
*tempname;
int bufsize;
char *filename,
*tempname;
int bufsize;
/*
* Create a temporary filename to be renamed later. This prevents the
@ -68,9 +68,9 @@ UpdatePgPwdFile(char *sql, CommandDest dest)
* SEPCHAR character as the delimiter between fields. Then rename the
* file to its final name.
*/
snprintf(sql, SQL_LENGTH,
"copy %s to '%s' using delimiters %s",
ShadowRelationName, tempname, CRYPT_PWD_FILE_SEPCHAR);
snprintf(sql, SQL_LENGTH,
"copy %s to '%s' using delimiters %s",
ShadowRelationName, tempname, CRYPT_PWD_FILE_SEPCHAR);
pg_exec_query_dest(sql, dest, false);
rename(tempname, filename);
pfree((void *) tempname);
@ -94,19 +94,19 @@ UpdatePgPwdFile(char *sql, CommandDest dest)
void
DefineUser(CreateUserStmt *stmt, CommandDest dest)
{
char *pg_shadow,
sql[SQL_LENGTH];
Relation pg_shadow_rel;
TupleDesc pg_shadow_dsc;
HeapScanDesc scan;
HeapTuple tuple;
Datum datum;
bool exists = false,
n,
inblock,
havepassword,
havevaluntil;
int max_id = -1;
char *pg_shadow,
sql[SQL_LENGTH];
Relation pg_shadow_rel;
TupleDesc pg_shadow_dsc;
HeapScanDesc scan;
HeapTuple tuple;
Datum datum;
bool exists = false,
n,
inblock,
havepassword,
havevaluntil;
int max_id = -1;
havepassword = stmt->password && stmt->password[0];
havevaluntil = stmt->validUntil && stmt->validUntil[0];
@ -161,21 +161,21 @@ DefineUser(CreateUserStmt *stmt, CommandDest dest)
UnlockRelation(pg_shadow_rel, AccessExclusiveLock);
heap_close(pg_shadow_rel);
UserAbortTransactionBlock();
elog(ERROR,
"defineUser: user \"%s\" has already been created", stmt->user);
elog(ERROR,
"defineUser: user \"%s\" has already been created", stmt->user);
return;
}
/*
* Build the insert statement to be executed.
*
* XXX Ugly as this code is, it still fails to cope with ' or \
* in any of the provided strings.
* XXX Ugly as this code is, it still fails to cope with ' or \ in any of
* the provided strings.
*/
snprintf(sql, SQL_LENGTH,
snprintf(sql, SQL_LENGTH,
"insert into %s (usename,usesysid,usecreatedb,usetrace,"
"usesuper,usecatupd,passwd,valuntil) "
"values('%s',%d,'%c','t','%c','t',%s%s%s,%s%s%s)",
"values('%s',%d,'%c','t','%c','t',%s%s%s,%s%s%s)",
ShadowRelationName,
stmt->user,
max_id + 1,
@ -216,12 +216,12 @@ extern void
AlterUser(AlterUserStmt *stmt, CommandDest dest)
{
char *pg_shadow,
sql[SQL_LENGTH];
char *pg_shadow,
sql[SQL_LENGTH];
Relation pg_shadow_rel;
TupleDesc pg_shadow_dsc;
HeapTuple tuple;
bool inblock;
bool inblock;
if (stmt->password)
CheckPgUserAclNotNull();
@ -272,34 +272,32 @@ AlterUser(AlterUserStmt *stmt, CommandDest dest)
snprintf(sql, SQL_LENGTH, "update %s set", ShadowRelationName);
if (stmt->password)
{
snprintf(sql, SQL_LENGTH, "%s passwd = '%s'", pstrdup(sql), stmt->password);
}
if (stmt->createdb)
{
snprintf(sql, SQL_LENGTH, "%s %susecreatedb='%s'",
pstrdup(sql), stmt->password ? "," : "",
*stmt->createdb ? "t" : "f");
pstrdup(sql), stmt->password ? "," : "",
*stmt->createdb ? "t" : "f");
}
if (stmt->createuser)
{
snprintf(sql, SQL_LENGTH, "%s %susesuper='%s'",
pstrdup(sql), (stmt->password || stmt->createdb) ? "," : "",
*stmt->createuser ? "t" : "f");
pstrdup(sql), (stmt->password || stmt->createdb) ? "," : "",
*stmt->createuser ? "t" : "f");
}
if (stmt->validUntil)
{
snprintf(sql, SQL_LENGTH, "%s %svaluntil='%s'",
pstrdup(sql),
(stmt->password || stmt->createdb || stmt->createuser) ? "," : "",
stmt->validUntil);
pstrdup(sql),
(stmt->password || stmt->createdb || stmt->createuser) ? "," : "",
stmt->validUntil);
}
snprintf(sql, SQL_LENGTH, "%s where usename = '%s'",
pstrdup(sql), stmt->user);
pstrdup(sql), stmt->user);
pg_exec_query_dest(sql, dest, false);
@ -393,8 +391,8 @@ RemoveUser(char *user, CommandDest dest)
datum = heap_getattr(tuple, Anum_pg_database_datname, pg_dsc, &n);
if (memcmp((void *) datum, "template1", 9))
{
dbase =
(char **) repalloc((void *) dbase, sizeof(char *) * (ndbase + 1));
dbase =
(char **) repalloc((void *) dbase, sizeof(char *) * (ndbase + 1));
dbase[ndbase] = (char *) palloc(NAMEDATALEN + 1);
memcpy((void *) dbase[ndbase], (void *) datum, NAMEDATALEN);
dbase[ndbase++][NAMEDATALEN] = '\0';
@ -435,8 +433,8 @@ RemoveUser(char *user, CommandDest dest)
/*
* Remove the user from the pg_shadow table
*/
snprintf(sql, SQL_LENGTH,
"delete from %s where usename = '%s'", ShadowRelationName, user);
snprintf(sql, SQL_LENGTH,
"delete from %s where usename = '%s'", ShadowRelationName, user);
pg_exec_query_dest(sql, dest, false);
UpdatePgPwdFile(sql, dest);


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.103 1999/05/23 09:10:24 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.104 1999/05/25 16:08:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -66,7 +66,7 @@ static Portal vc_portal;
static int MESSAGE_LEVEL; /* message level */
static TransactionId XmaxRecent;
static TransactionId XmaxRecent;
#define swapLong(a,b) {long tmp; tmp=a; a=b; b=tmp;}
#define swapInt(a,b) {int tmp; tmp=a; a=b; b=tmp;}
@ -101,8 +101,8 @@ static void vc_free(VRelList vrl);
static void vc_getindices(Oid relid, int *nindices, Relation **Irel);
static void vc_clsindices(int nindices, Relation *Irel);
static void vc_mkindesc(Relation onerel, int nindices, Relation *Irel, IndDesc **Idesc);
static void *vc_find_eq(void *bot, int nelem, int size, void *elm,
int (*compar) (const void *, const void *));
static void *vc_find_eq(void *bot, int nelem, int size, void *elm,
int (*compar) (const void *, const void *));
static int vc_cmp_blk(const void *left, const void *right);
static int vc_cmp_offno(const void *left, const void *right);
static int vc_cmp_vtlinks(const void *left, const void *right);
@ -222,14 +222,15 @@ vc_shutdown()
{
/* on entry, we are not in a transaction */
/* Flush the init file that relcache.c uses to save startup time.
* The next backend startup will rebuild the init file with up-to-date
* information from pg_class. This lets the optimizer see the stats that
* we've collected for certain critical system indexes. See relcache.c
* for more details.
/*
* Flush the init file that relcache.c uses to save startup time. The
* next backend startup will rebuild the init file with up-to-date
* information from pg_class. This lets the optimizer see the stats
* that we've collected for certain critical system indexes. See
* relcache.c for more details.
*
* Ignore any failure to unlink the file, since it might not be there
* if no backend has been started since the last vacuum...
* Ignore any failure to unlink the file, since it might not be there if
* no backend has been started since the last vacuum...
*/
unlink(RELCACHE_INIT_FILENAME);
@ -578,7 +579,7 @@ vc_vacone(Oid relid, bool analyze, List *va_cols)
/* update statistics in pg_class */
vc_updstats(vacrelstats->relid, vacrelstats->num_pages,
vacrelstats->num_tuples, vacrelstats->hasindex, vacrelstats);
vacrelstats->num_tuples, vacrelstats->hasindex, vacrelstats);
/* next command frees attribute stats */
CommitTransactionCommand();
@ -601,7 +602,7 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
blkno;
ItemId itemid;
Buffer buf;
HeapTupleData tuple;
HeapTupleData tuple;
Page page,
tempPage = NULL;
OffsetNumber offnum,
@ -712,7 +713,7 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
else if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
{
if (TransactionIdDidCommit((TransactionId)
tuple.t_data->t_cmin))
tuple.t_data->t_cmin))
{
tuple.t_data->t_infomask |= HEAP_XMIN_INVALID;
tupgone = true;
@ -759,7 +760,7 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
else
{
elog(NOTICE, "Rel %s: TID %u/%u: InsertTransactionInProgress %u - can't shrink relation",
relname, blkno, offnum, tuple.t_data->t_xmin);
relname, blkno, offnum, tuple.t_data->t_xmin);
do_shrinking = false;
}
}
@ -799,6 +800,7 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
}
else if (!TransactionIdIsInProgress(tuple.t_data->t_xmax))
{
/*
* Not Aborted, Not Committed, Not in Progress - so it
* from crashed process. - vadim 06/02/97
@ -812,11 +814,12 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
relname, blkno, offnum, tuple.t_data->t_xmax);
do_shrinking = false;
}
/*
* If tuple is recently deleted then
* we must not remove it from relation.
* If tuple is recently deleted then we must not remove it
* from relation.
*/
if (tupgone && tuple.t_data->t_xmax >= XmaxRecent &&
if (tupgone && tuple.t_data->t_xmax >= XmaxRecent &&
tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED)
{
tupgone = false;
@ -826,20 +829,21 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
tuple.t_data->t_infomask |= HEAP_XMAX_COMMITTED;
pgchanged = true;
}
/*
* If we do shrinking and this tuple is updated one
* then remember it to construct updated tuple
* dependencies.
*/
if (do_shrinking && !(ItemPointerEquals(&(tuple.t_self),
&(tuple.t_data->t_ctid))))
if (do_shrinking && !(ItemPointerEquals(&(tuple.t_self),
&(tuple.t_data->t_ctid))))
{
if (free_vtlinks == 0)
{
free_vtlinks = 1000;
vtlinks = (VTupleLink) repalloc(vtlinks,
(free_vtlinks + num_vtlinks) *
sizeof(VTupleLinkData));
vtlinks = (VTupleLink) repalloc(vtlinks,
(free_vtlinks + num_vtlinks) *
sizeof(VTupleLinkData));
}
vtlinks[num_vtlinks].new_tid = tuple.t_data->t_ctid;
vtlinks[num_vtlinks].this_tid = tuple.t_self;
@ -962,8 +966,8 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
if (usable_free_size > 0 && num_vtlinks > 0)
{
qsort((char *) vtlinks, num_vtlinks, sizeof (VTupleLinkData),
vc_cmp_vtlinks);
qsort((char *) vtlinks, num_vtlinks, sizeof(VTupleLinkData),
vc_cmp_vtlinks);
vacrelstats->vtlinks = vtlinks;
vacrelstats->num_vtlinks = num_vtlinks;
}
@ -980,10 +984,10 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
Tup %u: Vac %u, Keep/VTL %u/%u, Crash %u, UnUsed %u, MinLen %u, MaxLen %u; \
Re-using: Free/Avail. Space %u/%u; EndEmpty/Avail. Pages %u/%u. \
Elapsed %u/%u sec.",
nblocks, changed_pages, vacuum_pages->vpl_num_pages, empty_pages,
new_pages, num_tuples, tups_vacuumed,
nkeep, vacrelstats->num_vtlinks, ncrash,
nunused, min_tlen, max_tlen, free_size, usable_free_size,
nblocks, changed_pages, vacuum_pages->vpl_num_pages, empty_pages,
new_pages, num_tuples, tups_vacuumed,
nkeep, vacrelstats->num_vtlinks, ncrash,
nunused, min_tlen, max_tlen, free_size, usable_free_size,
empty_end_pages, fraged_pages->vpl_num_pages,
ru1.ru_stime.tv_sec - ru0.ru_stime.tv_sec,
ru1.ru_utime.tv_sec - ru0.ru_utime.tv_sec);
@ -1019,8 +1023,8 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
max_offset;
ItemId itemid,
newitemid;
HeapTupleData tuple,
newtup;
HeapTupleData tuple,
newtup;
TupleDesc tupdesc = NULL;
Datum *idatum = NULL;
char *inulls = NULL;
@ -1128,7 +1132,8 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
else
Assert(!isempty);
chain_tuple_moved = false; /* no one chain-tuple was moved off this page, yet */
chain_tuple_moved = false; /* no one chain-tuple was moved
* off this page, yet */
vpc->vpd_blkno = blkno;
maxoff = PageGetMaxOffsetNumber(page);
for (offnum = FirstOffsetNumber;
@ -1146,28 +1151,30 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
{
if ((TransactionId)tuple.t_data->t_cmin != myXID)
if ((TransactionId) tuple.t_data->t_cmin != myXID)
elog(ERROR, "Invalid XID in t_cmin");
if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
elog(ERROR, "HEAP_MOVED_IN was not expected");
/*
* If this (chain) tuple is moved by me already then
* I have to check is it in vpc or not - i.e. is it
* moved while cleaning this page or some previous one.
/*
* If this (chain) tuple is moved by me already then I
* have to check is it in vpc or not - i.e. is it moved
* while cleaning this page or some previous one.
*/
if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
{
if (keep_tuples == 0)
continue;
if (chain_tuple_moved) /* some chains was moved while */
{ /* cleaning this page */
if (chain_tuple_moved) /* some chains was moved
* while */
{ /* cleaning this page */
Assert(vpc->vpd_offsets_free > 0);
for (i = 0; i < vpc->vpd_offsets_free; i++)
{
if (vpc->vpd_offsets[i] == offnum)
break;
}
if (i >= vpc->vpd_offsets_free) /* not found */
if (i >= vpc->vpd_offsets_free) /* not found */
{
vpc->vpd_offsets[vpc->vpd_offsets_free++] = offnum;
keep_tuples--;
@ -1184,29 +1191,29 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
}
/*
* If this tuple is in the chain of tuples created in
* updates by "recent" transactions then we have to
* move all chain of tuples to another places.
* If this tuple is in the chain of tuples created in updates
* by "recent" transactions then we have to move all chain of
* tuples to another places.
*/
if ((tuple.t_data->t_infomask & HEAP_UPDATED &&
if ((tuple.t_data->t_infomask & HEAP_UPDATED &&
tuple.t_data->t_xmin >= XmaxRecent) ||
(!(tuple.t_data->t_infomask & HEAP_XMAX_INVALID) &&
(!(tuple.t_data->t_infomask & HEAP_XMAX_INVALID) &&
!(ItemPointerEquals(&(tuple.t_self), &(tuple.t_data->t_ctid)))))
{
Buffer Cbuf = buf;
Page Cpage;
ItemId Citemid;
ItemPointerData Ctid;
HeapTupleData tp = tuple;
Size tlen = tuple_len;
VTupleMove vtmove = (VTupleMove)
palloc(100 * sizeof(VTupleMoveData));
int num_vtmove = 0;
int free_vtmove = 100;
VPageDescr to_vpd = fraged_pages->vpl_pagedesc[0];
int to_item = 0;
bool freeCbuf = false;
int ti;
Buffer Cbuf = buf;
Page Cpage;
ItemId Citemid;
ItemPointerData Ctid;
HeapTupleData tp = tuple;
Size tlen = tuple_len;
VTupleMove vtmove = (VTupleMove)
palloc(100 * sizeof(VTupleMoveData));
int num_vtmove = 0;
int free_vtmove = 100;
VPageDescr to_vpd = fraged_pages->vpl_pagedesc[0];
int to_item = 0;
bool freeCbuf = false;
int ti;
if (vacrelstats->vtlinks == NULL)
elog(ERROR, "No one parent tuple was found");
@ -1215,22 +1222,23 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
WriteBuffer(cur_buffer);
cur_buffer = InvalidBuffer;
}
/*
* If this tuple is in the begin/middle of the chain
* then we have to move to the end of chain.
* If this tuple is in the begin/middle of the chain then
* we have to move to the end of chain.
*/
while (!(tp.t_data->t_infomask & HEAP_XMAX_INVALID) &&
!(ItemPointerEquals(&(tp.t_self), &(tp.t_data->t_ctid))))
while (!(tp.t_data->t_infomask & HEAP_XMAX_INVALID) &&
!(ItemPointerEquals(&(tp.t_self), &(tp.t_data->t_ctid))))
{
Ctid = tp.t_data->t_ctid;
if (freeCbuf)
ReleaseBuffer(Cbuf);
freeCbuf = true;
Cbuf = ReadBuffer(onerel,
ItemPointerGetBlockNumber(&Ctid));
Cbuf = ReadBuffer(onerel,
ItemPointerGetBlockNumber(&Ctid));
Cpage = BufferGetPage(Cbuf);
Citemid = PageGetItemId(Cpage,
ItemPointerGetOffsetNumber(&Ctid));
Citemid = PageGetItemId(Cpage,
ItemPointerGetOffsetNumber(&Ctid));
if (!ItemIdIsUsed(Citemid))
elog(ERROR, "Child itemid marked as unused");
tp.t_data = (HeapTupleHeader) PageGetItem(Cpage, Citemid);
@ -1238,16 +1246,16 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
tlen = tp.t_len = ItemIdGetLength(Citemid);
}
/* first, can chain be moved ? */
for ( ; ; )
for (;;)
{
if (!vc_enough_space(to_vpd, tlen))
{
if (to_vpd != last_fraged_page &&
!vc_enough_space(to_vpd, vacrelstats->min_tlen))
!vc_enough_space(to_vpd, vacrelstats->min_tlen))
{
Assert(num_fraged_pages > to_item + 1);
memmove(fraged_pages->vpl_pagedesc + to_item,
fraged_pages->vpl_pagedesc + to_item + 1,
fraged_pages->vpl_pagedesc + to_item + 1,
sizeof(VPageDescr *) * (num_fraged_pages - to_item - 1));
num_fraged_pages--;
Assert(last_fraged_page == fraged_pages->vpl_pagedesc[num_fraged_pages - 1]);
@ -1257,7 +1265,8 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
if (vc_enough_space(fraged_pages->vpl_pagedesc[i], tlen))
break;
}
if (i == num_fraged_pages) /* can't move item anywhere */
if (i == num_fraged_pages) /* can't move item
* anywhere */
{
for (i = 0; i < num_vtmove; i++)
{
@ -1277,9 +1286,9 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
if (free_vtmove == 0)
{
free_vtmove = 1000;
vtmove = (VTupleMove) repalloc(vtmove,
(free_vtmove + num_vtmove) *
sizeof(VTupleMoveData));
vtmove = (VTupleMove) repalloc(vtmove,
(free_vtmove + num_vtmove) *
sizeof(VTupleMoveData));
}
vtmove[num_vtmove].tid = tp.t_self;
vtmove[num_vtmove].vpd = to_vpd;
@ -1289,56 +1298,59 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
vtmove[num_vtmove].cleanVpd = false;
free_vtmove--;
num_vtmove++;
/*
* All done ?
*/
if (!(tp.t_data->t_infomask & HEAP_UPDATED) ||
tp.t_data->t_xmin < XmaxRecent)
if (!(tp.t_data->t_infomask & HEAP_UPDATED) ||
tp.t_data->t_xmin < XmaxRecent)
break;
/*
* Well, try to find tuple with old row version
*/
for ( ; ; )
for (;;)
{
Buffer Pbuf;
Page Ppage;
ItemId Pitemid;
HeapTupleData Ptp;
VTupleLinkData vtld,
*vtlp;
Buffer Pbuf;
Page Ppage;
ItemId Pitemid;
HeapTupleData Ptp;
VTupleLinkData vtld,
*vtlp;
vtld.new_tid = tp.t_self;
vtlp = (VTupleLink)
vc_find_eq((void *) (vacrelstats->vtlinks),
vacrelstats->num_vtlinks,
sizeof(VTupleLinkData),
(void *) &vtld,
vc_cmp_vtlinks);
vtlp = (VTupleLink)
vc_find_eq((void *) (vacrelstats->vtlinks),
vacrelstats->num_vtlinks,
sizeof(VTupleLinkData),
(void *) &vtld,
vc_cmp_vtlinks);
if (vtlp == NULL)
elog(ERROR, "Parent tuple was not found");
tp.t_self = vtlp->this_tid;
Pbuf = ReadBuffer(onerel,
Pbuf = ReadBuffer(onerel,
ItemPointerGetBlockNumber(&(tp.t_self)));
Ppage = BufferGetPage(Pbuf);
Pitemid = PageGetItemId(Ppage,
ItemPointerGetOffsetNumber(&(tp.t_self)));
Pitemid = PageGetItemId(Ppage,
ItemPointerGetOffsetNumber(&(tp.t_self)));
if (!ItemIdIsUsed(Pitemid))
elog(ERROR, "Parent itemid marked as unused");
Ptp.t_data = (HeapTupleHeader) PageGetItem(Ppage, Pitemid);
Assert(Ptp.t_data->t_xmax == tp.t_data->t_xmin);
/*
* If this tuple is updated version of row and
* it was created by the same transaction then
* no one is interested in this tuple -
* mark it as removed.
* If this tuple is updated version of row and it
* was created by the same transaction then no one
* is interested in this tuple - mark it as
* removed.
*/
if (Ptp.t_data->t_infomask & HEAP_UPDATED &&
if (Ptp.t_data->t_infomask & HEAP_UPDATED &&
Ptp.t_data->t_xmin == Ptp.t_data->t_xmax)
{
TransactionIdStore(myXID,
(TransactionId*) &(Ptp.t_data->t_cmin));
Ptp.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED|HEAP_XMIN_INVALID|HEAP_MOVED_IN);
TransactionIdStore(myXID,
(TransactionId *) &(Ptp.t_data->t_cmin));
Ptp.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
Ptp.t_data->t_infomask |= HEAP_MOVED_OFF;
WriteBuffer(Pbuf);
continue;
@ -1354,7 +1366,7 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
}
if (freeCbuf)
ReleaseBuffer(Cbuf);
if (num_vtmove == 0) /* chain can't be moved */
if (num_vtmove == 0) /* chain can't be moved */
{
pfree(vtmove);
break;
@ -1364,19 +1376,20 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
{
/* Get tuple from chain */
tuple.t_self = vtmove[ti].tid;
Cbuf = ReadBuffer(onerel,
ItemPointerGetBlockNumber(&(tuple.t_self)));
Cbuf = ReadBuffer(onerel,
ItemPointerGetBlockNumber(&(tuple.t_self)));
Cpage = BufferGetPage(Cbuf);
Citemid = PageGetItemId(Cpage,
Citemid = PageGetItemId(Cpage,
ItemPointerGetOffsetNumber(&(tuple.t_self)));
tuple.t_data = (HeapTupleHeader) PageGetItem(Cpage, Citemid);
tuple_len = tuple.t_len = ItemIdGetLength(Citemid);
/* Get page to move in */
cur_buffer = ReadBuffer(onerel, vtmove[ti].vpd->vpd_blkno);
/*
* We should LockBuffer(cur_buffer) but don't, at the
* moment. If you'll do LockBuffer then UNLOCK it
* before index_insert: unique btree-s call heap_fetch
* We should LockBuffer(cur_buffer) but don't, at the
* moment. If you'll do LockBuffer then UNLOCK it
* before index_insert: unique btree-s call heap_fetch
* to get t_infomask of inserted heap tuple !!!
*/
ToPage = BufferGetPage(cur_buffer);
@ -1385,22 +1398,23 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
vc_vacpage(ToPage, vtmove[ti].vpd);
heap_copytuple_with_tuple(&tuple, &newtup);
RelationInvalidateHeapTuple(onerel, &tuple);
TransactionIdStore(myXID, (TransactionId*) &(newtup.t_data->t_cmin));
newtup.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED|HEAP_XMIN_INVALID|HEAP_MOVED_OFF);
TransactionIdStore(myXID, (TransactionId *) &(newtup.t_data->t_cmin));
newtup.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
newtup.t_data->t_infomask |= HEAP_MOVED_IN;
newoff = PageAddItem(ToPage, (Item) newtup.t_data, tuple_len,
InvalidOffsetNumber, LP_USED);
InvalidOffsetNumber, LP_USED);
if (newoff == InvalidOffsetNumber)
{
elog(ERROR, "\
moving chain: failed to add item with len = %u to page %u",
tuple_len, vtmove[ti].vpd->vpd_blkno);
tuple_len, vtmove[ti].vpd->vpd_blkno);
}
newitemid = PageGetItemId(ToPage, newoff);
pfree(newtup.t_data);
newtup.t_data = (HeapTupleHeader) PageGetItem(ToPage, newitemid);
ItemPointerSet(&(newtup.t_self), vtmove[ti].vpd->vpd_blkno, newoff);
/*
* Set t_ctid pointing to itself for last tuple in
* chain and to next tuple in chain otherwise.
@ -1411,19 +1425,20 @@ moving chain: failed to add item with len = %u to page %u",
newtup.t_data->t_ctid = Ctid;
Ctid = newtup.t_self;
TransactionIdStore(myXID, (TransactionId*) &(tuple.t_data->t_cmin));
tuple.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED|HEAP_XMIN_INVALID|HEAP_MOVED_IN);
TransactionIdStore(myXID, (TransactionId *) &(tuple.t_data->t_cmin));
tuple.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
num_moved++;
/*
* Remember that we moved tuple from the current page
* (corresponding index tuple will be cleaned).
*/
if (Cbuf == buf)
vpc->vpd_offsets[vpc->vpd_offsets_free++] =
ItemPointerGetOffsetNumber(&(tuple.t_self));
vpc->vpd_offsets[vpc->vpd_offsets_free++] =
ItemPointerGetOffsetNumber(&(tuple.t_self));
else
keep_tuples++;
@ -1432,12 +1447,12 @@ moving chain: failed to add item with len = %u to page %u",
for (i = 0, idcur = Idesc; i < nindices; i++, idcur++)
{
FormIndexDatum(idcur->natts,
(AttrNumber *) &(idcur->tform->indkey[0]),
&newtup,
tupdesc,
idatum,
inulls,
idcur->finfoP);
(AttrNumber *) &(idcur->tform->indkey[0]),
&newtup,
tupdesc,
idatum,
inulls,
idcur->finfoP);
iresult = index_insert(Irel[i],
idatum,
inulls,
@ -1507,13 +1522,13 @@ moving chain: failed to add item with len = %u to page %u",
RelationInvalidateHeapTuple(onerel, &tuple);
/*
* Mark new tuple as moved_in by vacuum and
* store vacuum XID in t_cmin !!!
/*
* Mark new tuple as moved_in by vacuum and store vacuum XID
* in t_cmin !!!
*/
TransactionIdStore(myXID, (TransactionId*) &(newtup.t_data->t_cmin));
newtup.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED|HEAP_XMIN_INVALID|HEAP_MOVED_OFF);
TransactionIdStore(myXID, (TransactionId *) &(newtup.t_data->t_cmin));
newtup.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
newtup.t_data->t_infomask |= HEAP_MOVED_IN;
/* add tuple to the page */
@ -1532,13 +1547,13 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
ItemPointerSet(&(newtup.t_data->t_ctid), cur_page->vpd_blkno, newoff);
newtup.t_self = newtup.t_data->t_ctid;
/*
* Mark old tuple as moved_off by vacuum and
* store vacuum XID in t_cmin !!!
/*
* Mark old tuple as moved_off by vacuum and store vacuum XID
* in t_cmin !!!
*/
TransactionIdStore(myXID, (TransactionId*) &(tuple.t_data->t_cmin));
tuple.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED|HEAP_XMIN_INVALID|HEAP_MOVED_IN);
TransactionIdStore(myXID, (TransactionId *) &(tuple.t_data->t_cmin));
tuple.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
cur_page->vpd_offsets_used++;
@ -1572,11 +1587,11 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
if (offnum < maxoff && keep_tuples > 0)
{
OffsetNumber off;
OffsetNumber off;
for (off = OffsetNumberNext(offnum);
off <= maxoff;
off = OffsetNumberNext(off))
off <= maxoff;
off = OffsetNumberNext(off))
{
itemid = PageGetItemId(page, off);
if (!ItemIdIsUsed(itemid))
@ -1584,21 +1599,22 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
if (tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED)
continue;
if ((TransactionId)tuple.t_data->t_cmin != myXID)
if ((TransactionId) tuple.t_data->t_cmin != myXID)
elog(ERROR, "Invalid XID in t_cmin (4)");
if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
elog(ERROR, "HEAP_MOVED_IN was not expected (2)");
if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
{
if (chain_tuple_moved) /* some chains was moved while */
{ /* cleaning this page */
if (chain_tuple_moved) /* some chains was moved
* while */
{ /* cleaning this page */
Assert(vpc->vpd_offsets_free > 0);
for (i = 0; i < vpc->vpd_offsets_free; i++)
{
if (vpc->vpd_offsets[i] == off)
break;
}
if (i >= vpc->vpd_offsets_free) /* not found */
if (i >= vpc->vpd_offsets_free) /* not found */
{
vpc->vpd_offsets[vpc->vpd_offsets_free++] = off;
Assert(keep_tuples > 0);
@ -1619,8 +1635,8 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
{
if (chain_tuple_moved) /* else - they are ordered */
{
qsort((char *) (vpc->vpd_offsets), vpc->vpd_offsets_free,
sizeof(OffsetNumber), vc_cmp_offno);
qsort((char *) (vpc->vpd_offsets), vpc->vpd_offsets_free,
sizeof(OffsetNumber), vc_cmp_offno);
}
vc_reappage(&Nvpl, vpc);
WriteBuffer(buf);
@ -1645,6 +1661,7 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
if (num_moved > 0)
{
/*
* We have to commit our tuple' movings before we'll truncate
* relation, but we shouldn't lose our locks. And so - quick hack:
@ -1657,8 +1674,8 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
}
/*
* Clean uncleaned reapped pages from vacuum_pages list list and set xmin
* committed for inserted tuples
* Clean uncleaned reapped pages from vacuum_pages list list and set
* xmin committed for inserted tuples
*/
checked_moved = 0;
for (i = 0, vpp = vacuum_pages->vpl_pagedesc; i < vacuumed_pages; i++, vpp++)
@ -1671,7 +1688,8 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
if (!PageIsEmpty(page))
vc_vacpage(page, *vpp);
}
else /* this page was used */
else
/* this page was used */
{
num_tuples = 0;
max_offset = PageGetMaxOffsetNumber(page);
@ -1685,7 +1703,7 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
{
if ((TransactionId)tuple.t_data->t_cmin != myXID)
if ((TransactionId) tuple.t_data->t_cmin != myXID)
elog(ERROR, "Invalid XID in t_cmin (2)");
if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
{
@ -1734,8 +1752,8 @@ Elapsed %u/%u sec.",
}
Assert(keep_tuples >= 0);
for (i = 0; i < nindices; i++)
vc_vaconeind(&Nvpl, Irel[i],
vacrelstats->num_tuples, keep_tuples);
vc_vaconeind(&Nvpl, Irel[i],
vacrelstats->num_tuples, keep_tuples);
}
/*
@ -1757,7 +1775,7 @@ Elapsed %u/%u sec.",
if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
{
if ((TransactionId)tuple.t_data->t_cmin != myXID)
if ((TransactionId) tuple.t_data->t_cmin != myXID)
elog(ERROR, "Invalid XID in t_cmin (3)");
if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
{
@ -1998,7 +2016,7 @@ vc_vaconeind(VPageList vpl, Relation indrel, int num_tuples, int keep_tuples)
getrusage(RUSAGE_SELF, &ru1);
elog(MESSAGE_LEVEL, "Index %s: Pages %u; Tuples %u: Deleted %u. Elapsed %u/%u sec.",
indrel->rd_rel->relname.data, num_pages,
indrel->rd_rel->relname.data, num_pages,
num_index_tuples - keep_tuples, tups_vacuumed,
ru1.ru_stime.tv_sec - ru0.ru_stime.tv_sec,
ru1.ru_utime.tv_sec - ru0.ru_utime.tv_sec);
@ -2208,25 +2226,25 @@ vc_bucketcpy(Form_pg_attribute attr, Datum value, Datum *bucket, int16 *bucket_l
static void
vc_updstats(Oid relid, int num_pages, int num_tuples, bool hasindex, VRelStats *vacrelstats)
{
Relation rd,
ad,
sd;
HeapScanDesc scan;
HeapTupleData rtup;
HeapTuple ctup,
atup,
stup;
Form_pg_class pgcform;
ScanKeyData askey;
Form_pg_attribute attp;
Buffer buffer;
Relation rd,
ad,
sd;
HeapScanDesc scan;
HeapTupleData rtup;
HeapTuple ctup,
atup,
stup;
Form_pg_class pgcform;
ScanKeyData askey;
Form_pg_attribute attp;
Buffer buffer;
/*
* update number of tuples and number of pages in pg_class
*/
ctup = SearchSysCacheTupleCopy(RELOID,
ObjectIdGetDatum(relid),
0, 0, 0);
ObjectIdGetDatum(relid),
0, 0, 0);
if (!HeapTupleIsValid(ctup))
elog(ERROR, "pg_class entry for relid %u vanished during vacuuming",
relid);
@ -2237,7 +2255,7 @@ vc_updstats(Oid relid, int num_pages, int num_tuples, bool hasindex, VRelStats *
rtup.t_self = ctup->t_self;
heap_fetch(rd, SnapshotNow, &rtup, &buffer);
pfree(ctup);
/* overwrite the existing statistics in the tuple */
vc_setpagelock(rd, ItemPointerGetBlockNumber(&(rtup.t_self)));
pgcform = (Form_pg_class) GETSTRUCT(&rtup);
@ -2317,8 +2335,7 @@ vc_updstats(Oid relid, int num_pages, int num_tuples, bool hasindex, VRelStats *
attp->attdisbursion = selratio;
/*
* Invalidate the cache for the tuple
* and write the buffer
* Invalidate the cache for the tuple and write the buffer
*/
RelationInvalidateHeapTuple(ad, atup);
WriteNoReleaseBuffer(abuffer);
@ -2375,8 +2392,7 @@ vc_updstats(Oid relid, int num_pages, int num_tuples, bool hasindex, VRelStats *
}
/*
* Invalidate the cached pg_class tuple and
* write the buffer
* Invalidate the cached pg_class tuple and write the buffer
*/
RelationInvalidateHeapTuple(rd, &rtup);
@ -2504,8 +2520,8 @@ vc_free(VRelList vrl)
}
static void *
vc_find_eq(void *bot, int nelem, int size, void *elm,
int (*compar) (const void *, const void *))
vc_find_eq(void *bot, int nelem, int size, void *elm,
int (*compar) (const void *, const void *))
{
int res;
int last = nelem - 1;
@ -2527,16 +2543,16 @@ vc_find_eq(void *bot, int nelem, int size, void *elm,
}
if (last_move == true)
{
res = compar(elm, (void *)((char *)bot + last * size));
res = compar(elm, (void *) ((char *) bot + last * size));
if (res > 0)
return NULL;
if (res == 0)
return (void *)((char *)bot + last * size);
return (void *) ((char *) bot + last * size);
last_move = false;
}
res = compar(elm, (void *)((char *)bot + celm * size));
res = compar(elm, (void *) ((char *) bot + celm * size));
if (res == 0)
return (void *)((char *)bot + celm * size);
return (void *) ((char *) bot + celm * size);
if (res < 0)
{
if (celm == 0)
@ -2551,7 +2567,7 @@ vc_find_eq(void *bot, int nelem, int size, void *elm,
return NULL;
last = last - celm - 1;
bot = (void *)((char *)bot + (celm + 1) * size);
bot = (void *) ((char *) bot + (celm + 1) * size);
celm = (last + 1) / 2;
first_move = true;
}
@ -2591,25 +2607,25 @@ static int
vc_cmp_vtlinks(const void *left, const void *right)
{
if (((VTupleLink)left)->new_tid.ip_blkid.bi_hi <
((VTupleLink)right)->new_tid.ip_blkid.bi_hi)
if (((VTupleLink) left)->new_tid.ip_blkid.bi_hi <
((VTupleLink) right)->new_tid.ip_blkid.bi_hi)
return -1;
if (((VTupleLink)left)->new_tid.ip_blkid.bi_hi >
((VTupleLink)right)->new_tid.ip_blkid.bi_hi)
if (((VTupleLink) left)->new_tid.ip_blkid.bi_hi >
((VTupleLink) right)->new_tid.ip_blkid.bi_hi)
return 1;
/* bi_hi-es are equal */
if (((VTupleLink)left)->new_tid.ip_blkid.bi_lo <
((VTupleLink)right)->new_tid.ip_blkid.bi_lo)
if (((VTupleLink) left)->new_tid.ip_blkid.bi_lo <
((VTupleLink) right)->new_tid.ip_blkid.bi_lo)
return -1;
if (((VTupleLink)left)->new_tid.ip_blkid.bi_lo >
((VTupleLink)right)->new_tid.ip_blkid.bi_lo)
if (((VTupleLink) left)->new_tid.ip_blkid.bi_lo >
((VTupleLink) right)->new_tid.ip_blkid.bi_lo)
return 1;
/* bi_lo-es are equal */
if (((VTupleLink)left)->new_tid.ip_posid <
((VTupleLink)right)->new_tid.ip_posid)
if (((VTupleLink) left)->new_tid.ip_posid <
((VTupleLink) right)->new_tid.ip_posid)
return -1;
if (((VTupleLink)left)->new_tid.ip_posid >
((VTupleLink)right)->new_tid.ip_posid)
if (((VTupleLink) left)->new_tid.ip_posid >
((VTupleLink) right)->new_tid.ip_posid)
return 1;
return 0;


@ -2,7 +2,7 @@
* Routines for handling of 'SET var TO',
* 'SHOW var' and 'RESET var' statements.
*
* $Id: variable.c,v 1.19 1999/02/18 06:00:44 momjian Exp $
* $Id: variable.c,v 1.20 1999/05/25 16:08:28 momjian Exp $
*
*/
@ -45,10 +45,12 @@ static bool parse_ksqo(const char *);
static bool show_XactIsoLevel(void);
static bool reset_XactIsoLevel(void);
static bool parse_XactIsoLevel(const char *);
#ifdef QUERY_LIMIT
static bool show_query_limit(void);
static bool reset_query_limit(void);
static bool parse_query_limit(const char *);
#endif
extern Cost _cpu_page_wight_;
@ -545,41 +547,41 @@ reset_timezone()
static bool
parse_query_limit(const char *value)
{
int32 limit;
int32 limit;
if (value == NULL) {
reset_query_limit();
return(TRUE);
}
/* why is pg_atoi's arg not declared "const char *" ? */
limit = pg_atoi((char *) value, sizeof(int32), '\0');
if (limit <= -1) {
elog(ERROR, "Bad value for # of query limit (%s)", value);
}
ExecutorLimit(limit);
return(TRUE);
if (value == NULL)
{
reset_query_limit();
return (TRUE);
}
/* why is pg_atoi's arg not declared "const char *" ? */
limit = pg_atoi((char *) value, sizeof(int32), '\0');
if (limit <= -1)
elog(ERROR, "Bad value for # of query limit (%s)", value);
ExecutorLimit(limit);
return (TRUE);
}
static bool
show_query_limit(void)
{
int limit;
int limit;
limit = ExecutorGetLimit();
if (limit == ALL_TUPLES) {
elog(NOTICE, "No query limit is set");
} else {
elog(NOTICE, "query limit is %d",limit);
}
return(TRUE);
limit = ExecutorGetLimit();
if (limit == ALL_TUPLES)
elog(NOTICE, "No query limit is set");
else
elog(NOTICE, "query limit is %d", limit);
return (TRUE);
}
static bool
reset_query_limit(void)
{
ExecutorLimit(ALL_TUPLES);
return(TRUE);
ExecutorLimit(ALL_TUPLES);
return (TRUE);
}
#endif
/*-----------------------------------------------------------------------*/
@ -685,10 +687,10 @@ ResetPGVariable(const char *name)
/*-----------------------------------------------------------------------
KSQO code will one day be unnecessary when the optimizer makes use of
KSQO code will one day be unnecessary when the optimizer makes use of
indexes when multiple ORs are specified in the where clause.
See optimizer/prep/prepkeyset.c for more on this.
daveh@insightdist.com 6/16/98
daveh@insightdist.com 6/16/98
-----------------------------------------------------------------------*/
static bool
parse_ksqo(const char *value)
@ -732,7 +734,7 @@ reset_ksqo()
static bool
parse_XactIsoLevel(const char *value)
{
if (value == NULL)
{
reset_XactIsoLevel();
@ -770,7 +772,7 @@ show_XactIsoLevel()
static bool
reset_XactIsoLevel()
{
if (SerializableSnapshot != NULL)
{
elog(ERROR, "SET TRANSACTION ISOLATION LEVEL must be called before any query");


@ -5,11 +5,11 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: view.c,v 1.32 1999/02/13 23:15:12 momjian Exp $
* $Id: view.c,v 1.33 1999/05/25 16:08:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include <stdio.h>
#include <stdio.h>
#include <string.h>
#include <postgres.h>
@ -230,9 +230,9 @@ UpdateRangeTableOfViewParse(char *viewName, Query *viewParse)
* table... CURRENT first, then NEW....
*/
rt_entry1 = addRangeTableEntry(NULL, (char *) viewName, "*CURRENT*",
FALSE, FALSE);
FALSE, FALSE);
rt_entry2 = addRangeTableEntry(NULL, (char *) viewName, "*NEW*",
FALSE, FALSE);
FALSE, FALSE);
new_rt = lcons(rt_entry2, old_rt);
new_rt = lcons(rt_entry1, new_rt);


@ -6,15 +6,15 @@
* Copyright (c) 1994, Regents of the University of California
*
* DESCRIPTION
* This code provides support for a tee node, which allows
* multiple parent in a megaplan.
* This code provides support for a tee node, which allows
* multiple parent in a megaplan.
*
* INTERFACE ROUTINES
* ExecTee
* ExecInitTee
* ExecEndTee
*
* $Id: nodeTee.c,v 1.1 1999/03/23 16:50:49 momjian Exp $
* $Id: nodeTee.c,v 1.2 1999/05/25 16:08:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -46,12 +46,12 @@
bool
ExecInitTee(Tee *node, EState *currentEstate, Plan *parent)
{
TeeState *teeState;
Plan *outerPlan;
int len;
TeeState *teeState;
Plan *outerPlan;
int len;
Relation bufferRel;
TupleDesc tupType;
EState *estate;
EState *estate;
/*
* it is possible that the Tee has already been initialized since it
@ -167,7 +167,7 @@ ExecInitTee(Tee *node, EState *currentEstate, Plan *parent)
else
bufferRel = heap_open(
heap_create_with_catalog(teeState->tee_bufferRelname,
tupType, RELKIND_RELATION, false));
tupType, RELKIND_RELATION, false));
}
else
{
@ -176,7 +176,7 @@ ExecInitTee(Tee *node, EState *currentEstate, Plan *parent)
newoid());
bufferRel = heap_open(
heap_create_with_catalog(teeState->tee_bufferRelname,
tupType, RELKIND_RELATION, false));
tupType, RELKIND_RELATION, false));
}
teeState->tee_bufferRel = bufferRel;
@ -339,6 +339,7 @@ ExecTee(Tee *node, Plan *parent)
slot = ExecProcNode(childNode, (Plan *) node);
if (!TupIsNull(slot))
{
/*
* heap_insert changes something...
*/


@ -5,7 +5,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: execAmi.c,v 1.34 1999/05/10 00:45:05 momjian Exp $
* $Id: execAmi.c,v 1.35 1999/05/25 16:08:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -24,7 +24,7 @@
* ExecCreatR function to create temporary relations
*
*/
#include <stdio.h>
#include <stdio.h>
#include "postgres.h"

File diff suppressed because it is too large


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.50 1999/03/20 02:07:31 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.51 1999/05/25 16:08:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -66,7 +66,7 @@ bool execConstByVal;
int execConstLen;
/* static functions decls */
static Datum ExecEvalAggref(Aggref *aggref, ExprContext *econtext, bool *isNull);
static Datum ExecEvalAggref(Aggref * aggref, ExprContext *econtext, bool *isNull);
static Datum ExecEvalArrayRef(ArrayRef *arrayRef, ExprContext *econtext,
bool *isNull, bool *isDone);
static Datum ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull);
@ -190,7 +190,7 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
* ----------------------------------------------------------------
*/
static Datum
ExecEvalAggref(Aggref *aggref, ExprContext *econtext, bool *isNull)
ExecEvalAggref(Aggref * aggref, ExprContext *econtext, bool *isNull)
{
*isNull = econtext->ecxt_nulls[aggref->aggno];
return econtext->ecxt_values[aggref->aggno];
@ -232,7 +232,7 @@ ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull)
int16 len;
/*
* get the slot we want
* get the slot we want
*/
switch (variable->varno)
{
@ -251,7 +251,7 @@ ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull)
}
/*
* extract tuple information from the slot
* extract tuple information from the slot
*/
heapTuple = slot->val;
tuple_type = slot->ttc_tupleDescriptor;
@ -270,7 +270,7 @@ ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull)
* the entire tuple, we give back a whole slot so that callers know
* what the tuple looks like.
*/
if (attnum == InvalidAttrNumber)
if (attnum == InvalidAttrNumber)
{
TupleTableSlot *tempSlot;
TupleDesc td;
@ -299,26 +299,25 @@ ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull)
isNull); /* return: is attribute null? */
/*
* return null if att is null
* return null if att is null
*/
if (*isNull)
return (Datum) NULL;
/*
* get length and type information..
* ??? what should we do about variable length attributes
* - variable length attributes have their length stored
* in the first 4 bytes of the memory pointed to by the
* returned value.. If we can determine that the type
* is a variable length type, we can do the right thing.
* -cim 9/15/89
* get length and type information.. ??? what should we do about
* variable length attributes - variable length attributes have their
* length stored in the first 4 bytes of the memory pointed to by the
* returned value.. If we can determine that the type is a variable
* length type, we can do the right thing. -cim 9/15/89
*/
if (attnum < 0)
{
/*
* If this is a pseudo-att, we get the type and fake the length.
* There ought to be a routine to return the real lengths, so
* we'll mark this one ... XXX -mao
* If this is a pseudo-att, we get the type and fake the length.
* There ought to be a routine to return the real lengths, so
* we'll mark this one ... XXX -mao
*/
len = heap_sysattrlen(attnum); /* XXX see -mao above */
byval = heap_sysattrbyval(attnum); /* XXX see -mao above */
@ -609,11 +608,11 @@ ExecEvalFuncArgs(FunctionCachePtr fcache,
i = 0;
foreach(arg, argList)
{
/*
* evaluate the expression, in general functions cannot take
* sets as arguments but we make an exception in the case of
* nested dot expressions. We have to watch out for this case
* here.
* evaluate the expression, in general functions cannot take sets
* as arguments but we make an exception in the case of nested dot
* expressions. We have to watch out for this case here.
*/
argV[i] = (Datum)
ExecEvalExpr((Node *) lfirst(arg),
@ -671,10 +670,10 @@ ExecMakeFunctionResult(Node *node,
}
/*
* arguments is a list of expressions to evaluate
* before passing to the function manager.
* We collect the results of evaluating the expressions
* into a datum array (argV) and pass this array to arrayFmgr()
* arguments is a list of expressions to evaluate before passing to
* the function manager. We collect the results of evaluating the
* expressions into a datum array (argV) and pass this array to
* arrayFmgr()
*/
if (fcache->nargs != 0)
{
@ -743,8 +742,8 @@ ExecMakeFunctionResult(Node *node,
}
/*
* now return the value gotten by calling the function manager,
* passing the function the evaluated parameter values.
* now return the value gotten by calling the function manager,
* passing the function the evaluated parameter values.
*/
if (fcache->language == SQLlanguageId)
{
@ -843,12 +842,12 @@ ExecEvalOper(Expr *opClause, ExprContext *econtext, bool *isNull)
bool isDone;
/*
* an opclause is a list (op args). (I think)
* an opclause is a list (op args). (I think)
*
* we extract the oid of the function associated with
* the op and then pass the work onto ExecMakeFunctionResult
* which evaluates the arguments and returns the result of
* calling the function on the evaluated arguments.
* we extract the oid of the function associated with the op and then
* pass the work onto ExecMakeFunctionResult which evaluates the
* arguments and returns the result of calling the function on the
* evaluated arguments.
*/
op = (Oper *) opClause->oper;
argList = opClause->args;
@ -865,8 +864,8 @@ ExecEvalOper(Expr *opClause, ExprContext *econtext, bool *isNull)
}
/*
* call ExecMakeFunctionResult() with a dummy isDone that we ignore.
* We don't have operator whose arguments are sets.
* call ExecMakeFunctionResult() with a dummy isDone that we ignore.
* We don't have operator whose arguments are sets.
*/
return ExecMakeFunctionResult((Node *) op, argList, econtext, isNull, &isDone);
}
@ -887,14 +886,14 @@ ExecEvalFunc(Expr *funcClause,
FunctionCachePtr fcache;
/*
* an funcclause is a list (func args). (I think)
* an funcclause is a list (func args). (I think)
*
* we extract the oid of the function associated with
* the func node and then pass the work onto ExecMakeFunctionResult
* which evaluates the arguments and returns the result of
* calling the function on the evaluated arguments.
* we extract the oid of the function associated with the func node and
* then pass the work onto ExecMakeFunctionResult which evaluates the
* arguments and returns the result of calling the function on the
* evaluated arguments.
*
* this is nearly identical to the ExecEvalOper code.
* this is nearly identical to the ExecEvalOper code.
*/
func = (Func *) funcClause->oper;
argList = funcClause->args;
@ -939,21 +938,21 @@ ExecEvalNot(Expr *notclause, ExprContext *econtext, bool *isNull)
clause = lfirst(notclause->args);
/*
* We don't iterate over sets in the quals, so pass in an isDone
* flag, but ignore it.
* We don't iterate over sets in the quals, so pass in an isDone flag,
* but ignore it.
*/
expr_value = ExecEvalExpr(clause, econtext, isNull, &isDone);
/*
* if the expression evaluates to null, then we just
* cascade the null back to whoever called us.
* if the expression evaluates to null, then we just cascade the null
* back to whoever called us.
*/
if (*isNull)
return expr_value;
/*
* evaluation of 'not' is simple.. expr is false, then
* return 'true' and vice versa.
* evaluation of 'not' is simple.. expr is false, then return 'true'
* and vice versa.
*/
if (DatumGetInt32(expr_value) == 0)
return (Datum) true;
@ -978,22 +977,19 @@ ExecEvalOr(Expr *orExpr, ExprContext *econtext, bool *isNull)
clauses = orExpr->args;
/*
* we use three valued logic functions here...
* we evaluate each of the clauses in turn,
* as soon as one is true we return that
* value. If none is true and none of the
* clauses evaluate to NULL we return
* the value of the last clause evaluated (which
* should be false) with *isNull set to false else
* if none is true and at least one clause evaluated
* to NULL we set *isNull flag to true -
* we use three valued logic functions here... we evaluate each of the
* clauses in turn, as soon as one is true we return that value. If
* none is true and none of the clauses evaluate to NULL we return
* the value of the last clause evaluated (which should be false) with
* *isNull set to false else if none is true and at least one clause
* evaluated to NULL we set *isNull flag to true -
*/
foreach(clause, clauses)
{
/*
* We don't iterate over sets in the quals, so pass in an isDone
* flag, but ignore it.
* We don't iterate over sets in the quals, so pass in an isDone
* flag, but ignore it.
*/
const_value = ExecEvalExpr((Node *) lfirst(clause),
econtext,
@ -1001,34 +997,32 @@ ExecEvalOr(Expr *orExpr, ExprContext *econtext, bool *isNull)
&isDone);
/*
* if the expression evaluates to null, then we
* remember it in the local IsNull flag, if none of the
* clauses are true then we need to set *isNull
* to true again.
* if the expression evaluates to null, then we remember it in the
* local IsNull flag, if none of the clauses are true then we need
* to set *isNull to true again.
*/
if (*isNull)
{
IsNull = *isNull;
/*
* Many functions don't (or can't!) check if an argument is NULL
* or NOT_NULL and may return TRUE (1) with *isNull TRUE
* (an_int4_column <> 1: int4ne returns TRUE for NULLs).
* Not having time to fix the function manager I want to fix OR:
* if we had 'x <> 1 OR x isnull' then when x is NULL
* TRUE was returned by the 'x <> 1' clause ...
* but ExecQualClause says that the qualification should *fail*
* if isnull is TRUE for any value returned by ExecEvalExpr.
* So, force this rule here:
* if isnull is TRUE then the clause failed.
* Note: nullvalue() & nonnullvalue() always sets isnull to FALSE for NULLs.
* - vadim 09/22/97
* Many functions don't (or can't!) check if an argument is
* NULL or NOT_NULL and may return TRUE (1) with *isNull TRUE
* (an_int4_column <> 1: int4ne returns TRUE for NULLs). Not
* having time to fix the function manager I want to fix OR:
* if we had 'x <> 1 OR x isnull' then when x is NULL TRUE was
* returned by the 'x <> 1' clause ... but ExecQualClause says
* that the qualification should *fail* if isnull is TRUE for
* any value returned by ExecEvalExpr. So, force this rule
* here: if isnull is TRUE then the clause failed. Note:
* nullvalue() & nonnullvalue() always sets isnull to FALSE
* for NULLs. - vadim 09/22/97
*/
const_value = 0;
}
/*
* if we have a true result, then we return it.
* if we have a true result, then we return it.
*/
if (DatumGetInt32(const_value) != 0)
return const_value;
@ -1057,18 +1051,16 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
clauses = andExpr->args;
/*
* we evaluate each of the clauses in turn,
* as soon as one is false we return that
* value. If none are false or NULL then we return
* the value of the last clause evaluated, which
* should be true.
* we evaluate each of the clauses in turn, as soon as one is false we
* return that value. If none are false or NULL then we return the
* value of the last clause evaluated, which should be true.
*/
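/*
 * Illustrative sketch (editor's addition, not from the patch itself): the
 * dual rule for AND, reusing the hypothetical TVBool pair from the OR
 * sketch earlier in this file.
 */
static TVBool
three_valued_and(const TVBool *clauses, int nclauses)
{
	TVBool		result = {true, false};
	bool		sawnull = false;
	int			i;

	for (i = 0; i < nclauses; i++)
	{
		if (clauses[i].isnull)
		{
			sawnull = true;
			continue;
		}
		if (!clauses[i].value)
			return clauses[i];	/* first FALSE clause decides the AND */
		result = clauses[i];
	}
	if (sawnull)
		result.isnull = true;	/* no FALSE clause, at least one NULL */
	return result;
}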
foreach(clause, clauses)
{
/*
* We don't iterate over sets in the quals, so pass in an isDone
* flag, but ignore it.
* We don't iterate over sets in the quals, so pass in an isDone
* flag, but ignore it.
*/
const_value = ExecEvalExpr((Node *) lfirst(clause),
econtext,
@ -1076,17 +1068,16 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
&isDone);
/*
* if the expression evaluates to null, then we
* remember it in IsNull, if none of the clauses after
* this evaluates to false we will have to set *isNull
* to true again.
* if the expression evaluates to null, then we remember it in
* IsNull, if none of the clauses after this evaluates to false we
* will have to set *isNull to true again.
*/
if (*isNull)
IsNull = *isNull;
/*
* if we have a false result, then we return it, since the
* conjunction must be false.
* if we have a false result, then we return it, since the
* conjunction must be false.
*/
if (DatumGetInt32(const_value) == 0)
return const_value;
@ -1106,7 +1097,7 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
* ----------------------------------------------------------------
*/
static Datum
ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext, bool *isNull)
ExecEvalCase(CaseExpr * caseExpr, ExprContext *econtext, bool *isNull)
{
List *clauses;
List *clause;
@ -1117,17 +1108,16 @@ ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext, bool *isNull)
clauses = caseExpr->args;
/*
* we evaluate each of the WHEN clauses in turn,
* as soon as one is true we return the corresponding
* result. If none are true then we return the value
* of the default clause, or NULL.
* we evaluate each of the WHEN clauses in turn, as soon as one is
* true we return the corresponding result. If none are true then we
* return the value of the default clause, or NULL.
*/
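/*
 * Illustrative sketch (editor's addition, not from the patch itself): the
 * WHEN/ELSE evaluation order described above.  The WhenClause struct and
 * the callbacks are hypothetical stand-ins for the real CaseWhen nodes and
 * ExecEvalExpr().
 */
#include <stddef.h>

typedef struct
{
	int			(*test) (void *ctx);	/* nonzero when this WHEN matches */
	int			(*result) (void *ctx);	/* value returned for that WHEN */
} WhenClause;

static int
eval_case(const WhenClause *whens, int nwhens,
		  int (*defresult) (void *ctx), void *ctx, int *isnull)
{
	int			i;

	*isnull = 0;
	for (i = 0; i < nwhens; i++)
	{
		if (whens[i].test(ctx) != 0)
			return whens[i].result(ctx);	/* first true WHEN wins */
	}
	if (defresult != NULL)
		return defresult(ctx);	/* ELSE clause */
	*isnull = 1;				/* no ELSE: the CASE yields NULL */
	return 0;
}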
foreach(clause, clauses)
{
/*
* We don't iterate over sets in the quals, so pass in an isDone
* flag, but ignore it.
* We don't iterate over sets in the quals, so pass in an isDone
* flag, but ignore it.
*/
wclause = lfirst(clause);
@ -1137,8 +1127,8 @@ ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext, bool *isNull)
&isDone);
/*
* if we have a true test, then we return the result,
* since the case statement is satisfied.
* if we have a true test, then we return the result, since the
* case statement is satisfied.
*/
if (DatumGetInt32(const_value) != 0)
{
@ -1159,9 +1149,7 @@ ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext, bool *isNull)
&isDone);
}
else
{
*isNull = true;
}
return const_value;
}
@ -1204,8 +1192,8 @@ ExecEvalExpr(Node *expression,
*isDone = true;
/*
* here we dispatch the work to the appropriate type
* of function given the type of our expression.
* here we dispatch the work to the appropriate type of function given
* the type of our expression.
*/
if (expression == NULL)
{
@ -1287,7 +1275,7 @@ ExecEvalExpr(Node *expression,
}
return retDatum;
} /* ExecEvalExpr() */
} /* ExecEvalExpr() */
/* ----------------------------------------------------------------
@ -1325,16 +1313,15 @@ ExecQualClause(Node *clause, ExprContext *econtext)
ExecEvalExpr(clause, econtext, &isNull, &isDone);
/*
* this is interesting behaviour here. When a clause evaluates
* to null, then we consider this as passing the qualification.
* it seems kind of like, if the qual is NULL, then there's no
* qual..
* this is interesting behaviour here. When a clause evaluates to
* null, then we consider this as passing the qualification. it seems
* kind of like, if the qual is NULL, then there's no qual..
*/
if (isNull)
return true;
/*
* remember, we return true when the qualification fails..
* remember, we return true when the qualification fails..
*/
if (DatumGetInt32(expr_value) == 0)
return true;
@ -1356,7 +1343,7 @@ ExecQual(List *qual, ExprContext *econtext)
bool result;
/*
* debugging stuff
* debugging stuff
*/
EV_printf("ExecQual: qual is ");
EV_nodeDisplay(qual);
@ -1365,18 +1352,18 @@ ExecQual(List *qual, ExprContext *econtext)
IncrProcessed();
/*
* return true immediately if no qual
* return true immediately if no qual
*/
if (qual == NIL)
return true;
/*
* a "qual" is a list of clauses. To evaluate the
* qual, we evaluate each of the clauses in the list.
* a "qual" is a list of clauses. To evaluate the qual, we evaluate
* each of the clauses in the list.
*
* ExecQualClause returns true when we know the qualification
* *failed* so we just pass each clause in qual to it until
* we know the qual failed or there are no more clauses.
* ExecQualClause returns true when we know the qualification *failed* so
* we just pass each clause in qual to it until we know the qual
* failed or there are no more clauses.
*/
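/*
 * Illustrative sketch (editor's addition, not from the patch itself): the
 * net effect of the loop below, over clause results that have already been
 * evaluated into illustrative value/isnull arrays.  The qual passes only if
 * every clause came back non-null and true; one false or NULL clause fails
 * the whole qualification.
 */
#include <stdbool.h>

static bool
qual_passes(const long *values, const bool *isnull, int nclauses)
{
	int			i;

	for (i = 0; i < nclauses; i++)
	{
		if (isnull[i] || values[i] == 0)
			return false;		/* one bad clause fails the conjunction */
	}
	return true;				/* every clause was non-null and true */
}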
result = false;
@ -1388,9 +1375,9 @@ ExecQual(List *qual, ExprContext *econtext)
}
/*
* if result is true, then it means a clause failed so we
* return false. if result is false then it means no clause
* failed so we return true.
* if result is true, then it means a clause failed so we return
* false. if result is false then it means no clause failed so we
* return true.
*/
if (result == true)
return false;
@ -1447,48 +1434,46 @@ ExecTargetList(List *targetlist,
bool isNull;
/*
* debugging stuff
* debugging stuff
*/
EV_printf("ExecTargetList: tl is ");
EV_nodeDisplay(targetlist);
EV_printf("\n");
/*
* Return a dummy tuple if the targetlist is empty.
* the dummy tuple is necessary to differentiate
* between passing and failing the qualification.
* Return a dummy tuple if the targetlist is empty. the dummy tuple is
* necessary to differentiate between passing and failing the
* qualification.
*/
if (targetlist == NIL)
{
/*
* I now think that the only time this makes
* any sense is when we run a delete query. Then
* we need to return something other than nil
* so we know to delete the tuple associated
* with the saved tupleid.. see what ExecutePlan
* does with the returned tuple.. -cim 9/21/89
* I now think that the only time this makes any sense is when we
* run a delete query. Then we need to return something other
* than nil so we know to delete the tuple associated with the
* saved tupleid.. see what ExecutePlan does with the returned
* tuple.. -cim 9/21/89
*
* It could also happen in queries like:
* retrieve (foo.all) where bar.a = 3
* It could also happen in queries like: retrieve (foo.all) where
* bar.a = 3
*
* is this a new phenomenon? it might cause bogus behavior
* if we try to free this tuple later!! I put a hook in
* ExecProject to watch out for this case -mer 24 Aug 1992
* is this a new phenomenon? it might cause bogus behavior if we try
* to free this tuple later!! I put a hook in ExecProject to watch
* out for this case -mer 24 Aug 1992
*
* We must return dummy tuple!!! Try
* select t1.x from t1, t2 where t1.y = 1 and t2.y = 1
* - t2 scan target list will be empty and so no one tuple
* will be returned! But Mer was right - dummy tuple
* must be palloced... - vadim 03/01/1999
* We must return dummy tuple!!! Try select t1.x from t1, t2 where
* t1.y = 1 and t2.y = 1 - t2 scan target list will be empty and
* so no one tuple will be returned! But Mer was right - dummy
* tuple must be palloced... - vadim 03/01/1999
*/
*isDone = true;
return (HeapTuple) palloc(1);
}
/*
* allocate an array of char's to hold the "null" information
* only if we have a really large targetlist. otherwise we use
* the stack.
* allocate an array of char's to hold the "null" information only if
* we have a really large targetlist. otherwise we use the stack.
*/
if (nodomains > 64)
{
@ -1502,20 +1487,21 @@ ExecTargetList(List *targetlist,
}
/*
* evaluate all the expressions in the target list
* evaluate all the expressions in the target list
*/
EV_printf("ExecTargetList: setting target list values\n");
*isDone = true;
foreach(tl, targetlist)
{
/*
* remember, a target list is a list of lists:
* remember, a target list is a list of lists:
*
* ((<resdom | fjoin> expr) (<resdom | fjoin> expr) ...)
* ((<resdom | fjoin> expr) (<resdom | fjoin> expr) ...)
*
* tl is a pointer to successive cdr's of the targetlist
* tle is a pointer to the target list entry in tl
* tl is a pointer to successive cdr's of the targetlist tle is a
* pointer to the target list entry in tl
*/
tle = lfirst(tl);
@ -1572,7 +1558,7 @@ ExecTargetList(List *targetlist,
curNode < nNodes;
curNode++, fjTlist = lnext(fjTlist))
{
#ifdef NOT_USED /* what is this?? */
#ifdef NOT_USED /* what is this?? */
Node *outernode = lfirst(fjTlist);
fjRes = (Resdom *) outernode->iterexpr;
@ -1590,19 +1576,19 @@ ExecTargetList(List *targetlist,
}
/*
* form the new result tuple (in the "normal" context)
* form the new result tuple (in the "normal" context)
*/
newTuple = (HeapTuple) heap_formtuple(targettype, values, null_head);
/*
* free the nulls array if we allocated one..
* free the nulls array if we allocated one..
*/
if (nodomains > 64)
{
pfree(null_head);
pfree(fjIsNull);
}
return newTuple;
}
@ -1631,13 +1617,13 @@ ExecProject(ProjectionInfo *projInfo, bool *isDone)
HeapTuple newTuple;
/*
* sanity checks
* sanity checks
*/
if (projInfo == NULL)
return (TupleTableSlot *) NULL;
/*
* get the projection info we want
* get the projection info we want
*/
slot = projInfo->pi_slot;
targetlist = projInfo->pi_targetlist;
@ -1648,7 +1634,7 @@ ExecProject(ProjectionInfo *projInfo, bool *isDone)
econtext = projInfo->pi_exprContext;
/*
* form a new (result) tuple
* form a new (result) tuple
*/
newTuple = ExecTargetList(targetlist,
len,
@ -1658,11 +1644,10 @@ ExecProject(ProjectionInfo *projInfo, bool *isDone)
isDone);
/*
* store the tuple in the projection slot and return the slot.
* store the tuple in the projection slot and return the slot.
*
* If there's no projection target list we don't want to pfree
* the bogus tuple that ExecTargetList passes back to us.
* -mer 24 Aug 1992
* If there's no projection target list we don't want to pfree the bogus
* tuple that ExecTargetList passes back to us. -mer 24 Aug 1992
*/
return (TupleTableSlot *)
ExecStoreTuple(newTuple,/* tuple to store */
@ -1670,4 +1655,3 @@ ExecProject(ProjectionInfo *projInfo, bool *isDone)
InvalidBuffer, /* tuple has no buffer */
true);
}

View File

@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.24 1999/03/23 16:50:48 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.25 1999/05/25 16:08:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -467,6 +467,7 @@ ExecSetSlotPolicy(TupleTableSlot *slot, /* slot to change */
return old_shouldFree;
}
#endif
/* --------------------------------
@ -650,6 +651,7 @@ ExecInitMarkedTupleSlot(EState *estate, MergeJoinState *mergestate)
INIT_SLOT_ALLOC;
mergestate->mj_MarkedTupleSlot = (TupleTableSlot *) slot;
}
#endif
/* ----------------

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.44 1999/03/20 01:13:22 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.45 1999/05/25 16:08:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -402,7 +402,7 @@ ExecFreeExprContext(CommonState *commonstate)
void
ExecFreeTypeInfo(CommonState *commonstate)
{
TupleDesc tupDesc;
TupleDesc tupDesc;
tupDesc = commonstate->cs_ResultTupleSlot->ttc_tupleDescriptor;
if (tupDesc == NULL)
@ -498,12 +498,12 @@ ExecAssignScanTypeFromOuterPlan(Plan *node, CommonScanState *csstate)
* Routines dealing with the structure 'attribute' which conatains
* the type information about attributes in a tuple:
*
* ExecMakeTypeInfo(noType)
* ExecMakeTypeInfo(noType)
* returns pointer to array of 'noType' structure 'attribute'.
* ExecSetTypeInfo(index, typeInfo, attNum, attLen)
* ExecSetTypeInfo(index, typeInfo, attNum, attLen)
* sets the element indexed by 'index' in typeInfo with
* the values: attNum, attLen.
* ExecFreeTypeInfo(typeInfo)
* ExecFreeTypeInfo(typeInfo)
* frees the structure 'typeInfo'.
* ----------------------------------------------------------------
*/
@ -677,7 +677,7 @@ ExecGetIndexKeyInfo(Form_pg_index indexTuple,
*/
numKeys = 0;
for (i = 0; i < INDEX_MAX_KEYS &&
indexTuple->indkey[i] != InvalidAttrNumber; i++)
indexTuple->indkey[i] != InvalidAttrNumber; i++)
numKeys++;
/* ----------------
@ -711,7 +711,7 @@ ExecGetIndexKeyInfo(Form_pg_index indexTuple,
*/
CXT1_printf("ExecGetIndexKeyInfo: context is %d\n", CurrentMemoryContext);
attKeys = (AttrNumber *)palloc(numKeys * sizeof(AttrNumber));
attKeys = (AttrNumber *) palloc(numKeys * sizeof(AttrNumber));
for (i = 0; i < numKeys; i++)
attKeys[i] = indexTuple->indkey[i];
@ -917,19 +917,20 @@ ExecOpenIndices(Oid resultRelationOid,
if (indexDesc != NULL)
{
relationDescs[i++] = indexDesc;
/*
* Hack for not btree and hash indices: they use relation level
* exclusive locking on updation (i.e. - they are not ready
* for MVCC) and so we have to exclusively lock indices here
* to prevent deadlocks if we will scan them - index_beginscan
* places AccessShareLock, indices update methods don't use
* locks at all. We release this lock in ExecCloseIndices.
* Note, that hashes use page level locking - i.e. are not
* deadlock-free, - let's them be on their way -:))
* vadim 03-12-1998
* Hack for not btree and hash indices: they use relation
* level exclusive locking on updation (i.e. - they are
* not ready for MVCC) and so we have to exclusively lock
* indices here to prevent deadlocks if we will scan them
* - index_beginscan places AccessShareLock, indices
* update methods don't use locks at all. We release this
* lock in ExecCloseIndices. Note, that hashes use page
* level locking - i.e. are not deadlock-free, - let's
* them be on their way -:)) vadim 03-12-1998
*/
if (indexDesc->rd_rel->relam != BTREE_AM_OID &&
indexDesc->rd_rel->relam != HASH_AM_OID)
if (indexDesc->rd_rel->relam != BTREE_AM_OID &&
indexDesc->rd_rel->relam != HASH_AM_OID)
LockRelation(indexDesc, AccessExclusiveLock);
}
}
@ -1014,15 +1015,17 @@ ExecCloseIndices(RelationInfo *resultRelationInfo)
{
if (relationDescs[i] == NULL)
continue;
/*
* Notes in ExecOpenIndices.
*/
if (relationDescs[i]->rd_rel->relam != BTREE_AM_OID &&
relationDescs[i]->rd_rel->relam != HASH_AM_OID)
if (relationDescs[i]->rd_rel->relam != BTREE_AM_OID &&
relationDescs[i]->rd_rel->relam != HASH_AM_OID)
UnlockRelation(relationDescs[i], AccessExclusiveLock);
index_close(relationDescs[i]);
}
/*
* XXX should free indexInfo array here too.
*/
@ -1210,7 +1213,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
result = index_insert(relationDescs[i], /* index relation */
datum, /* array of heaptuple Datums */
nulls, /* info on nulls */
&(heapTuple->t_self), /* tid of heap tuple */
&(heapTuple->t_self), /* tid of heap tuple */
heapRelation);
/* ----------------

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.25 1999/05/13 07:28:29 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.26 1999/05/25 16:08:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -107,9 +107,9 @@ init_execution_state(FunctionCachePtr fcache,
preves = (execution_state *) NULL;
planTree_list = pg_parse_and_plan(fcache->src, fcache->argOidVect,
nargs, &queryTree_list, None, FALSE);
nargs, &queryTree_list, None, FALSE);
foreach (qtl_item, queryTree_list)
foreach(qtl_item, queryTree_list)
{
Query *queryTree = lfirst(qtl_item);
Plan *planTree = lfirst(planTree_list);
@ -199,7 +199,7 @@ postquel_getnext(execution_state *es)
feature = (LAST_POSTQUEL_COMMAND(es)) ? EXEC_RETONE : EXEC_RUN;
return ExecutorRun(es->qd, es->estate, feature, (Node *)NULL, (Node *)NULL);
return ExecutorRun(es->qd, es->estate, feature, (Node *) NULL, (Node *) NULL);
}
static void

View File

@ -45,7 +45,7 @@ typedef struct AggFuncInfo
FmgrInfo finalfn;
} AggFuncInfo;
static Datum aggGetAttr(TupleTableSlot *tuple, Aggref *aggref, bool *isNull);
static Datum aggGetAttr(TupleTableSlot *tuple, Aggref * aggref, bool *isNull);
/* ---------------------------------------
@ -121,7 +121,8 @@ ExecAgg(Agg *node)
*/
/*
* We loop retrieving groups until we find one matching node->plan.qual
* We loop retrieving groups until we find one matching
* node->plan.qual
*/
do
{
@ -133,7 +134,7 @@ ExecAgg(Agg *node)
econtext = aggstate->csstate.cstate.cs_ExprContext;
nagg = length(node->aggs);
value1 = node->aggstate->csstate.cstate.cs_ExprContext->ecxt_values;
nulls = node->aggstate->csstate.cstate.cs_ExprContext->ecxt_nulls;
@ -163,7 +164,7 @@ ExecAgg(Agg *node)
finalfn_oid;
aggref->aggno = ++aggno;
/* ---------------------
* find transfer functions of all the aggregates and initialize
* their initial values
@ -172,7 +173,7 @@ ExecAgg(Agg *node)
aggname = aggref->aggname;
aggTuple = SearchSysCacheTuple(AGGNAME,
PointerGetDatum(aggname),
ObjectIdGetDatum(aggref->basetype),
ObjectIdGetDatum(aggref->basetype),
0, 0);
if (!HeapTupleIsValid(aggTuple))
elog(ERROR, "ExecAgg: cache lookup failed for aggregate \"%s\"(%s)",
@ -195,9 +196,9 @@ ExecAgg(Agg *node)
fmgr_info(xfn2_oid, &aggFuncInfo[aggno].xfn2);
aggFuncInfo[aggno].xfn2_oid = xfn2_oid;
value2[aggno] = (Datum) AggNameGetInitVal((char *) aggname,
aggp->aggbasetype,
2,
&isNull2);
aggp->aggbasetype,
2,
&isNull2);
/* ------------------------------------------
* If there is a second transition function, its initial
* value must exist -- as it does not depend on data values,
@ -213,9 +214,9 @@ ExecAgg(Agg *node)
fmgr_info(xfn1_oid, &aggFuncInfo[aggno].xfn1);
aggFuncInfo[aggno].xfn1_oid = xfn1_oid;
value1[aggno] = (Datum) AggNameGetInitVal((char *) aggname,
aggp->aggbasetype,
1,
&isNull1);
aggp->aggbasetype,
1,
&isNull1);
/* ------------------------------------------
* If the initial value for the first transition function
@ -245,6 +246,7 @@ ExecAgg(Agg *node)
outerslot = ExecProcNode(outerPlan, (Plan *) node);
if (TupIsNull(outerslot))
{
/*
* when the outerplan doesn't return a single tuple,
* create a dummy heaptuple anyway because we still need
@ -299,27 +301,29 @@ ExecAgg(Agg *node)
{
if (noInitValue[aggno])
{
/*
* value1 has not been initialized.
* This is the first non-NULL input value.
* We use it as the initial value for value1.
* value1 has not been initialized. This is the
* first non-NULL input value. We use it as the
* initial value for value1.
*
* But we can't just use it straight, we have to
* make a copy of it since the tuple from which it
* came will be freed on the next iteration of the
* But we can't just use it straight, we have to make
* a copy of it since the tuple from which it came
* will be freed on the next iteration of the
* scan. This requires finding out how to copy
* the Datum. We assume the datum is of the agg's
* basetype, or at least binary compatible with it.
* basetype, or at least binary compatible with
* it.
*/
Type aggBaseType = typeidType(aggref->basetype);
int attlen = typeLen(aggBaseType);
bool byVal = typeByVal(aggBaseType);
Type aggBaseType = typeidType(aggref->basetype);
int attlen = typeLen(aggBaseType);
bool byVal = typeByVal(aggBaseType);
if (byVal)
value1[aggno] = newVal;
else
{
if (attlen == -1) /* variable length */
if (attlen == -1) /* variable length */
attlen = VARSIZE((struct varlena *) newVal);
value1[aggno] = (Datum) palloc(attlen);
memcpy((char *) (value1[aggno]), (char *) newVal,
@ -330,13 +334,14 @@ ExecAgg(Agg *node)
}
else
{
/*
* apply the transition functions.
*/
args[0] = value1[aggno];
args[1] = newVal;
value1[aggno] = (Datum) fmgr_c(&aggfns->xfn1,
(FmgrValues *) args, &isNull1);
value1[aggno] = (Datum) fmgr_c(&aggfns->xfn1,
(FmgrValues *) args, &isNull1);
Assert(!isNull1);
}
}
@ -344,8 +349,8 @@ ExecAgg(Agg *node)
if (aggfns->xfn2.fn_addr != NULL)
{
args[0] = value2[aggno];
value2[aggno] = (Datum) fmgr_c(&aggfns->xfn2,
(FmgrValues *) args, &isNull2);
value2[aggno] = (Datum) fmgr_c(&aggfns->xfn2,
(FmgrValues *) args, &isNull2);
Assert(!isNull2);
}
}
@ -395,7 +400,7 @@ ExecAgg(Agg *node)
else
elog(NOTICE, "ExecAgg: no valid transition functions??");
value1[aggno] = (Datum) fmgr_c(&aggfns->finalfn,
(FmgrValues *) args, &(nulls[aggno]));
(FmgrValues *) args, &(nulls[aggno]));
}
else if (aggfns->xfn1.fn_addr != NULL)
{
@ -441,10 +446,11 @@ ExecAgg(Agg *node)
* As long as the retrieved group does not match the
* qualifications it is ignored and the next group is fetched
*/
if(node->plan.qual != NULL)
qual_result = ExecQual(fix_opids(node->plan.qual), econtext);
else qual_result = false;
if (node->plan.qual != NULL)
qual_result = ExecQual(fix_opids(node->plan.qual), econtext);
else
qual_result = false;
if (oneTuple)
pfree(oneTuple);
}
@ -466,7 +472,7 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
AggState *aggstate;
Plan *outerPlan;
ExprContext *econtext;
/*
* assign the node's execution state
*/
@ -478,7 +484,7 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
aggstate = makeNode(AggState);
node->aggstate = aggstate;
aggstate->agg_done = FALSE;
/*
* assign node's base id and create expression context
*/
@ -494,7 +500,7 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
ExecInitResultTupleSlot(estate, &aggstate->csstate.cstate);
econtext = aggstate->csstate.cstate.cs_ExprContext;
econtext->ecxt_values = (Datum *) palloc(sizeof(Datum) * length(node->aggs));
econtext->ecxt_values = (Datum *) palloc(sizeof(Datum) * length(node->aggs));
MemSet(econtext->ecxt_values, 0, sizeof(Datum) * length(node->aggs));
econtext->ecxt_nulls = (char *) palloc(sizeof(char) * length(node->aggs));
MemSet(econtext->ecxt_nulls, 0, sizeof(char) * length(node->aggs));
@ -538,8 +544,8 @@ int
ExecCountSlotsAgg(Agg *node)
{
return ExecCountSlotsNode(outerPlan(node)) +
ExecCountSlotsNode(innerPlan(node)) +
AGG_NSLOTS;
ExecCountSlotsNode(innerPlan(node)) +
AGG_NSLOTS;
}
/* ------------------------
@ -576,7 +582,7 @@ ExecEndAgg(Agg *node)
*/
static Datum
aggGetAttr(TupleTableSlot *slot,
Aggref *aggref,
Aggref * aggref,
bool *isNull)
{
Datum result;
@ -622,10 +628,11 @@ aggGetAttr(TupleTableSlot *slot,
return (Datum) tempSlot;
}
result = heap_getattr(heapTuple, /* tuple containing attribute */
attnum, /* attribute number of desired attribute */
tuple_type,/* tuple descriptor of tuple */
isNull); /* return: is attribute null? */
result = heap_getattr(heapTuple, /* tuple containing attribute */
attnum, /* attribute number of desired
* attribute */
tuple_type, /* tuple descriptor of tuple */
isNull); /* return: is attribute null? */
/* ----------------
* return null if att is null

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.18 1999/02/21 03:48:40 scrappy Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.19 1999/05/25 16:08:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -151,7 +151,7 @@ exec_append_initialize_next(Append *node)
if (appendstate->as_junkFilter_list)
{
estate->es_junkFilter = (JunkFilter *) nth(whichplan,
appendstate->as_junkFilter_list);
appendstate->as_junkFilter_list);
}
if (appendstate->as_result_relation_info_list)
{

View File

@ -13,7 +13,7 @@
* columns. (ie. tuples from the same group are consecutive)
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.25 1999/02/13 23:15:21 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.26 1999/05/25 16:08:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -193,8 +193,8 @@ ExecGroupOneTuple(Group *node)
grpstate->grp_done = TRUE;
return NULL;
}
grpstate->grp_firstTuple = firsttuple =
heap_copytuple(outerslot->val);
grpstate->grp_firstTuple = firsttuple =
heap_copytuple(outerslot->val);
}
/*

View File

@ -6,7 +6,7 @@
* Copyright (c) 1994, Regents of the University of California
*
*
* $Id: nodeHash.c,v 1.35 1999/05/18 21:33:06 tgl Exp $
* $Id: nodeHash.c,v 1.36 1999/05/25 16:08:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -19,7 +19,7 @@
*/
#include <sys/types.h>
#include <stdio.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
@ -80,7 +80,8 @@ ExecHash(Hash *node)
*/
for (i = 0; i < nbatch; i++)
{
File tfile = OpenTemporaryFile();
File tfile = OpenTemporaryFile();
Assert(tfile >= 0);
hashtable->innerBatchFile[i] = BufFileCreate(tfile);
}
@ -247,30 +248,33 @@ ExecHashTableCreate(Hash *node)
int i;
Portal myPortal;
char myPortalName[64];
MemoryContext oldcxt;
MemoryContext oldcxt;
/* ----------------
* Get information about the size of the relation to be hashed
* (it's the "outer" subtree of this node, but the inner relation of
* the hashjoin).
* Caution: this is only the planner's estimates, and so
* can't be trusted too far. Apply a healthy fudge factor.
* Caution: this is only the planner's estimates, and so
* can't be trusted too far. Apply a healthy fudge factor.
* ----------------
*/
outerNode = outerPlan(node);
ntuples = outerNode->plan_size;
if (ntuples <= 0) /* force a plausible size if no info */
ntuples = 1000;
/* estimate tupsize based on footprint of tuple in hashtable...
* but what about palloc overhead?
/*
* estimate tupsize based on footprint of tuple in hashtable... but
* what about palloc overhead?
*/
tupsize = MAXALIGN(outerNode->plan_width) +
MAXALIGN(sizeof(HashJoinTupleData));
inner_rel_bytes = (double) ntuples * tupsize * FUDGE_FAC;
inner_rel_bytes = (double) ntuples *tupsize * FUDGE_FAC;
/*
* Target hashtable size is SortMem kilobytes, but not less than
* sqrt(estimated inner rel size), so as to avoid horrible performance.
* sqrt(estimated inner rel size), so as to avoid horrible
* performance.
*/
hash_table_bytes = sqrt(inner_rel_bytes);
if (hash_table_bytes < (SortMem * 1024L))
@ -278,17 +282,19 @@ ExecHashTableCreate(Hash *node)
/*
* Count the number of hash buckets we want for the whole relation,
* for an average bucket load of NTUP_PER_BUCKET (per virtual bucket!).
* for an average bucket load of NTUP_PER_BUCKET (per virtual
* bucket!).
*/
totalbuckets = (int) ceil((double) ntuples * FUDGE_FAC / NTUP_PER_BUCKET);
/*
* Count the number of buckets we think will actually fit in the
* target memory size, at a loading of NTUP_PER_BUCKET (physical buckets).
* NOTE: FUDGE_FAC here determines the fraction of the hashtable space
* reserved to allow for nonuniform distribution of hash values.
* Perhaps this should be a different number from the other uses of
* FUDGE_FAC, but since we have no real good way to pick either one...
* target memory size, at a loading of NTUP_PER_BUCKET (physical
* buckets). NOTE: FUDGE_FAC here determines the fraction of the
* hashtable space reserved to allow for nonuniform distribution of
* hash values. Perhaps this should be a different number from the
* other uses of FUDGE_FAC, but since we have no real good way to pick
* either one...
*/
bucketsize = NTUP_PER_BUCKET * tupsize;
nbuckets = (int) (hash_table_bytes / (bucketsize * FUDGE_FAC));
@ -297,21 +303,25 @@ ExecHashTableCreate(Hash *node)
if (totalbuckets <= nbuckets)
{
/* We have enough space, so no batching. In theory we could
* even reduce nbuckets, but since that could lead to poor
* behavior if estimated ntuples is much less than reality,
* it seems better to make more buckets instead of fewer.
/*
* We have enough space, so no batching. In theory we could even
* reduce nbuckets, but since that could lead to poor behavior if
* estimated ntuples is much less than reality, it seems better to
* make more buckets instead of fewer.
*/
totalbuckets = nbuckets;
nbatch = 0;
}
else
{
/* Need to batch; compute how many batches we want to use.
* Note that nbatch doesn't have to have anything to do with
* the ratio totalbuckets/nbuckets; in fact, it is the number
* of groups we will use for the part of the data that doesn't
* fall into the first nbuckets hash buckets.
/*
* Need to batch; compute how many batches we want to use. Note
* that nbatch doesn't have to have anything to do with the ratio
* totalbuckets/nbuckets; in fact, it is the number of groups we
* will use for the part of the data that doesn't fall into the
* first nbuckets hash buckets.
*/
nbatch = (int) ceil((inner_rel_bytes - hash_table_bytes) /
hash_table_bytes);
@ -319,16 +329,17 @@ ExecHashTableCreate(Hash *node)
nbatch = 1;
}
/* Now, totalbuckets is the number of (virtual) hashbuckets for the
/*
* Now, totalbuckets is the number of (virtual) hashbuckets for the
* whole relation, and nbuckets is the number of physical hashbuckets
* we will use in the first pass. Data falling into the first nbuckets
* virtual hashbuckets gets handled in the first pass; everything else
* gets divided into nbatch batches to be processed in additional
* passes.
* we will use in the first pass. Data falling into the first
* nbuckets virtual hashbuckets gets handled in the first pass;
* everything else gets divided into nbatch batches to be processed in
* additional passes.
*/
#ifdef HJDEBUG
printf("nbatch = %d, totalbuckets = %d, nbuckets = %d\n",
nbatch, totalbuckets, nbuckets);
printf("nbatch = %d, totalbuckets = %d, nbuckets = %d\n",
nbatch, totalbuckets, nbuckets);
#endif
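/*
 * Illustrative sketch (editor's addition, not from the patch itself): the
 * sizing arithmetic described above, collected into one stand-alone
 * function.  FUDGE_FAC, NTUP_PER_BUCKET and the SortMem budget are passed
 * in as parameters rather than assumed, since their actual values are
 * defined elsewhere in the tree.
 */
#include <math.h>

static void
plan_hash_table(double ntuples, double tupsize,
				double fudge_fac, int ntup_per_bucket, long sort_mem_kb,
				int *nbuckets_out, int *totalbuckets_out, int *nbatch_out)
{
	double		inner_rel_bytes = ntuples * tupsize * fudge_fac;
	double		hash_table_bytes = sqrt(inner_rel_bytes);
	double		bucketsize = ntup_per_bucket * tupsize;
	int			totalbuckets;
	int			nbuckets;
	int			nbatch;

	/* memory budget: SortMem kilobytes, but never less than sqrt(size) */
	if (hash_table_bytes < sort_mem_kb * 1024L)
		hash_table_bytes = sort_mem_kb * 1024L;

	/* virtual buckets wanted for the whole inner relation */
	totalbuckets = (int) ceil(ntuples * fudge_fac / ntup_per_bucket);
	/* physical buckets that fit in the first-pass budget */
	nbuckets = (int) (hash_table_bytes / (bucketsize * fudge_fac));
	if (nbuckets <= 0)
		nbuckets = 1;

	if (totalbuckets <= nbuckets)
	{
		/* everything fits in memory: no batching, use all physical buckets */
		totalbuckets = nbuckets;
		nbatch = 0;
	}
	else
	{
		/* overflow data is split into nbatch groups for later passes */
		nbatch = (int) ceil((inner_rel_bytes - hash_table_bytes) /
							hash_table_bytes);
		if (nbatch <= 0)
			nbatch = 1;
	}

	*nbuckets_out = nbuckets;
	*totalbuckets_out = totalbuckets;
	*nbatch_out = nbatch;
}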
/* ----------------
@ -353,14 +364,16 @@ ExecHashTableCreate(Hash *node)
* ----------------
*/
i = 0;
do {
do
{
i++;
sprintf(myPortalName, "<hashtable %d>", i);
myPortal = GetPortalByName(myPortalName);
} while (PortalIsValid(myPortal));
myPortal = CreatePortal(myPortalName);
Assert(PortalIsValid(myPortal));
hashtable->myPortal = (void*) myPortal; /* kluge for circular includes */
hashtable->myPortal = (void *) myPortal; /* kluge for circular
* includes */
hashtable->hashCxt = (MemoryContext) PortalGetVariableMemory(myPortal);
hashtable->batchCxt = (MemoryContext) PortalGetHeapMemory(myPortal);
@ -392,8 +405,9 @@ ExecHashTableCreate(Hash *node)
/* The files will not be opened until later... */
}
/* Prepare portal for the first-scan space allocations;
* allocate the hashbucket array therein, and set each bucket "empty".
/*
* Prepare portal for the first-scan space allocations; allocate the
* hashbucket array therein, and set each bucket "empty".
*/
MemoryContextSwitchTo(hashtable->batchCxt);
StartPortalAllocMode(DefaultAllocMode, 0);
@ -405,9 +419,7 @@ ExecHashTableCreate(Hash *node)
elog(ERROR, "Insufficient memory for hash table.");
for (i = 0; i < nbuckets; i++)
{
hashtable->buckets[i] = NULL;
}
MemoryContextSwitchTo(oldcxt);
@ -436,7 +448,7 @@ ExecHashTableDestroy(HashJoinTable hashtable)
/* Destroy the portal to release all working memory */
/* cast here is a kluge for circular includes... */
PortalDestroy((Portal*) & hashtable->myPortal);
PortalDestroy((Portal *) &hashtable->myPortal);
/* And drop the control block */
pfree(hashtable);
@ -468,15 +480,15 @@ ExecHashTableInsert(HashJoinTable hashtable,
* put the tuple in hash table
* ---------------
*/
HashJoinTuple hashTuple;
int hashTupleSize;
HashJoinTuple hashTuple;
int hashTupleSize;
hashTupleSize = MAXALIGN(sizeof(*hashTuple)) + heapTuple->t_len;
hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
hashTupleSize);
if (hashTuple == NULL)
elog(ERROR, "Insufficient memory for hash table.");
memcpy((char *) & hashTuple->htup,
memcpy((char *) &hashTuple->htup,
(char *) heapTuple,
sizeof(hashTuple->htup));
hashTuple->htup.t_data = (HeapTupleHeader)
@ -493,8 +505,9 @@ ExecHashTableInsert(HashJoinTable hashtable,
* put the tuple into a tmp file for other batches
* -----------------
*/
int batchno = (hashtable->nbatch * (bucketno - hashtable->nbuckets)) /
(hashtable->totalbuckets - hashtable->nbuckets);
int batchno = (hashtable->nbatch * (bucketno - hashtable->nbuckets)) /
(hashtable->totalbuckets - hashtable->nbuckets);
hashtable->innerBatchSize[batchno]++;
ExecHashJoinSaveTuple(heapTuple,
hashtable->innerBatchFile[batchno]);
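/*
 * Editor's note (not from the patch itself): a worked example of the batch
 * assignment above.  Suppose nbuckets = 100 physical buckets,
 * totalbuckets = 400 virtual buckets and nbatch = 3.  A tuple hashing to
 * virtual bucket 250 does not fall in the first 100 buckets, so it is
 * spilled to a batch file:
 *
 *		batchno = (3 * (250 - 100)) / (400 - 100) = 450 / 300 = 1
 *
 * The 300 overflow buckets are thus divided evenly into 3 groups of 100,
 * and bucket 250 lands in the second group (batch index 1).
 */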
@ -563,26 +576,23 @@ ExecScanHashBucket(HashJoinState *hjstate,
List *hjclauses,
ExprContext *econtext)
{
HashJoinTable hashtable = hjstate->hj_HashTable;
HashJoinTuple hashTuple = hjstate->hj_CurTuple;
HashJoinTable hashtable = hjstate->hj_HashTable;
HashJoinTuple hashTuple = hjstate->hj_CurTuple;
/* hj_CurTuple is NULL to start scanning a new bucket, or the address
/*
* hj_CurTuple is NULL to start scanning a new bucket, or the address
* of the last tuple returned from the current bucket.
*/
if (hashTuple == NULL)
{
hashTuple = hashtable->buckets[hjstate->hj_CurBucketNo];
}
else
{
hashTuple = hashTuple->next;
}
while (hashTuple != NULL)
{
HeapTuple heapTuple = & hashTuple->htup;
HeapTuple heapTuple = &hashTuple->htup;
TupleTableSlot *inntuple;
bool qualResult;
bool qualResult;
/* insert hashtable's tuple into exec slot so ExecQual sees it */
inntuple = ExecStoreTuple(heapTuple, /* tuple to store */
@ -618,28 +628,34 @@ ExecScanHashBucket(HashJoinState *hjstate,
static int
hashFunc(Datum key, int len, bool byVal)
{
unsigned int h = 0;
unsigned char *k;
unsigned int h = 0;
unsigned char *k;
if (byVal)
{
if (byVal) {
/*
* If it's a by-value data type, use the 'len' least significant bytes
* of the Datum value. This should do the right thing on either
* bigendian or littleendian hardware --- see the Datum access
* macros in c.h.
* If it's a by-value data type, use the 'len' least significant
* bytes of the Datum value. This should do the right thing on
* either bigendian or littleendian hardware --- see the Datum
* access macros in c.h.
*/
while (len-- > 0) {
while (len-- > 0)
{
h = (h * PRIME1) ^ (key & 0xFF);
key >>= 8;
}
} else {
}
else
{
/*
* If this is a variable length type, then 'k' points to a "struct
* varlena" and len == -1. NOTE: VARSIZE returns the "real" data
* length plus the sizeof the "vl_len" attribute of varlena (the
* length information). 'k' points to the beginning of the varlena
* struct, so we have to use "VARDATA" to find the beginning of the
* "real" data.
* struct, so we have to use "VARDATA" to find the beginning of
* the "real" data.
*/
if (len == -1)
{
@ -647,9 +663,7 @@ hashFunc(Datum key, int len, bool byVal)
k = (unsigned char *) VARDATA(key);
}
else
{
k = (unsigned char *) key;
}
while (len-- > 0)
h = (h * PRIME1) ^ (*k++);
}
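/*
 * Illustrative sketch (editor's addition, not from the patch itself): the
 * by-value branch above, restated for a 4-byte integer key.  PRIME1 is
 * whatever multiplier the real header defines; 1000003u below is only a
 * stand-in so the sketch compiles on its own.
 */
#include <stdint.h>

static unsigned int
hash_int4_bytes(int32_t key)
{
	unsigned int h = 0;
	unsigned long k = (unsigned long) (uint32_t) key;
	int			len = 4;		/* attlen of an int4 */

	while (len-- > 0)
	{
		h = (h * 1000003u) ^ (k & 0xFF);	/* mix in the low byte */
		k >>= 8;							/* then shift it away */
	}
	/* ExecHashGetBucket presumably reduces h modulo totalbuckets */
	return h;
}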
@ -669,7 +683,7 @@ hashFunc(Datum key, int len, bool byVal)
void
ExecHashTableReset(HashJoinTable hashtable, long ntuples)
{
MemoryContext oldcxt;
MemoryContext oldcxt;
int nbuckets = hashtable->nbuckets;
int i;
@ -682,13 +696,14 @@ ExecHashTableReset(HashJoinTable hashtable, long ntuples)
StartPortalAllocMode(DefaultAllocMode, 0);
/*
* We still use the same number of physical buckets as in the first pass.
* (It could be different; but we already decided how many buckets would
* be appropriate for the allowed memory, so stick with that number.)
* We MUST set totalbuckets to equal nbuckets, because from now on
* no tuples will go out to temp files; there are no more virtual buckets,
* only real buckets. (This implies that tuples will go into different
* bucket numbers than they did on the first pass, but that's OK.)
* We still use the same number of physical buckets as in the first
* pass. (It could be different; but we already decided how many
* buckets would be appropriate for the allowed memory, so stick with
* that number.) We MUST set totalbuckets to equal nbuckets, because
* from now on no tuples will go out to temp files; there are no more
* virtual buckets, only real buckets. (This implies that tuples will
* go into different bucket numbers than they did on the first pass,
* but that's OK.)
*/
hashtable->totalbuckets = nbuckets;
@ -700,9 +715,7 @@ ExecHashTableReset(HashJoinTable hashtable, long ntuples)
elog(ERROR, "Insufficient memory for hash table.");
for (i = 0; i < nbuckets; i++)
{
hashtable->buckets[i] = NULL;
}
MemoryContextSwitchTo(oldcxt);
}
@ -710,6 +723,7 @@ ExecHashTableReset(HashJoinTable hashtable, long ntuples)
void
ExecReScanHash(Hash *node, ExprContext *exprCtxt, Plan *parent)
{
/*
* if chgParam of subnode is not null then plan will be re-scanned by
* first ExecProcNode.

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.20 1999/05/18 21:33:06 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.21 1999/05/25 16:08:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -23,10 +23,10 @@
#include "optimizer/clauses.h" /* for get_leftop */
static TupleTableSlot *ExecHashJoinOuterGetTuple(Plan *node, Plan *parent,
HashJoinState *hjstate);
HashJoinState *hjstate);
static TupleTableSlot *ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
BufFile *file,
TupleTableSlot *tupleSlot);
BufFile * file,
TupleTableSlot *tupleSlot);
static int ExecHashJoinGetBatch(int bucketno, HashJoinTable hashtable);
static int ExecHashJoinNewBatch(HashJoinState *hjstate);
@ -132,7 +132,8 @@ ExecHashJoin(HashJoin *node)
*/
for (i = 0; i < hashtable->nbatch; i++)
{
File tfile = OpenTemporaryFile();
File tfile = OpenTemporaryFile();
Assert(tfile >= 0);
hashtable->outerBatchFile[i] = BufFileCreate(tfile);
}
@ -149,6 +150,7 @@ ExecHashJoin(HashJoin *node)
for (;;)
{
/*
* if the current outer tuple is nil, get a new one
*/
@ -159,6 +161,7 @@ ExecHashJoin(HashJoin *node)
hjstate);
if (TupIsNull(outerTupleSlot))
{
/*
* when the last batch runs out, clean up and exit
*/
@ -168,8 +171,8 @@ ExecHashJoin(HashJoin *node)
}
/*
* now we have an outer tuple, find the corresponding bucket for
* this tuple from the hash table
* now we have an outer tuple, find the corresponding bucket
* for this tuple from the hash table
*/
econtext->ecxt_outertuple = outerTupleSlot;
hjstate->hj_CurBucketNo = ExecHashGetBucket(hashtable, econtext,
@ -184,20 +187,23 @@ ExecHashJoin(HashJoin *node)
*/
if (hashtable->curbatch == 0)
{
int batch = ExecHashJoinGetBatch(hjstate->hj_CurBucketNo,
hashtable);
int batch = ExecHashJoinGetBatch(hjstate->hj_CurBucketNo,
hashtable);
if (batch > 0)
{
/*
* Need to postpone this outer tuple to a later batch.
* Save it in the corresponding outer-batch file.
*/
int batchno = batch - 1;
int batchno = batch - 1;
hashtable->outerBatchSize[batchno]++;
ExecHashJoinSaveTuple(outerTupleSlot->val,
hashtable->outerBatchFile[batchno]);
hashtable->outerBatchFile[batchno]);
ExecClearTuple(outerTupleSlot);
continue; /* loop around for a new outer tuple */
continue; /* loop around for a new outer tuple */
}
}
}
@ -212,6 +218,7 @@ ExecHashJoin(HashJoin *node)
econtext);
if (curtuple == NULL)
break; /* out of matches */
/*
* we've got a match, but still need to test qpqual
*/
@ -427,32 +434,33 @@ ExecEndHashJoin(HashJoin *node)
static TupleTableSlot *
ExecHashJoinOuterGetTuple(Plan *node, Plan *parent, HashJoinState *hjstate)
{
HashJoinTable hashtable = hjstate->hj_HashTable;
int curbatch = hashtable->curbatch;
HashJoinTable hashtable = hjstate->hj_HashTable;
int curbatch = hashtable->curbatch;
TupleTableSlot *slot;
if (curbatch == 0)
{ /* if it is the first pass */
slot = ExecProcNode(node, parent);
if (! TupIsNull(slot))
if (!TupIsNull(slot))
return slot;
/*
* We have just reached the end of the first pass.
* Try to switch to a saved batch.
* We have just reached the end of the first pass. Try to switch
* to a saved batch.
*/
curbatch = ExecHashJoinNewBatch(hjstate);
}
/*
* Try to read from a temp file.
* Loop allows us to advance to new batch as needed.
* Try to read from a temp file. Loop allows us to advance to new
* batch as needed.
*/
while (curbatch <= hashtable->nbatch)
{
slot = ExecHashJoinGetSavedTuple(hjstate,
hashtable->outerBatchFile[curbatch-1],
hashtable->outerBatchFile[curbatch - 1],
hjstate->hj_OuterTupleSlot);
if (! TupIsNull(slot))
if (!TupIsNull(slot))
return slot;
curbatch = ExecHashJoinNewBatch(hjstate);
}
@ -470,12 +478,12 @@ ExecHashJoinOuterGetTuple(Plan *node, Plan *parent, HashJoinState *hjstate)
static TupleTableSlot *
ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
BufFile *file,
BufFile * file,
TupleTableSlot *tupleSlot)
{
HeapTupleData htup;
size_t nread;
HeapTuple heapTuple;
HeapTupleData htup;
size_t nread;
HeapTuple heapTuple;
nread = BufFileRead(file, (void *) &htup, sizeof(HeapTupleData));
if (nread == 0)
@ -484,8 +492,8 @@ ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
elog(ERROR, "Read from hashjoin temp file failed");
heapTuple = palloc(HEAPTUPLESIZE + htup.t_len);
memcpy((char *) heapTuple, (char *) &htup, sizeof(HeapTupleData));
heapTuple->t_data = (HeapTupleHeader)
((char *) heapTuple + HEAPTUPLESIZE);
heapTuple->t_data = (HeapTupleHeader)
((char *) heapTuple + HEAPTUPLESIZE);
nread = BufFileRead(file, (void *) heapTuple->t_data, htup.t_len);
if (nread != (size_t) htup.t_len)
elog(ERROR, "Read from hashjoin temp file failed");
@ -506,16 +514,17 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
int newbatch = hashtable->curbatch + 1;
long *innerBatchSize = hashtable->innerBatchSize;
long *outerBatchSize = hashtable->outerBatchSize;
BufFile *innerFile;
BufFile *innerFile;
TupleTableSlot *slot;
ExprContext *econtext;
Var *innerhashkey;
if (newbatch > 1)
{
/*
* We no longer need the previous outer batch file;
* close it right away to free disk space.
* We no longer need the previous outer batch file; close it right
* away to free disk space.
*/
BufFileClose(hashtable->outerBatchFile[newbatch - 2]);
hashtable->outerBatchFile[newbatch - 2] = NULL;
@ -541,8 +550,8 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
return newbatch; /* no more batches */
/*
* Rewind inner and outer batch files for this batch,
* so that we can start reading them.
* Rewind inner and outer batch files for this batch, so that we can
* start reading them.
*/
if (BufFileSeek(hashtable->outerBatchFile[newbatch - 1], 0L,
SEEK_SET) != 0L)
@ -571,7 +580,8 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
}
/*
* after we build the hash table, the inner batch file is no longer needed
* after we build the hash table, the inner batch file is no longer
* needed
*/
BufFileClose(innerFile);
hashtable->innerBatchFile[newbatch - 1] = NULL;
@ -615,9 +625,9 @@ ExecHashJoinGetBatch(int bucketno, HashJoinTable hashtable)
void
ExecHashJoinSaveTuple(HeapTuple heapTuple,
BufFile *file)
BufFile * file)
{
size_t written;
size_t written;
written = BufFileWrite(file, (void *) heapTuple, sizeof(HeapTupleData));
if (written != sizeof(HeapTupleData))

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.35 1999/05/10 00:45:06 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.36 1999/05/25 16:08:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -91,13 +91,14 @@ IndexNext(IndexScan *node)
IndexScanDesc scandesc;
Relation heapRelation;
RetrieveIndexResult result;
HeapTuple tuple;
HeapTuple tuple;
TupleTableSlot *slot;
Buffer buffer = InvalidBuffer;
int numIndices;
bool bBackward;
int indexNumber;
bool bBackward;
int indexNumber;
/* ----------------
* extract necessary information from index scan node
* ----------------
@ -114,14 +115,14 @@ IndexNext(IndexScan *node)
/*
* Check if we are evaluating PlanQual for tuple of this relation.
* Additional checking is not good, but no other way for now.
* We could introduce new nodes for this case and handle
* IndexScan --> NewNode switching in Init/ReScan plan...
* Additional checking is not good, but no other way for now. We could
* introduce new nodes for this case and handle IndexScan --> NewNode
* switching in Init/ReScan plan...
*/
if (estate->es_evTuple != NULL &&
if (estate->es_evTuple != NULL &&
estate->es_evTuple[node->scan.scanrelid - 1] != NULL)
{
int iptr;
int iptr;
slot->ttc_buffer = InvalidBuffer;
slot->ttc_shouldFree = false;
@ -138,7 +139,7 @@ IndexNext(IndexScan *node)
scanstate->cstate.cs_ExprContext))
break;
}
if (iptr == numIndices) /* would not be returned by indices */
if (iptr == numIndices) /* would not be returned by indices */
slot->val = NULL;
/* Flag for the next call that no more tuples */
estate->es_evTupleNull[node->scan.scanrelid - 1] = true;
@ -153,26 +154,26 @@ IndexNext(IndexScan *node)
* appropriate heap tuple.. else return NULL.
* ----------------
*/
bBackward = ScanDirectionIsBackward(direction);
if (bBackward)
{
indexNumber = numIndices - indexstate->iss_IndexPtr - 1;
if (indexNumber < 0)
{
indexNumber = 0;
indexstate->iss_IndexPtr = numIndices - 1;
}
}
else
{
if ((indexNumber = indexstate->iss_IndexPtr) < 0)
{
indexNumber = 0;
indexstate->iss_IndexPtr = 0;
}
}
while (indexNumber < numIndices)
{
bBackward = ScanDirectionIsBackward(direction);
if (bBackward)
{
indexNumber = numIndices - indexstate->iss_IndexPtr - 1;
if (indexNumber < 0)
{
indexNumber = 0;
indexstate->iss_IndexPtr = numIndices - 1;
}
}
else
{
if ((indexNumber = indexstate->iss_IndexPtr) < 0)
{
indexNumber = 0;
indexstate->iss_IndexPtr = 0;
}
}
while (indexNumber < numIndices)
{
scandesc = scanDescs[indexstate->iss_IndexPtr];
while ((result = index_getnext(scandesc, direction)) != NULL)
{
@ -224,14 +225,14 @@ IndexNext(IndexScan *node)
if (BufferIsValid(buffer))
ReleaseBuffer(buffer);
}
if (indexNumber < numIndices)
{
indexNumber++;
if (bBackward)
indexstate->iss_IndexPtr--;
else
indexstate->iss_IndexPtr++;
}
if (indexNumber < numIndices)
{
indexNumber++;
if (bBackward)
indexstate->iss_IndexPtr--;
else
indexstate->iss_IndexPtr++;
}
}
/* ----------------
* if we get here it means the index scan failed so we
@ -323,7 +324,7 @@ ExecIndexReScan(IndexScan *node, ExprContext *exprCtxt, Plan *parent)
indexstate->iss_IndexPtr = -1;
/* If this is re-scanning of PlanQual ... */
if (estate->es_evTuple != NULL &&
if (estate->es_evTuple != NULL &&
estate->es_evTuple[node->scan.scanrelid - 1] != NULL)
{
estate->es_evTupleNull[node->scan.scanrelid - 1] = false;
@ -703,7 +704,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
run_keys = (n_keys <= 0) ? NULL :
(int *) palloc(n_keys * sizeof(int));
CXT1_printf("ExecInitIndexScan: context is %d\n",CurrentMemoryContext);
CXT1_printf("ExecInitIndexScan: context is %d\n", CurrentMemoryContext);
/* ----------------
* for each opclause in the given qual,

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.21 1999/02/13 23:15:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.22 1999/05/25 16:08:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -116,7 +116,7 @@ ExecMaterial(Material *node)
if (TupIsNull(slot))
break;
/*
* heap_insert changes something...
*/
@ -124,12 +124,12 @@ ExecMaterial(Material *node)
heapTuple = heap_copytuple(slot->val);
else
heapTuple = slot->val;
heap_insert(tempRelation, heapTuple);
if (slot->ttc_buffer != InvalidBuffer)
pfree(heapTuple);
ExecClearTuple(slot);
}
currentRelation = tempRelation;
@ -360,8 +360,8 @@ ExecMaterialReScan(Material *node, ExprContext *exprCtxt, Plan *parent)
return;
matstate->csstate.css_currentScanDesc = ExecReScanR(matstate->csstate.css_currentRelation,
matstate->csstate.css_currentScanDesc,
node->plan.state->es_direction, 0, NULL);
matstate->csstate.css_currentScanDesc,
node->plan.state->es_direction, 0, NULL);
}

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.26 1999/05/10 00:45:07 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.27 1999/05/25 16:08:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -103,7 +103,7 @@ static bool MergeCompare(List *eqQual, List *compareQual, ExprContext *econtext)
* ----------------------------------------------------------------
*/
static List *
MJFormSkipQual(List *qualList, char * replaceopname)
MJFormSkipQual(List *qualList, char *replaceopname)
{
List *qualCopy;
List *qualcdr;
@ -148,14 +148,14 @@ MJFormSkipQual(List *qualList, char * replaceopname)
* ----------------
*/
optup = get_operator_tuple(op->opno);
if (!HeapTupleIsValid(optup)) /* shouldn't happen */
if (!HeapTupleIsValid(optup)) /* shouldn't happen */
elog(ERROR, "MJFormSkipQual: operator %u not found", op->opno);
opform = (Form_pg_operator) GETSTRUCT(optup);
oprleft = opform->oprleft;
oprright = opform->oprright;
/* ----------------
* Now look up the matching "<" or ">" operator. If there isn't one,
* Now look up the matching "<" or ">" operator. If there isn't one,
* whoever marked the "=" operator mergejoinable was a loser.
* ----------------
*/
@ -166,7 +166,7 @@ MJFormSkipQual(List *qualList, char * replaceopname)
CharGetDatum('b'));
if (!HeapTupleIsValid(optup))
elog(ERROR,
"MJFormSkipQual: mergejoin operator %u has no matching %s op",
"MJFormSkipQual: mergejoin operator %u has no matching %s op",
op->opno, replaceopname);
opform = (Form_pg_operator) GETSTRUCT(optup);

View File

@ -27,7 +27,7 @@
* SeqScan (emp.all)
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.10 1999/03/20 01:13:22 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.11 1999/05/25 16:08:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -263,8 +263,8 @@ ExecEndResult(Result *node)
* is freed at end-transaction time. -cim 6/2/91
* ----------------
*/
ExecFreeExprContext(&resstate->cstate); /* XXX - new for us - er1p */
ExecFreeTypeInfo(&resstate->cstate); /* XXX - new for us - er1p */
ExecFreeExprContext(&resstate->cstate); /* XXX - new for us - er1p */
ExecFreeTypeInfo(&resstate->cstate); /* XXX - new for us - er1p */
ExecFreeProjectionInfo(&resstate->cstate);
/* ----------------
@ -278,7 +278,8 @@ ExecEndResult(Result *node)
* ----------------
*/
ExecClearTuple(resstate->cstate.cs_ResultTupleSlot);
pfree(resstate); node->resstate = NULL; /* XXX - new for us - er1p */
pfree(resstate);
node->resstate = NULL; /* XXX - new for us - er1p */
}
void

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.17 1999/02/13 23:15:26 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.18 1999/05/25 16:08:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -68,11 +68,11 @@ SeqNext(SeqScan *node)
/*
* Check if we are evaluating PlanQual for tuple of this relation.
* Additional checking is not good, but no other way for now.
* We could introduce new nodes for this case and handle
* SeqScan --> NewNode switching in Init/ReScan plan...
* Additional checking is not good, but no other way for now. We could
* introduce new nodes for this case and handle SeqScan --> NewNode
* switching in Init/ReScan plan...
*/
if (estate->es_evTuple != NULL &&
if (estate->es_evTuple != NULL &&
estate->es_evTuple[node->scanrelid - 1] != NULL)
{
slot->ttc_buffer = InvalidBuffer;
@ -83,10 +83,11 @@ SeqNext(SeqScan *node)
return (slot);
}
slot->val = estate->es_evTuple[node->scanrelid - 1];
/*
* Note that unlike IndexScan, SeqScan never use keys
* in heap_beginscan (and this is very bad) - so, here
* we have not check are keys ok or not.
* Note that unlike IndexScan, SeqScan never use keys in
* heap_beginscan (and this is very bad) - so, here we have not
* check are keys ok or not.
*/
/* Flag for the next call that no more tuples */
estate->es_evTupleNull[node->scanrelid - 1] = true;
@ -401,10 +402,11 @@ ExecSeqReScan(SeqScan *node, ExprContext *exprCtxt, Plan *parent)
outerPlan = outerPlan((Plan *) node);
ExecReScan(outerPlan, exprCtxt, parent);
}
else /* otherwise, we are scanning a relation */
else
/* otherwise, we are scanning a relation */
{
/* If this is re-scanning of PlanQual ... */
if (estate->es_evTuple != NULL &&
if (estate->es_evTuple != NULL &&
estate->es_evTuple[node->scanrelid - 1] != NULL)
{
estate->es_evTupleNull[node->scanrelid - 1] = false;

View File

@ -58,15 +58,16 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext)
ExecReScan(plan, (ExprContext *) NULL, plan);
/*
* For all sublink types except EXPR_SUBLINK, the result type is boolean,
* and we have a fairly clear idea of how to combine multiple subitems
* and deal with NULL values or an empty subplan result.
* For all sublink types except EXPR_SUBLINK, the result type is
* boolean, and we have a fairly clear idea of how to combine multiple
* subitems and deal with NULL values or an empty subplan result.
*
* For EXPR_SUBLINK, the result type is whatever the combining operator
* returns. We have no way to deal with more than one column in the
* subplan result --- hopefully the parser forbids that. More seriously,
* it's unclear what to do with NULL values or an empty subplan result.
* For now, we error out, but should something else happen?
* subplan result --- hopefully the parser forbids that. More
* seriously, it's unclear what to do with NULL values or an empty
* subplan result. For now, we error out, but should something else
* happen?
*/
for (slot = ExecProcNode(plan, plan);
@ -105,14 +106,14 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext)
}
if (subLinkType != EXPR_SUBLINK)
{
if ((! (bool) result && !(sublink->useor)) ||
if ((!(bool) result && !(sublink->useor)) ||
((bool) result && sublink->useor))
break;
}
i++;
}
if (subLinkType == ALL_SUBLINK && ! (bool) result)
if (subLinkType == ALL_SUBLINK && !(bool) result)
break;
if (subLinkType == ANY_SUBLINK && (bool) result)
break;
@ -120,7 +121,7 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext)
if (!found)
{
/* deal with empty subplan result. Note default result is 'false' */
/* deal with empty subplan result. Note default result is 'false' */
if (subLinkType == ALL_SUBLINK)
result = (Datum) true;
else if (subLinkType == EXPR_SUBLINK)

View File

@ -3,7 +3,7 @@
* spi.c
* Server Programming Interface
*
* $Id: spi.c,v 1.37 1999/05/13 07:28:30 tgl Exp $
* $Id: spi.c,v 1.38 1999/05/25 16:08:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -19,9 +19,9 @@ static _SPI_connection *_SPI_current = NULL;
static int _SPI_connected = -1;
static int _SPI_curid = -1;
DLLIMPORT uint32 SPI_processed = 0;
DLLIMPORT uint32 SPI_processed = 0;
DLLIMPORT SPITupleTable *SPI_tuptable;
DLLIMPORT int SPI_result;
DLLIMPORT int SPI_result;
static int _SPI_execute(char *src, int tcount, _SPI_plan *plan);
static int _SPI_pquery(QueryDesc *queryDesc, EState *state, int tcount);
@ -49,8 +49,8 @@ extern void ShowUsage(void);
int
SPI_connect()
{
char pname[64];
PortalVariableMemory pvmem;
char pname[64];
PortalVariableMemory pvmem;
/*
* It's possible on startup and after commit/abort. In future we'll
@ -345,8 +345,8 @@ SPI_modifytuple(Relation rel, HeapTuple tuple, int natts, int *attnum,
mtuple = heap_formtuple(rel->rd_att, v, n);
infomask = mtuple->t_data->t_infomask;
memmove(&(mtuple->t_data->t_oid), &(tuple->t_data->t_oid),
((char *) &(tuple->t_data->t_hoff) -
(char *) &(tuple->t_data->t_oid)));
((char *) &(tuple->t_data->t_hoff) -
(char *) &(tuple->t_data->t_oid)));
mtuple->t_data->t_infomask = infomask;
mtuple->t_data->t_natts = numberOfAttributes;
}
@ -411,8 +411,8 @@ SPI_getvalue(HeapTuple tuple, TupleDesc tupdesc, int fnumber)
val = heap_getattr(tuple, fnumber, tupdesc, &isnull);
if (isnull)
return NULL;
if (! getTypeOutAndElem((Oid) tupdesc->attrs[fnumber - 1]->atttypid,
&foutoid, &typelem))
if (!getTypeOutAndElem((Oid) tupdesc->attrs[fnumber - 1]->atttypid,
&foutoid, &typelem))
{
SPI_result = SPI_ERROR_NOOUTFUNC;
return NULL;
@ -549,13 +549,13 @@ SPI_pfree(void *pointer)
/* =================== private functions =================== */
/*
* spi_printtup
* spi_printtup
* store tuple retrieved by Executor into SPITupleTable
* of current SPI procedure
*
*/
void
spi_printtup(HeapTuple tuple, TupleDesc tupdesc, DestReceiver* self)
spi_printtup(HeapTuple tuple, TupleDesc tupdesc, DestReceiver * self)
{
SPITupleTable *tuptable;
MemoryContext oldcxt;
@ -633,12 +633,13 @@ _SPI_execute(char *src, int tcount, _SPI_plan *plan)
_SPI_current->qtlist = queryTree_list;
foreach (queryTree_list_item, queryTree_list)
foreach(queryTree_list_item, queryTree_list)
{
queryTree = (Query *) lfirst(queryTree_list_item);
planTree = lfirst(planTree_list);
planTree_list = lnext(planTree_list);
islastquery = (planTree_list == NIL); /* assume lists are same len */
islastquery = (planTree_list == NIL); /* assume lists are same
* len */
if (queryTree->commandType == CMD_UTILITY)
{
@ -658,7 +659,7 @@ _SPI_execute(char *src, int tcount, _SPI_plan *plan)
if (plan == NULL)
{
ProcessUtility(queryTree->utilityStmt, None);
if (! islastquery)
if (!islastquery)
CommandCounterIncrement();
else
return res;
@ -717,17 +718,18 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, char *Nulls, int tcount)
_SPI_current->tuptable = NULL;
_SPI_current->qtlist = NULL;
foreach (queryTree_list_item, queryTree_list)
foreach(queryTree_list_item, queryTree_list)
{
queryTree = (Query *) lfirst(queryTree_list_item);
planTree = lfirst(planTree_list);
planTree_list = lnext(planTree_list);
islastquery = (planTree_list == NIL); /* assume lists are same len */
islastquery = (planTree_list == NIL); /* assume lists are same
* len */
if (queryTree->commandType == CMD_UTILITY)
{
ProcessUtility(queryTree->utilityStmt, None);
if (! islastquery)
if (!islastquery)
CommandCounterIncrement();
else
return SPI_OK_UTILITY;
@ -777,7 +779,7 @@ _SPI_pquery(QueryDesc *queryDesc, EState *state, int tcount)
char *intoName = NULL;
int res;
Const tcount_const;
Node *count = NULL;
Node *count = NULL;
switch (operation)
{
@ -833,18 +835,18 @@ _SPI_pquery(QueryDesc *queryDesc, EState *state, int tcount)
* ----------------
*/
memset(&tcount_const, 0, sizeof(tcount_const));
tcount_const.type = T_Const;
tcount_const.consttype = INT4OID;
tcount_const.constlen = sizeof(int4);
tcount_const.constvalue = (Datum)tcount;
tcount_const.constisnull = FALSE;
tcount_const.constbyval = TRUE;
tcount_const.constisset = FALSE;
tcount_const.constiscast = FALSE;
count = (Node *)&tcount_const;
tcount_const.type = T_Const;
tcount_const.consttype = INT4OID;
tcount_const.constlen = sizeof(int4);
tcount_const.constvalue = (Datum) tcount;
tcount_const.constisnull = FALSE;
tcount_const.constbyval = TRUE;
tcount_const.constisset = FALSE;
tcount_const.constiscast = FALSE;
count = (Node *) &tcount_const;
}
if (state == NULL) /* plan preparation */
return res;
#ifdef SPI_EXECUTOR_STATS
@ -922,7 +924,7 @@ _SPI_procmem()
}
/*
* _SPI_begin_call
* _SPI_begin_call
*
*/
static int

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/lib/Attic/fstack.c,v 1.10 1999/02/13 23:15:34 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/lib/Attic/fstack.c,v 1.11 1999/05/25 16:08:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -20,20 +20,20 @@
*/
/*
* FixedItemIsValid
* FixedItemIsValid
* True iff item is valid.
*/
#define FixedItemIsValid(item) PointerIsValid(item)
/*
* FixedStackGetItemBase
* FixedStackGetItemBase
* Returns base of enclosing structure.
*/
#define FixedStackGetItemBase(stack, item) \
((Pointer)((char *)(item) - (stack)->offset))
/*
* FixedStackGetItem
* FixedStackGetItem
* Returns item of given pointer to enclosing structure.
*/
#define FixedStackGetItem(stack, pointer) \
@ -84,7 +84,7 @@ FixedStackPush(FixedStack stack, Pointer pointer)
#ifdef USE_ASSERT_CHECKING
/*
* FixedStackContains
* FixedStackContains
* True iff ordered stack contains given element.
*
* Note:

View File

@ -8,7 +8,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: stringinfo.c,v 1.15 1999/04/25 03:19:25 tgl Exp $
* $Id: stringinfo.c,v 1.16 1999/05/25 16:08:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -67,17 +67,18 @@ initStringInfo(StringInfo str)
static void
enlargeStringInfo(StringInfo str, int needed)
{
int newlen;
char *newdata;
int newlen;
char *newdata;
needed += str->len + 1; /* total space required now */
if (needed <= str->maxlen)
return; /* got enough space already */
/*
* We don't want to allocate just a little more space with each append;
* for efficiency, double the buffer size each time it overflows.
* Actually, we might need to more than double it if 'needed' is big...
* We don't want to allocate just a little more space with each
* append; for efficiency, double the buffer size each time it
* overflows. Actually, we might need to more than double it if
* 'needed' is big...
*/
newlen = 2 * str->maxlen;
while (needed > newlen)
@ -86,7 +87,7 @@ enlargeStringInfo(StringInfo str, int needed)
newdata = palloc(newlen);
if (newdata == NULL)
elog(ERROR,
"enlargeStringInfo: Out of memory (%d bytes requested)", newlen);
"enlargeStringInfo: Out of memory (%d bytes requested)", newlen);
/* OK, transfer data into new buffer, and release old buffer */
memcpy(newdata, str->data, str->len + 1);
@ -107,11 +108,11 @@ enlargeStringInfo(StringInfo str, int needed)
* generated in a single call (not on the total string length).
*/
void
appendStringInfo(StringInfo str, const char *fmt, ...)
appendStringInfo(StringInfo str, const char *fmt,...)
{
va_list args;
char buffer[1024];
int buflen;
va_list args;
char buffer[1024];
int buflen;
Assert(str != NULL);
@ -164,7 +165,8 @@ appendBinaryStringInfo(StringInfo str, const char *data, int datalen)
memcpy(str->data + str->len, data, datalen);
str->len += datalen;
/* Keep a trailing null in place, even though it's probably useless
/*
* Keep a trailing null in place, even though it's probably useless
* for binary data...
*/
str->data[str->len] = '\0';

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.35 1999/04/16 04:59:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.36 1999/05/25 16:08:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -75,13 +75,13 @@ static int map_old_to_new(Port *port, UserAuth old, int status);
static int
pg_krb4_recvauth(Port *port)
{
long krbopts = 0; /* one-way authentication */
KTEXT_ST clttkt;
char instance[INST_SZ+1],
version[KRB_SENDAUTH_VLEN+1];
AUTH_DAT auth_data;
Key_schedule key_sched;
int status;
long krbopts = 0; /* one-way authentication */
KTEXT_ST clttkt;
char instance[INST_SZ + 1],
version[KRB_SENDAUTH_VLEN + 1];
AUTH_DAT auth_data;
Key_schedule key_sched;
int status;
strcpy(instance, "*"); /* don't care, but arg gets expanded
* anyway */
@ -99,7 +99,7 @@ pg_krb4_recvauth(Port *port)
if (status != KSUCCESS)
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_krb4_recvauth: kerberos error: %s\n", krb_err_txt[status]);
"pg_krb4_recvauth: kerberos error: %s\n", krb_err_txt[status]);
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
return STATUS_ERROR;
@ -107,7 +107,7 @@ pg_krb4_recvauth(Port *port)
if (strncmp(version, PG_KRB4_VERSION, KRB_SENDAUTH_VLEN))
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_krb4_recvauth: protocol version != \"%s\"\n", PG_KRB4_VERSION);
"pg_krb4_recvauth: protocol version != \"%s\"\n", PG_KRB4_VERSION);
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
return STATUS_ERROR;
@ -115,8 +115,8 @@ pg_krb4_recvauth(Port *port)
if (strncmp(port->user, auth_data.pname, SM_USER))
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_krb4_recvauth: name \"%s\" != \"%s\"\n",
port->user, auth_data.pname);
"pg_krb4_recvauth: name \"%s\" != \"%s\"\n",
port->user, auth_data.pname);
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
return STATUS_ERROR;
@ -129,7 +129,7 @@ static int
pg_krb4_recvauth(Port *port)
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_krb4_recvauth: Kerberos not implemented on this server.\n");
"pg_krb4_recvauth: Kerberos not implemented on this server.\n");
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
@ -223,7 +223,7 @@ pg_krb5_recvauth(Port *port)
if (code = krb5_parse_name(servbuf, &server))
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_krb5_recvauth: Kerberos error %d in krb5_parse_name\n", code);
"pg_krb5_recvauth: Kerberos error %d in krb5_parse_name\n", code);
com_err("pg_krb5_recvauth", code, "in krb5_parse_name");
return STATUS_ERROR;
}
@ -256,7 +256,7 @@ pg_krb5_recvauth(Port *port)
(krb5_authenticator **) NULL))
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_krb5_recvauth: Kerberos error %d in krb5_recvauth\n", code);
"pg_krb5_recvauth: Kerberos error %d in krb5_recvauth\n", code);
com_err("pg_krb5_recvauth", code, "in krb5_recvauth");
krb5_free_principal(server);
return STATUS_ERROR;
@ -271,7 +271,7 @@ pg_krb5_recvauth(Port *port)
if ((code = krb5_unparse_name(client, &kusername)))
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_krb5_recvauth: Kerberos error %d in krb5_unparse_name\n", code);
"pg_krb5_recvauth: Kerberos error %d in krb5_unparse_name\n", code);
com_err("pg_krb5_recvauth", code, "in krb5_unparse_name");
krb5_free_principal(client);
return STATUS_ERROR;
@ -280,7 +280,7 @@ pg_krb5_recvauth(Port *port)
if (!kusername)
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_krb5_recvauth: could not decode username\n");
"pg_krb5_recvauth: could not decode username\n");
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
return STATUS_ERROR;
@ -289,7 +289,7 @@ pg_krb5_recvauth(Port *port)
if (strncmp(username, kusername, SM_USER))
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_krb5_recvauth: name \"%s\" != \"%s\"\n", port->user, kusername);
"pg_krb5_recvauth: name \"%s\" != \"%s\"\n", port->user, kusername);
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
pfree(kusername);
@ -304,7 +304,7 @@ static int
pg_krb5_recvauth(Port *port)
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_krb5_recvauth: Kerberos not implemented on this server.\n");
"pg_krb5_recvauth: Kerberos not implemented on this server.\n");
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
@ -359,7 +359,7 @@ pg_passwordv0_recvauth(void *arg, PacketLen len, void *pkt)
if (user == NULL || password == NULL)
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_password_recvauth: badly formed password packet.\n");
"pg_password_recvauth: badly formed password packet.\n");
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
@ -405,7 +405,7 @@ pg_passwordv0_recvauth(void *arg, PacketLen len, void *pkt)
void
auth_failed(Port *port)
{
char buffer[512];
char buffer[512];
const char *authmethod = "Unknown auth method:";
switch (port->auth_method)
@ -449,9 +449,9 @@ be_recvauth(Port *port)
/*
* Get the authentication method to use for this frontend/database
* combination. Note: a failure return indicates a problem with
* the hba config file, not with the request. hba.c should have
* dropped an error message into the postmaster logfile if it failed.
* combination. Note: a failure return indicates a problem with the
* hba config file, not with the request. hba.c should have dropped
* an error message into the postmaster logfile if it failed.
*/
if (hba_getauthmethod(&port->raddr, port->user, port->database,
@ -470,27 +470,28 @@ be_recvauth(Port *port)
{
/* Handle new style authentication. */
AuthRequest areq = AUTH_REQ_OK;
PacketDoneProc auth_handler = NULL;
AuthRequest areq = AUTH_REQ_OK;
PacketDoneProc auth_handler = NULL;
switch (port->auth_method)
{
case uaReject:
/*
* This could have come from an explicit "reject" entry
* in pg_hba.conf, but more likely it means there was no
* matching entry. Take pity on the poor user and issue
* a helpful error message. NOTE: this is not a security
* breach, because all the info reported here is known
* at the frontend and must be assumed known to bad guys.
* This could have come from an explicit "reject" entry in
* pg_hba.conf, but more likely it means there was no
* matching entry. Take pity on the poor user and issue a
* helpful error message. NOTE: this is not a security
* breach, because all the info reported here is known at
* the frontend and must be assumed known to bad guys.
* We're merely helping out the less clueful good guys.
* NOTE 2: libpq-be.h defines the maximum error message
* length as 99 characters. It probably wouldn't hurt
* anything to increase it, but there might be some
* client out there that will fail. So, be terse.
* anything to increase it, but there might be some client
* out there that will fail. So, be terse.
*/
{
char buffer[512];
char buffer[512];
const char *hostinfo = "localhost";
if (port->raddr.sa.sa_family == AF_INET)

View File

@ -6,7 +6,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: be-dumpdata.c,v 1.23 1999/05/10 00:45:08 momjian Exp $
* $Id: be-dumpdata.c,v 1.24 1999/05/25 16:08:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -130,8 +130,8 @@ static u_int be_portalcnt = 0;
PortalEntry *
be_newportal(void)
{
PortalEntry *entry;
char buf[PortalNameLength];
PortalEntry *entry;
char buf[PortalNameLength];
/* ----------------
* generate a new name
@ -208,7 +208,7 @@ be_typeinit(PortalEntry *entry,
* ----------------
*/
void
be_printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
be_printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver * self)
{
int i;
Datum attr;

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.32 1999/05/10 00:45:09 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.33 1999/05/25 16:08:57 momjian Exp $
*
* NOTES
* This should be moved to a more appropriate place. It is here
@ -131,8 +131,8 @@ lo_close(int fd)
int
lo_read(int fd, char *buf, int len)
{
MemoryContext currentContext;
int status;
MemoryContext currentContext;
int status;
if (fd < 0 || fd >= MAX_LOBJ_FDS)
{
@ -144,19 +144,19 @@ lo_read(int fd, char *buf, int len)
elog(ERROR, "lo_read: invalid large obj descriptor (%d)", fd);
return -3;
}
currentContext = MemoryContextSwitchTo((MemoryContext) fscxt);
currentContext = MemoryContextSwitchTo((MemoryContext) fscxt);
status = inv_read(cookies[fd], buf, len);
MemoryContextSwitchTo(currentContext);
return(status);
return (status);
}
int
lo_write(int fd, char *buf, int len)
{
MemoryContext currentContext;
int status;
MemoryContext currentContext;
int status;
if (fd < 0 || fd >= MAX_LOBJ_FDS)
{
@ -168,12 +168,12 @@ lo_write(int fd, char *buf, int len)
elog(ERROR, "lo_write: invalid large obj descriptor (%d)", fd);
return -3;
}
currentContext = MemoryContextSwitchTo((MemoryContext) fscxt);
currentContext = MemoryContextSwitchTo((MemoryContext) fscxt);
status = inv_write(cookies[fd], buf, len);
MemoryContextSwitchTo(currentContext);
return(status);
return (status);
}
@ -374,9 +374,7 @@ lo_export(Oid lobjId, text *filename)
*/
lobj = inv_open(lobjId, INV_READ);
if (lobj == NULL)
{
elog(ERROR, "lo_export: can't open inv object %u", lobjId);
}
/*
* open the file to be written to

View File

@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* crypt.c
* Look into pg_shadow and check the encrypted password with
* Look into pg_shadow and check the encrypted password with
* the one passed in from the frontend.
*
* Modification History
@ -9,7 +9,7 @@
* Dec 17, 1997 - Todd A. Brandys
* Orignal Version Completed.
*
* $Id: crypt.c,v 1.16 1999/05/09 00:54:30 tgl Exp $
* $Id: crypt.c,v 1.17 1999/05/25 16:08:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -38,8 +38,8 @@ char *
crypt_getpwdfilename()
{
static char *pfnam = NULL;
int bufsize;
static char *pfnam = NULL;
int bufsize;
bufsize = strlen(DataDir) + strlen(CRYPT_PWD_FILE) + 2;
pfnam = (char *) palloc(bufsize);
@ -54,9 +54,9 @@ char *
crypt_getpwdreloadfilename()
{
static char *rpfnam = NULL;
char *pwdfilename;
int bufsize;
static char *rpfnam = NULL;
char *pwdfilename;
int bufsize;
pwdfilename = crypt_getpwdfilename();
bufsize = strlen(pwdfilename) + strlen(CRYPT_PWD_RELOAD_SUFX) + 1;
@ -147,9 +147,7 @@ crypt_loadpwdfile()
{ /* free the old data only if this is a
* reload */
while (pwd_cache_count--)
{
pfree((void *) pwd_cache[pwd_cache_count]);
}
pfree((void *) pwd_cache);
pwd_cache = NULL;
pwd_cache_count = 0;
@ -226,9 +224,9 @@ int
crypt_getloginfo(const char *user, char **passwd, char **valuntil)
{
char *pwd,
*valdate;
void *fakeout;
char *pwd,
*valdate;
void *fakeout;
*passwd = NULL;
*valuntil = NULL;
@ -236,8 +234,8 @@ crypt_getloginfo(const char *user, char **passwd, char **valuntil)
if (pwd_cache)
{
char **pwd_entry;
char user_search[NAMEDATALEN + 2];
char **pwd_entry;
char user_search[NAMEDATALEN + 2];
snprintf(user_search, NAMEDATALEN + 2, "%s\t", user);
fakeout = (void *) &user_search;
@ -261,28 +259,22 @@ int
crypt_verify(Port *port, const char *user, const char *pgpass)
{
char *passwd,
*valuntil,
*crypt_pwd;
int retval = STATUS_ERROR;
AbsoluteTime vuntil,
current;
char *passwd,
*valuntil,
*crypt_pwd;
int retval = STATUS_ERROR;
AbsoluteTime vuntil,
current;
if (crypt_getloginfo(user, &passwd, &valuntil) == STATUS_ERROR)
{
return STATUS_ERROR;
}
if (passwd == NULL || *passwd == '\0')
{
if (passwd)
{
pfree((void *) passwd);
}
if (valuntil)
{
pfree((void *) valuntil);
}
return STATUS_ERROR;
}
@ -291,38 +283,29 @@ crypt_verify(Port *port, const char *user, const char *pgpass)
* authentication method being used for this connection.
*/
crypt_pwd =
(port->auth_method == uaCrypt ? crypt(passwd, port->salt) : passwd);
crypt_pwd =
(port->auth_method == uaCrypt ? crypt(passwd, port->salt) : passwd);
if (!strcmp(pgpass, crypt_pwd))
{
/*
* check here to be sure we are not past valuntil
*/
if (!valuntil || strcmp(valuntil, "\\N") == 0)
{
vuntil = INVALID_ABSTIME;
}
else
{
vuntil = nabstimein(valuntil);
}
current = GetCurrentAbsoluteTime();
if (vuntil != INVALID_ABSTIME && vuntil < current)
{
retval = STATUS_ERROR;
}
else
{
retval = STATUS_OK;
}
}
pfree((void *) passwd);
if (valuntil)
{
pfree((void *) valuntil);
}
return retval;
}

View File

@ -5,7 +5,7 @@
* wherein you authenticate a user by seeing what IP address the system
* says he comes from and possibly using ident).
*
* $Id: hba.c,v 1.42 1999/05/10 15:17:16 momjian Exp $
* $Id: hba.c,v 1.43 1999/05/25 16:08:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -286,7 +286,7 @@ process_hba_record(FILE *file, SockAddr *raddr, const char *user,
syntax:
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"process_hba_record: invalid syntax in pg_hba.conf file\n");
"process_hba_record: invalid syntax in pg_hba.conf file\n");
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
@ -305,14 +305,15 @@ process_open_config_file(FILE *file, SockAddr *raddr, const char *user,
This function does the same thing as find_hba_entry, only with
the config file already open on stream descriptor "file".
----------------------------------------------------------------------------*/
bool found_entry = false; /* found an applicable entry? */
bool error = false; /* found an erroneous entry? */
bool eof = false; /* end of hba file */
bool found_entry = false; /* found an applicable entry? */
bool error = false; /* found an erroneous entry? */
bool eof = false; /* end of hba file */
while (!eof && !found_entry && !error)
{
/* Process a line from the config file */
int c = getc(file);
int c = getc(file);
if (c == EOF)
eof = true;
else
@ -347,7 +348,7 @@ find_hba_entry(SockAddr *raddr, const char *user, const char *database,
* Read the config file and find an entry that allows connection from
* host "raddr", user "user", to database "database". If found,
* return *hba_ok_p = true and *userauth_p and *auth_arg representing
* the contents of that entry. If there is no matching entry, we
* the contents of that entry. If there is no matching entry, we
* set *hba_ok_p = true, *userauth_p = uaReject.
*
* If the config file is unreadable or contains invalid syntax, we
@ -355,15 +356,15 @@ find_hba_entry(SockAddr *raddr, const char *user, const char *database,
* and return without changing *hba_ok_p.
*
* If we find a file by the old name of the config file (pg_hba), we issue
* an error message because it probably needs to be converted. He didn't
* an error message because it probably needs to be converted. He didn't
* follow directions and just installed his old hba file in the new database
* system.
*/
int fd,
int fd,
bufsize;
FILE *file; /* The config file we have to read */
char *old_conf_file;
FILE *file; /* The config file we have to read */
char *old_conf_file;
/* The name of old config file that better not exist. */
@ -387,14 +388,15 @@ find_hba_entry(SockAddr *raddr, const char *user, const char *database,
"A file exists by the name used for host-based authentication "
"in prior releases of Postgres (%s). The name and format of "
"the configuration file have changed, so this file should be "
"converted.\n",
old_conf_file);
"converted.\n",
old_conf_file);
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
}
else
{
char *conf_file; /* The name of the config file we have to read */
char *conf_file; /* The name of the config file we have to
* read */
/* put together the full pathname to the config file */
bufsize = (strlen(DataDir) + strlen(CONF_FILE) + 2) * sizeof(char);
@ -407,17 +409,17 @@ find_hba_entry(SockAddr *raddr, const char *user, const char *database,
/* The open of the config file failed. */
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"find_hba_entry: Host-based authentication config file "
"does not exist or permissions are not setup correctly! "
"Unable to open file \"%s\".\n",
conf_file);
"find_hba_entry: Host-based authentication config file "
"does not exist or permissions are not setup correctly! "
"Unable to open file \"%s\".\n",
conf_file);
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
}
else
{
process_open_config_file(file, raddr, user, database, hba_ok_p,
userauth_p, auth_arg);
process_open_config_file(file, raddr, user, database, hba_ok_p,
userauth_p, auth_arg);
FreeFile(file);
}
pfree(conf_file);
@ -531,16 +533,18 @@ ident(const struct in_addr remote_ip_addr, const struct in_addr local_ip_addr,
----------------------------------------------------------------------------*/
int sock_fd, /* File descriptor for socket on which we talk to Ident */
rc; /* Return code from a locally called function */
int sock_fd, /* File descriptor for socket on which we
* talk to Ident */
rc; /* Return code from a locally called
* function */
sock_fd = socket(AF_INET, SOCK_STREAM, IPPROTO_IP);
if (sock_fd == -1)
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"Failed to create socket on which to talk to Ident server. "
"socket() returned errno = %s (%d)\n",
strerror(errno), errno);
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"Failed to create socket on which to talk to Ident server. "
"socket() returned errno = %s (%d)\n",
strerror(errno), errno);
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
}
@ -559,66 +563,66 @@ ident(const struct in_addr remote_ip_addr, const struct in_addr local_ip_addr,
/*
* Bind to the address which the client originally contacted,
* otherwise the ident server won't be able to match up the
* right connection. This is necessary if the PostgreSQL
* server is running on an IP alias.
* otherwise the ident server won't be able to match up the right
* connection. This is necessary if the PostgreSQL server is
* running on an IP alias.
*/
memset(&la, 0, sizeof(la));
la.sin_family = AF_INET;
la.sin_addr = local_ip_addr;
rc = bind(sock_fd, (struct sockaddr *) &la, sizeof(la));
rc = bind(sock_fd, (struct sockaddr *) & la, sizeof(la));
if (rc == 0)
{
rc = connect(sock_fd,
(struct sockaddr *) & ident_server, sizeof(ident_server));
(struct sockaddr *) & ident_server, sizeof(ident_server));
}
if (rc != 0)
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"Unable to connect to Ident server on the host which is "
"trying to connect to Postgres "
"(IP address %s, Port %d). "
"errno = %s (%d)\n",
inet_ntoa(remote_ip_addr), IDENT_PORT, strerror(errno), errno);
"Unable to connect to Ident server on the host which is "
"trying to connect to Postgres "
"(IP address %s, Port %d). "
"errno = %s (%d)\n",
inet_ntoa(remote_ip_addr), IDENT_PORT, strerror(errno), errno);
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
*ident_failed = true;
}
else
{
char ident_query[80];
char ident_query[80];
/* The query we send to the Ident server */
snprintf(ident_query, 80, "%d,%d\n",
ntohs(remote_port), ntohs(local_port));
ntohs(remote_port), ntohs(local_port));
rc = send(sock_fd, ident_query, strlen(ident_query), 0);
if (rc < 0)
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"Unable to send query to Ident server on the host which is "
"Unable to send query to Ident server on the host which is "
"trying to connect to Postgres (Host %s, Port %d),"
"even though we successfully connected to it. "
"errno = %s (%d)\n",
inet_ntoa(remote_ip_addr), IDENT_PORT, strerror(errno), errno);
"even though we successfully connected to it. "
"errno = %s (%d)\n",
inet_ntoa(remote_ip_addr), IDENT_PORT, strerror(errno), errno);
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
*ident_failed = true;
}
else
{
char ident_response[80 + IDENT_USERNAME_MAX];
char ident_response[80 + IDENT_USERNAME_MAX];
rc = recv(sock_fd, ident_response, sizeof(ident_response) - 1, 0);
if (rc < 0)
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"Unable to receive response from Ident server "
"on the host which is "
"trying to connect to Postgres (Host %s, Port %d),"
"even though we successfully sent our query to it. "
"errno = %s (%d)\n",
inet_ntoa(remote_ip_addr), IDENT_PORT,
strerror(errno), errno);
"Unable to receive response from Ident server "
"on the host which is "
"trying to connect to Postgres (Host %s, Port %d),"
"even though we successfully sent our query to it. "
"errno = %s (%d)\n",
inet_ntoa(remote_ip_addr), IDENT_PORT,
strerror(errno), errno);
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
*ident_failed = true;
@ -676,8 +680,8 @@ parse_map_record(FILE *file,
return;
}
}
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"Incomplete line in pg_ident: %s", file_map);
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"Incomplete line in pg_ident: %s", file_map);
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
}
@ -760,29 +764,26 @@ verify_against_usermap(const char *pguser,
{
*checks_out_p = false;
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"verify_against_usermap: hba configuration file does not "
"have the usermap field filled in in the entry that pertains "
"to this connection. That field is essential for Ident-based "
"authentication.\n");
"verify_against_usermap: hba configuration file does not "
"have the usermap field filled in in the entry that pertains "
"to this connection. That field is essential for Ident-based "
"authentication.\n");
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
}
else if (strcmp(usermap_name, "sameuser") == 0)
{
if (strcmp(ident_username, pguser) == 0)
{
*checks_out_p = true;
}
else
{
*checks_out_p = false;
}
}
else
{
FILE *file; /* The map file we have to read */
char *map_file; /* The name of the map file we have to read */
int bufsize;
FILE *file; /* The map file we have to read */
char *map_file; /* The name of the map file we have to
* read */
int bufsize;
/* put together the full pathname to the map file */
bufsize = (strlen(DataDir) + strlen(USERMAP_FILE) + 2) * sizeof(char);
@ -801,11 +802,11 @@ verify_against_usermap(const char *pguser,
*checks_out_p = false;
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"verify_against_usermap: usermap file for Ident-based "
"authentication "
"does not exist or permissions are not setup correctly! "
"Unable to open file \"%s\".\n",
map_file);
"verify_against_usermap: usermap file for Ident-based "
"authentication "
"does not exist or permissions are not setup correctly! "
"Unable to open file \"%s\".\n",
map_file);
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
}
@ -945,21 +946,21 @@ InRange(char *buf, int host)
void
GetCharSetByHost(char *TableName, int host, const char *DataDir)
{
FILE *file;
char buf[MAX_TOKEN],
FILE *file;
char buf[MAX_TOKEN],
BaseCharset[MAX_TOKEN],
OrigCharset[MAX_TOKEN],
DestCharset[MAX_TOKEN],
HostCharset[MAX_TOKEN],
c,
eof = false,
*map_file;
int key = 0,
*map_file;
int key = 0,
ChIndex = 0,
i,
bufsize;
struct CharsetItem *ChArray[MAX_CHARSETS];
struct CharsetItem *ChArray[MAX_CHARSETS];
*TableName = '\0';
bufsize = (strlen(DataDir) + strlen(CHARSET_FILE) + 2) * sizeof(char);
@ -971,9 +972,7 @@ GetCharSetByHost(char *TableName, int host, const char *DataDir)
file = AllocateFile(map_file, "rb");
#endif
if (file == NULL)
{
return;
}
while (!eof)
{
c = getc(file);
@ -1033,8 +1032,8 @@ GetCharSetByHost(char *TableName, int host, const char *DataDir)
next_token(file, buf, sizeof(buf));
if (buf[0] != '\0')
{
ChArray[ChIndex] =
(struct CharsetItem *) palloc(sizeof(struct CharsetItem));
ChArray[ChIndex] =
(struct CharsetItem *) palloc(sizeof(struct CharsetItem));
strcpy(ChArray[ChIndex]->Orig, OrigCharset);
strcpy(ChArray[ChIndex]->Dest, DestCharset);
strcpy(ChArray[ChIndex]->Table, buf);

View File

@ -1,7 +1,7 @@
/*
/*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: password.c,v 1.20 1999/01/17 06:18:26 momjian Exp $
* $Id: password.c,v 1.21 1999/05/25 16:09:00 momjian Exp $
*
*/
@ -35,8 +35,8 @@ verify_password(char *auth_arg, char *user, char *password)
if (!pw_file)
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"verify_password: couldn't open password file '%s'\n",
pw_file_fullname);
"verify_password: couldn't open password file '%s'\n",
pw_file_fullname);
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
@ -80,8 +80,8 @@ verify_password(char *auth_arg, char *user, char *password)
}
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"verify_password: password mismatch for '%s'.\n",
user);
"verify_password: password mismatch for '%s'.\n",
user);
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
@ -92,8 +92,8 @@ verify_password(char *auth_arg, char *user, char *password)
}
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"verify_password: user '%s' not found in password file.\n",
user);
"verify_password: user '%s' not found in password file.\n",
user);
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);

View File

@ -5,7 +5,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: portal.c,v 1.21 1999/04/25 03:19:20 tgl Exp $
* $Id: portal.c,v 1.22 1999/05/25 16:09:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -50,7 +50,7 @@
* see utils/mmgr/portalmem.c for why. -cim 2/22/91
*
*/
#include <stdio.h>
#include <stdio.h>
#include <string.h>
#include <postgres.h>
@ -71,7 +71,7 @@ in_range(char *msg, int value, int min, int max)
if (value < min || value >= max)
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"FATAL: %s, %d is not in range [%d,%d)\n", msg, value, min, max);
"FATAL: %s, %d is not in range [%d,%d)\n", msg, value, min, max);
pqdebug("%s", PQerrormsg);
fputs(PQerrormsg, stderr);
return 0;

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/libpq/Attic/portalbuf.c,v 1.13 1999/02/13 23:15:46 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/Attic/portalbuf.c,v 1.14 1999/05/25 16:09:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -63,7 +63,7 @@ size_t portals_array_size = 0;
/* GlobalMemory portals_mmcxt = (GlobalMemory) NULL; */
/* -------------------------------
* portals_realloc
* portals_realloc
* grow the size of the portals array by size
*
* also ensures that elements are initially NULL

View File

@ -28,7 +28,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: pqcomm.c,v 1.71 1999/05/21 01:25:06 tgl Exp $
* $Id: pqcomm.c,v 1.72 1999/05/25 16:09:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -53,7 +53,7 @@
*
* message-level I/O (and COPY OUT cruft):
* pq_putmessage - send a normal message (suppressed in COPY OUT mode)
* pq_startcopyout - inform libpq that a COPY OUT transfer is beginning
* pq_startcopyout - inform libpq that a COPY OUT transfer is beginning
* pq_endcopyout - end a COPY OUT transfer
*
*------------------------
@ -90,7 +90,7 @@
#define SOMAXCONN 5 /* from Linux listen(2) man page */
#endif /* SOMAXCONN */
extern FILE * debug_port; /* in util.c */
extern FILE *debug_port; /* in util.c */
/*
* Buffers for low-level I/O
@ -99,11 +99,13 @@ extern FILE * debug_port; /* in util.c */
#define PQ_BUFFER_SIZE 8192
static unsigned char PqSendBuffer[PQ_BUFFER_SIZE];
static int PqSendPointer; /* Next index to store a byte in PqSendBuffer */
static int PqSendPointer; /* Next index to store a byte in
* PqSendBuffer */
static unsigned char PqRecvBuffer[PQ_BUFFER_SIZE];
static int PqRecvPointer; /* Next index to read a byte from PqRecvBuffer */
static int PqRecvLength; /* End of data available in PqRecvBuffer */
static int PqRecvPointer; /* Next index to read a byte from
* PqRecvBuffer */
static int PqRecvLength; /* End of data available in PqRecvBuffer */
/*
* Message status
@ -121,7 +123,7 @@ pq_init(void)
PqSendPointer = PqRecvPointer = PqRecvLength = 0;
DoingCopyOut = false;
if (getenv("LIBPQ_DEBUG"))
debug_port = stderr;
debug_port = stderr;
}
/* --------------------------------
@ -187,8 +189,10 @@ StreamServerPort(char *hostName, short portName, int *fdP)
family;
size_t len;
int one = 1;
#ifdef HAVE_FCNTL_SETLK
int lock_fd;
#endif
family = ((hostName != NULL) ? AF_INET : AF_UNIX);
@ -204,19 +208,20 @@ StreamServerPort(char *hostName, short portName, int *fdP)
}
#ifdef ONLY_REUSE_INET_SOCKETS
if (family == AF_INET) {
if (family == AF_INET)
{
#endif
if ((setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *) &one,
sizeof(one))) == -1)
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"FATAL: StreamServerPort: setsockopt(SO_REUSEADDR) failed: %s\n",
strerror(errno));
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
return STATUS_ERROR;
}
if ((setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *) &one,
sizeof(one))) == -1)
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"FATAL: StreamServerPort: setsockopt(SO_REUSEADDR) failed: %s\n",
strerror(errno));
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
return STATUS_ERROR;
}
#ifdef ONLY_REUSE_INET_SOCKETS
}
@ -240,9 +245,10 @@ StreamServerPort(char *hostName, short portName, int *fdP)
if ((lock_fd = open(sock_path, O_WRONLY | O_NONBLOCK | O_BINARY, 0666)) >= 0)
#endif
{
struct flock lck;
lck.l_whence = SEEK_SET; lck.l_start = lck.l_len = 0;
struct flock lck;
lck.l_whence = SEEK_SET;
lck.l_start = lck.l_len = 0;
lck.l_type = F_WRLCK;
if (fcntl(lock_fd, F_SETLK, &lck) == 0)
{
@ -253,7 +259,7 @@ StreamServerPort(char *hostName, short portName, int *fdP)
TPRINTF(TRACE_VERBOSE, "flock failed for %s", sock_path);
close(lock_fd);
}
#endif /* HAVE_FCNTL_SETLK */
#endif /* HAVE_FCNTL_SETLK */
}
else
{
@ -277,9 +283,7 @@ StreamServerPort(char *hostName, short portName, int *fdP)
sock_path);
}
else
{
strcat(PQerrormsg, "\tIf not, wait a few seconds and retry.\n");
}
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
return STATUS_ERROR;
@ -300,14 +304,15 @@ StreamServerPort(char *hostName, short portName, int *fdP)
if ((lock_fd = open(sock_path, O_WRONLY | O_NONBLOCK | O_BINARY, 0666)) >= 0)
#endif
{
struct flock lck;
lck.l_whence = SEEK_SET; lck.l_start = lck.l_len = 0;
struct flock lck;
lck.l_whence = SEEK_SET;
lck.l_start = lck.l_len = 0;
lck.l_type = F_WRLCK;
if (fcntl(lock_fd, F_SETLK, &lck) != 0)
TPRINTF(TRACE_VERBOSE, "flock error for %s", sock_path);
}
#endif /* HAVE_FCNTL_SETLK */
#endif /* HAVE_FCNTL_SETLK */
}
listen(fd, SOMAXCONN);
@ -339,7 +344,7 @@ StreamServerPort(char *hostName, short portName, int *fdP)
int
StreamConnection(int server_fd, Port *port)
{
SOCKET_SIZE_TYPE addrlen;
SOCKET_SIZE_TYPE addrlen;
/* accept connection (and fill in the client (remote) address) */
addrlen = sizeof(port->raddr);
@ -419,8 +424,8 @@ pq_recvbuf(void)
if (PqRecvLength > PqRecvPointer)
{
/* still some unread data, left-justify it in the buffer */
memmove(PqRecvBuffer, PqRecvBuffer+PqRecvPointer,
PqRecvLength-PqRecvPointer);
memmove(PqRecvBuffer, PqRecvBuffer + PqRecvPointer,
PqRecvLength - PqRecvPointer);
PqRecvLength -= PqRecvPointer;
PqRecvPointer = 0;
}
@ -431,16 +436,19 @@ pq_recvbuf(void)
/* Can fill buffer from PqRecvLength and upwards */
for (;;)
{
int r = recv(MyProcPort->sock, PqRecvBuffer + PqRecvLength,
PQ_BUFFER_SIZE - PqRecvLength, 0);
int r = recv(MyProcPort->sock, PqRecvBuffer + PqRecvLength,
PQ_BUFFER_SIZE - PqRecvLength, 0);
if (r < 0)
{
if (errno == EINTR)
continue; /* Ok if interrupted */
/* We would like to use elog() here, but dare not because elog
* tries to write to the client, which will cause problems
* if we have a hard communications failure ...
* So just write the message to the postmaster log.
/*
* We would like to use elog() here, but dare not because elog
* tries to write to the client, which will cause problems if
* we have a hard communications failure ... So just write the
* message to the postmaster log.
*/
fprintf(stderr, "pq_recvbuf: recv() failed: %s\n",
strerror(errno));
@ -499,7 +507,7 @@ pq_peekbyte(void)
int
pq_getbytes(char *s, size_t len)
{
size_t amount;
size_t amount;
while (len > 0)
{
@ -539,8 +547,8 @@ pq_getstring(char *s, size_t len)
int c;
/*
* Keep on reading until we get the terminating '\0',
* discarding any bytes we don't have room for.
* Keep on reading until we get the terminating '\0', discarding any
* bytes we don't have room for.
*/
while ((c = pq_getbyte()) != EOF && c != '\0')
@ -570,7 +578,7 @@ pq_getstring(char *s, size_t len)
int
pq_putbytes(const char *s, size_t len)
{
size_t amount;
size_t amount;
while (len > 0)
{
@ -602,19 +610,24 @@ pq_flush(void)
while (bufptr < bufend)
{
int r = send(MyProcPort->sock, bufptr, bufend - bufptr, 0);
int r = send(MyProcPort->sock, bufptr, bufend - bufptr, 0);
if (r <= 0)
{
if (errno == EINTR)
continue; /* Ok if we were interrupted */
/* We would like to use elog() here, but cannot because elog
/*
* We would like to use elog() here, but cannot because elog
* tries to write to the client, which would cause a recursive
* flush attempt! So just write it out to the postmaster log.
*/
fprintf(stderr, "pq_flush: send() failed: %s\n",
strerror(errno));
/* We drop the buffered data anyway so that processing
* can continue, even though we'll probably quit soon.
/*
* We drop the buffered data anyway so that processing can
* continue, even though we'll probably quit soon.
*/
PqSendPointer = 0;
return EOF;
@ -661,7 +674,7 @@ pq_putmessage(char msgtype, const char *s, size_t len)
}
/* --------------------------------
* pq_startcopyout - inform libpq that a COPY OUT transfer is beginning
* pq_startcopyout - inform libpq that a COPY OUT transfer is beginning
* --------------------------------
*/
void
@ -682,7 +695,7 @@ pq_startcopyout(void)
void
pq_endcopyout(bool errorAbort)
{
if (! DoingCopyOut)
if (!DoingCopyOut)
return;
if (errorAbort)
pq_putbytes("\n\n\\.\n", 5);

View File

@ -15,14 +15,14 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: pqformat.c,v 1.3 1999/04/25 21:50:56 tgl Exp $
* $Id: pqformat.c,v 1.4 1999/05/25 16:09:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* INTERFACE ROUTINES
* Message assembly and output:
* pq_beginmessage - initialize StringInfo buffer
* pq_beginmessage - initialize StringInfo buffer
* pq_sendbyte - append a raw byte to a StringInfo buffer
* pq_sendint - append a binary integer to a StringInfo buffer
* pq_sendbytes - append raw data to a StringInfo buffer
@ -126,6 +126,7 @@ pq_sendcountedtext(StringInfo buf, const char *str, int slen)
{
#ifdef MULTIBYTE
const char *p;
p = (const char *) pg_server_to_client((unsigned char *) str, slen);
if (p != str) /* actual conversion has been done? */
{
@ -147,9 +148,11 @@ pq_sendcountedtext(StringInfo buf, const char *str, int slen)
void
pq_sendstring(StringInfo buf, const char *str)
{
int slen = strlen(str);
int slen = strlen(str);
#ifdef MULTIBYTE
const char *p;
p = (const char *) pg_server_to_client((unsigned char *) str, slen);
if (p != str) /* actual conversion has been done? */
{
@ -157,7 +160,7 @@ pq_sendstring(StringInfo buf, const char *str)
slen = strlen(str);
}
#endif
appendBinaryStringInfo(buf, str, slen+1);
appendBinaryStringInfo(buf, str, slen + 1);
}
/* --------------------------------
@ -167,9 +170,9 @@ pq_sendstring(StringInfo buf, const char *str)
void
pq_sendint(StringInfo buf, int i, int b)
{
unsigned char n8;
uint16 n16;
uint32 n32;
unsigned char n8;
uint16 n16;
uint32 n32;
switch (b)
{
@ -225,9 +228,11 @@ pq_endmessage(StringInfo buf)
int
pq_puttextmessage(char msgtype, const char *str)
{
int slen = strlen(str);
int slen = strlen(str);
#ifdef MULTIBYTE
const char *p;
p = (const char *) pg_server_to_client((unsigned char *) str, slen);
if (p != str) /* actual conversion has been done? */
{
@ -235,7 +240,7 @@ pq_puttextmessage(char msgtype, const char *str)
slen = strlen(str);
}
#endif
return pq_putmessage(msgtype, str, slen+1);
return pq_putmessage(msgtype, str, slen + 1);
}
/* --------------------------------
@ -247,10 +252,10 @@ pq_puttextmessage(char msgtype, const char *str)
int
pq_getint(int *result, int b)
{
int status;
unsigned char n8;
uint16 n16;
uint32 n32;
int status;
unsigned char n8;
uint16 n16;
uint32 n32;
switch (b)
{
@ -269,8 +274,10 @@ pq_getint(int *result, int b)
ntoh_l(n32) : ntohl(n32));
break;
default:
/* if we elog(ERROR) here, we will lose sync with the frontend,
* so just complain to postmaster log instead...
/*
* if we elog(ERROR) here, we will lose sync with the
* frontend, so just complain to postmaster log instead...
*/
fprintf(stderr, "pq_getint: unsupported size %d\n", b);
status = EOF;
@ -293,23 +300,26 @@ int
pq_getstr(char *s, int maxlen)
{
int c;
#ifdef MULTIBYTE
char *p;
#endif
c = pq_getstring(s, maxlen);
#ifdef MULTIBYTE
p = (char*) pg_client_to_server((unsigned char *) s, strlen(s));
p = (char *) pg_client_to_server((unsigned char *) s, strlen(s));
if (p != s) /* actual conversion has been done? */
{
int newlen = strlen(p);
int newlen = strlen(p);
if (newlen < maxlen)
strcpy(s, p);
else
{
strncpy(s, p, maxlen);
s[maxlen-1] = '\0';
s[maxlen - 1] = '\0';
}
}
#endif

View File

@ -5,7 +5,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: util.c,v 1.8 1999/02/13 23:15:49 momjian Exp $
* $Id: util.c,v 1.9 1999/05/25 16:09:03 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -17,7 +17,7 @@
* PQuntrace - turn off pqdebug() tracing
*/
#include <stdio.h>
#include <stdio.h>
#include <string.h>
#include <postgres.h>

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.80 1999/05/18 21:34:27 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.81 1999/05/25 16:09:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -92,7 +92,7 @@ CopyPlanFields(Plan *from, Plan *newnode)
newnode->chgParam = listCopy(from->chgParam);
Node_Copy(from, newnode, initPlan);
if (from->subPlan != NULL)
newnode->subPlan = SS_pull_subplan((Node*) newnode->qual);
newnode->subPlan = SS_pull_subplan((Node *) newnode->qual);
else
newnode->subPlan = NULL;
newnode->nParamExec = from->nParamExec;
@ -138,10 +138,12 @@ _copyResult(Result *from)
*/
Node_Copy(from, newnode, resconstantqual);
/* We must add subplans in resconstantqual to the new plan's subPlan list
/*
* We must add subplans in resconstantqual to the new plan's subPlan
* list
*/
newnode->plan.subPlan = nconc(newnode->plan.subPlan,
SS_pull_subplan(newnode->resconstantqual));
SS_pull_subplan(newnode->resconstantqual));
return newnode;
}
@ -369,7 +371,7 @@ _copyHashJoin(HashJoin *from)
* ----------------
*/
static void
CopyNonameFields(Noname *from, Noname *newnode)
CopyNonameFields(Noname * from, Noname * newnode)
{
newnode->nonameid = from->nonameid;
newnode->keycount = from->keycount;
@ -382,7 +384,7 @@ CopyNonameFields(Noname *from, Noname *newnode)
* ----------------
*/
static Noname *
_copyNoname(Noname *from)
_copyNoname(Noname * from)
{
Noname *newnode = makeNode(Noname);
@ -466,9 +468,10 @@ _copyAgg(Agg *from)
CopyPlanFields((Plan *) from, (Plan *) newnode);
/* Cannot copy agg list; it must be rebuilt to point to subnodes of
/*
* Cannot copy agg list; it must be rebuilt to point to subnodes of
* new node.
*/
*/
set_agg_tlist_references(newnode);
return newnode;
@ -859,7 +862,7 @@ _copyFunc(Func *from)
* ----------------
*/
static Aggref *
_copyAggref(Aggref *from)
_copyAggref(Aggref * from)
{
Aggref *newnode = makeNode(Aggref);
@ -904,7 +907,7 @@ _copySubLink(SubLink *from)
* ----------------
*/
static CaseExpr *
_copyCaseExpr(CaseExpr *from)
_copyCaseExpr(CaseExpr * from)
{
CaseExpr *newnode = makeNode(CaseExpr);
@ -926,7 +929,7 @@ _copyCaseExpr(CaseExpr *from)
* ----------------
*/
static CaseWhen *
_copyCaseWhen(CaseWhen *from)
_copyCaseWhen(CaseWhen * from)
{
CaseWhen *newnode = makeNode(CaseWhen);
@ -1170,7 +1173,7 @@ _copyIndexPath(IndexPath *from)
* ----------------
*/
static void
CopyNestPathFields(NestPath *from, NestPath *newnode)
CopyNestPathFields(NestPath * from, NestPath * newnode)
{
Node_Copy(from, newnode, pathinfo);
Node_Copy(from, newnode, outerjoinpath);
@ -1182,7 +1185,7 @@ CopyNestPathFields(NestPath *from, NestPath *newnode)
* ----------------
*/
static NestPath *
_copyNestPath(NestPath *from)
_copyNestPath(NestPath * from)
{
NestPath *newnode = makeNode(NestPath);
@ -1316,7 +1319,7 @@ _copyMergeOrder(MergeOrder *from)
* ----------------
*/
static RestrictInfo *
_copyRestrictInfo(RestrictInfo *from)
_copyRestrictInfo(RestrictInfo * from)
{
RestrictInfo *newnode = makeNode(RestrictInfo);
@ -1371,9 +1374,9 @@ _copyJoinMethod(JoinMethod *from)
* ----------------
*/
static HashInfo *
_copyHashInfo(HashInfo *from)
_copyHashInfo(HashInfo * from)
{
HashInfo *newnode = makeNode(HashInfo);
HashInfo *newnode = makeNode(HashInfo);
/* ----------------
* copy remainder of node
@ -1390,9 +1393,9 @@ _copyHashInfo(HashInfo *from)
* ----------------
*/
static MergeInfo *
_copyMergeInfo(MergeInfo *from)
_copyMergeInfo(MergeInfo * from)
{
MergeInfo *newnode = makeNode(MergeInfo);
MergeInfo *newnode = makeNode(MergeInfo);
/* ----------------
* copy remainder of node
@ -1409,7 +1412,7 @@ _copyMergeInfo(MergeInfo *from)
* ----------------
*/
static JoinInfo *
_copyJoinInfo(JoinInfo *from)
_copyJoinInfo(JoinInfo * from)
{
JoinInfo *newnode = makeNode(JoinInfo);
@ -1493,9 +1496,9 @@ _copyRangeTblEntry(RangeTblEntry *from)
}
static RowMark *
_copyRowMark(RowMark *from)
_copyRowMark(RowMark * from)
{
RowMark *newnode = makeNode(RowMark);
RowMark *newnode = makeNode(RowMark);
newnode->rti = from->rti;
newnode->info = from->info;

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/nodes/equalfuncs.c,v 1.36 1999/05/12 15:01:33 wieck Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/equalfuncs.c,v 1.37 1999/05/25 16:09:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -283,7 +283,7 @@ _equalFunc(Func *a, Func *b)
* RestrictInfo is a subclass of Node.
*/
static bool
_equalRestrictInfo(RestrictInfo *a, RestrictInfo *b)
_equalRestrictInfo(RestrictInfo * a, RestrictInfo * b)
{
Assert(IsA(a, RestrictInfo));
Assert(IsA(b, RestrictInfo));
@ -307,7 +307,7 @@ _equalRestrictInfo(RestrictInfo *a, RestrictInfo *b)
* RelOptInfo is a subclass of Node.
*/
static bool
_equalRelOptInfo(RelOptInfo *a, RelOptInfo *b)
_equalRelOptInfo(RelOptInfo * a, RelOptInfo * b)
{
Assert(IsA(a, RelOptInfo));
Assert(IsA(b, RelOptInfo));
@ -392,7 +392,7 @@ _equalIndexPath(IndexPath *a, IndexPath *b)
}
static bool
_equalNestPath(NestPath *a, NestPath *b)
_equalNestPath(NestPath * a, NestPath * b)
{
Assert(IsA_JoinPath(a));
Assert(IsA_JoinPath(b));
@ -477,7 +477,7 @@ _equalMergeOrder(MergeOrder *a, MergeOrder *b)
}
static bool
_equalHashInfo(HashInfo *a, HashInfo *b)
_equalHashInfo(HashInfo * a, HashInfo * b)
{
Assert(IsA(a, HashInfo));
Assert(IsA(b, HashInfo));
@ -524,7 +524,7 @@ _equalSubPlan(SubPlan *a, SubPlan *b)
}
static bool
_equalJoinInfo(JoinInfo *a, JoinInfo *b)
_equalJoinInfo(JoinInfo * a, JoinInfo * b)
{
Assert(IsA(a, JoinInfo));
Assert(IsA(b, JoinInfo));
@ -574,10 +574,13 @@ _equalQuery(Query *a, Query *b)
return false;
if (a->resultRelation != b->resultRelation)
return false;
if (a->into && b->into) {
if (a->into && b->into)
{
if (strcmp(a->into, b->into) != 0)
return false;
} else {
}
else
{
if (a->into != b->into)
return false;
}
@ -593,10 +596,13 @@ _equalQuery(Query *a, Query *b)
return false;
if (a->hasSubLinks != b->hasSubLinks)
return false;
if (a->uniqueFlag && b->uniqueFlag) {
if (a->uniqueFlag && b->uniqueFlag)
{
if (strcmp(a->uniqueFlag, b->uniqueFlag) != 0)
return false;
} else {
}
else
{
if (a->uniqueFlag != b->uniqueFlag)
return false;
}
@ -623,10 +629,10 @@ _equalQuery(Query *a, Query *b)
if (!equal(a->limitCount, b->limitCount))
return false;
/* We do not check the internal-to-the-planner fields
* base_rel_list and join_rel_list. They might not be
* set yet, and in any case they should be derivable
* from the other fields.
/*
* We do not check the internal-to-the-planner fields base_rel_list
* and join_rel_list. They might not be set yet, and in any case they
* should be derivable from the other fields.
*/
return true;
}
@ -634,17 +640,23 @@ _equalQuery(Query *a, Query *b)
static bool
_equalRangeTblEntry(RangeTblEntry *a, RangeTblEntry *b)
{
if (a->relname && b->relname) {
if (a->relname && b->relname)
{
if (strcmp(a->relname, b->relname) != 0)
return false;
} else {
}
else
{
if (a->relname != b->relname)
return false;
}
if (a->refname && b->refname) {
if (a->refname && b->refname)
{
if (strcmp(a->refname, b->refname) != 0)
return false;
} else {
}
else
{
if (a->refname != b->refname)
return false;
}

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/nodes/Attic/freefuncs.c,v 1.16 1999/05/12 15:01:33 wieck Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/Attic/freefuncs.c,v 1.17 1999/05/25 16:09:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -297,12 +297,12 @@ _freeHashJoin(HashJoin *node)
/* ----------------
* FreeNonameFields
*
* This function frees the fields of the Noname node. It is used by
* This function frees the fields of the Noname node. It is used by
* all the free functions for classes which inherit node Noname.
* ----------------
*/
static void
FreeNonameFields(Noname *node)
FreeNonameFields(Noname * node)
{
return;
}
@ -313,7 +313,7 @@ FreeNonameFields(Noname *node)
* ----------------
*/
static void
_freeNoname(Noname *node)
_freeNoname(Noname * node)
{
/* ----------------
* free node superclass fields
@ -562,7 +562,7 @@ _freeConst(Const *node)
* ----------------
*/
if (!node->constbyval)
pfree((void *)node->constvalue);
pfree((void *) node->constvalue);
pfree(node);
}
@ -609,7 +609,7 @@ _freeFunc(Func *node)
* ----------------
*/
static void
_freeAggref(Aggref *node)
_freeAggref(Aggref * node)
{
/* ----------------
* free remainder of node
@ -644,7 +644,7 @@ _freeSubLink(SubLink *node)
* ----------------
*/
static void
_freeCaseExpr(CaseExpr *node)
_freeCaseExpr(CaseExpr * node)
{
/* ----------------
* free remainder of node
@ -662,7 +662,7 @@ _freeCaseExpr(CaseExpr *node)
* ----------------
*/
static void
_freeCaseWhen(CaseWhen *node)
_freeCaseWhen(CaseWhen * node)
{
/* ----------------
* free remainder of node
@ -709,7 +709,7 @@ _freeArrayRef(ArrayRef *node)
* ----------------
*/
static void
_freeRelOptInfo(RelOptInfo *node)
_freeRelOptInfo(RelOptInfo * node)
{
/* ----------------
* free remainder of node
@ -757,8 +757,8 @@ FreePathFields(Path *node)
else
freeObject(node->pathorder->ord.merge);
pfree(node->pathorder); /* is it an object, but we don't have
separate free for it */
pfree(node->pathorder); /* is it an object, but we don't have
* separate free for it */
freeObject(node->pathkeys);
@ -812,7 +812,7 @@ _freeIndexPath(IndexPath *node)
* ----------------
*/
static void
FreeNestPathFields(NestPath *node)
FreeNestPathFields(NestPath * node)
{
freeObject(node->pathinfo);
freeObject(node->outerjoinpath);
@ -824,7 +824,7 @@ FreeNestPathFields(NestPath *node)
* ----------------
*/
static void
_freeNestPath(NestPath *node)
_freeNestPath(NestPath * node)
{
/* ----------------
* free the node superclass fields
@ -933,7 +933,7 @@ _freeMergeOrder(MergeOrder *node)
* ----------------
*/
static void
_freeRestrictInfo(RestrictInfo *node)
_freeRestrictInfo(RestrictInfo * node)
{
/* ----------------
* free remainder of node
@ -950,7 +950,7 @@ _freeRestrictInfo(RestrictInfo *node)
/* ----------------
* FreeJoinMethodFields
*
* This function frees the fields of the JoinMethod node. It is used by
* This function frees the fields of the JoinMethod node. It is used by
* all the free functions for classes which inherit node JoinMethod.
* ----------------
*/
@ -979,7 +979,7 @@ _freeJoinMethod(JoinMethod *node)
* ----------------
*/
static void
_freeHashInfo(HashInfo *node)
_freeHashInfo(HashInfo * node)
{
/* ----------------
* free remainder of node
@ -995,7 +995,7 @@ _freeHashInfo(HashInfo *node)
* ----------------
*/
static void
_freeMergeInfo(MergeInfo *node)
_freeMergeInfo(MergeInfo * node)
{
/* ----------------
* free remainder of node
@ -1012,7 +1012,7 @@ _freeMergeInfo(MergeInfo *node)
* ----------------
*/
static void
_freeJoinInfo(JoinInfo *node)
_freeJoinInfo(JoinInfo * node)
{
/* ----------------
* free remainder of node
@ -1066,7 +1066,7 @@ _freeRangeTblEntry(RangeTblEntry *node)
}
static void
_freeRowMark(RowMark *node)
_freeRowMark(RowMark * node)
{
pfree(node);
}
@ -1142,7 +1142,7 @@ _freeValue(Value *node)
{
switch (node->type)
{
case T_String:
case T_String:
pfree(node->val.str);
break;
default:
@ -1165,6 +1165,7 @@ freeObject(void *node)
switch (nodeTag(node))
{
/*
* PLAN NODES
*/

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/nodes/list.c,v 1.20 1999/02/22 17:29:57 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/list.c,v 1.21 1999/05/25 16:09:08 momjian Exp $
*
* NOTES
* XXX a few of the following functions are duplicated to handle
@ -96,7 +96,7 @@ lconsi(int datum, List *list)
* lappend
*
* Add obj to the end of list, or make a new list if 'list' is NIL
*
*
* MORE EXPENSIVE THAN lcons
*/
List *
@ -161,6 +161,7 @@ nreverse(List *list)
lnext(list) = lnext(rlist);
return list;
}
#endif
/*
@ -307,6 +308,7 @@ append(List *l1, List *l2)
lnext(p) = newlist2;
return newlist;
}
#endif
#ifdef NOT_USED
@ -331,6 +333,7 @@ intAppend(List *l1, List *l2)
lnext(p) = newlist2;
return newlist;
}
#endif
/*
@ -435,7 +438,7 @@ member(void *l1, List *l2)
foreach(i, l2)
if (equal((Node *) l1, (Node *) lfirst(i)))
return true;
return true;
return false;
}

View File

@ -5,7 +5,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: outfuncs.c,v 1.84 1999/05/19 16:46:11 momjian Exp $
* $Id: outfuncs.c,v 1.85 1999/05/25 16:09:09 momjian Exp $
*
* NOTES
* Every (plan) node in POSTGRES has an associated "out" routine which
@ -54,24 +54,22 @@ static void _outNode(StringInfo str, void *obj);
static void
_outIntList(StringInfo str, List *list)
{
List *l;
List *l;
appendStringInfo(str, "(");
foreach(l, list)
{
appendStringInfo(str, " %d ", lfirsti(l));
}
appendStringInfo(str, ")");
}
static void
_outCreateStmt(StringInfo str, CreateStmt *node)
{
appendStringInfo(str, " CREATE :relname %s ",
stringStringInfo(node->relname));
appendStringInfo(str, " CREATE :relname %s ",
stringStringInfo(node->relname));
appendStringInfo(str, " :istemp %s ",
node->istemp ? "true" : "false");
node->istemp ? "true" : "false");
appendStringInfo(str, " :columns ");
_outNode(str, node->tableElts);
@ -86,11 +84,11 @@ _outCreateStmt(StringInfo str, CreateStmt *node)
static void
_outIndexStmt(StringInfo str, IndexStmt *node)
{
appendStringInfo(str,
" INDEX :idxname %s :relname %s :accessMethod %s :indexParams ",
stringStringInfo(node->idxname),
stringStringInfo(node->relname),
stringStringInfo(node->accessMethod));
appendStringInfo(str,
" INDEX :idxname %s :relname %s :accessMethod %s :indexParams ",
stringStringInfo(node->idxname),
stringStringInfo(node->relname),
stringStringInfo(node->accessMethod));
_outNode(str, node->indexParams);
appendStringInfo(str, " :withClause ");
@ -103,8 +101,8 @@ _outIndexStmt(StringInfo str, IndexStmt *node)
_outNode(str, node->rangetable);
appendStringInfo(str, " :lossy %s :unique %s ",
node->lossy ? "true" : "false",
node->unique ? "true" : "false");
node->lossy ? "true" : "false",
node->unique ? "true" : "false");
}
static void
@ -125,24 +123,24 @@ static void
_outColumnDef(StringInfo str, ColumnDef *node)
{
appendStringInfo(str, " COLUMNDEF :colname %s :typename ",
stringStringInfo(node->colname));
stringStringInfo(node->colname));
_outNode(str, node->typename);
appendStringInfo(str, " :is_not_null %s :defval %s :constraints ",
node->is_not_null ? "true" : "false",
stringStringInfo(node->defval));
node->is_not_null ? "true" : "false",
stringStringInfo(node->defval));
_outNode(str, node->constraints);
}
static void
_outTypeName(StringInfo str, TypeName *node)
{
appendStringInfo(str,
" TYPENAME :name %s :timezone %s :setof %s typmod %d :arrayBounds ",
stringStringInfo(node->name),
node->timezone ? "true" : "false",
node->setof ? "true" : "false",
node->typmod);
appendStringInfo(str,
" TYPENAME :name %s :timezone %s :setof %s typmod %d :arrayBounds ",
stringStringInfo(node->name),
node->timezone ? "true" : "false",
node->setof ? "true" : "false",
node->typmod);
appendStringInfo(str, " :arrayBounds ");
_outNode(str, node->arrayBounds);
@ -152,7 +150,7 @@ static void
_outIndexElem(StringInfo str, IndexElem *node)
{
appendStringInfo(str, " INDEXELEM :name %s :args ",
stringStringInfo(node->name));
stringStringInfo(node->name));
_outNode(str, node->args);
appendStringInfo(str, " :class %s :typename ", stringStringInfo(node->class));
@ -171,20 +169,20 @@ _outQuery(StringInfo str, Query *node)
{
case T_CreateStmt:
appendStringInfo(str, " :create %s ",
stringStringInfo(((CreateStmt *) (node->utilityStmt))->relname));
stringStringInfo(((CreateStmt *) (node->utilityStmt))->relname));
_outNode(str, node->utilityStmt);
break;
case T_IndexStmt:
appendStringInfo(str, " :index %s on %s ",
stringStringInfo(((IndexStmt *) (node->utilityStmt))->idxname),
stringStringInfo(((IndexStmt *) (node->utilityStmt))->relname));
stringStringInfo(((IndexStmt *) (node->utilityStmt))->idxname),
stringStringInfo(((IndexStmt *) (node->utilityStmt))->relname));
_outNode(str, node->utilityStmt);
break;
case T_NotifyStmt:
appendStringInfo(str, " :utility %s ",
stringStringInfo(((NotifyStmt *) (node->utilityStmt))->relname));
stringStringInfo(((NotifyStmt *) (node->utilityStmt))->relname));
break;
default:
@ -192,21 +190,19 @@ _outQuery(StringInfo str, Query *node)
}
}
else
{
appendStringInfo(str, " :utility <>");
}
appendStringInfo(str,
" :resultRelation %u :into %s :isPortal %s :isBinary %s :isTemp %s :unionall %s ",
node->resultRelation,
stringStringInfo(node->into),
node->isPortal ? "true" : "false",
node->isBinary ? "true" : "false",
node->isTemp ? "true" : "false",
node->unionall ? "true" : "false");
appendStringInfo(str,
" :resultRelation %u :into %s :isPortal %s :isBinary %s :isTemp %s :unionall %s ",
node->resultRelation,
stringStringInfo(node->into),
node->isPortal ? "true" : "false",
node->isBinary ? "true" : "false",
node->isTemp ? "true" : "false",
node->unionall ? "true" : "false");
appendStringInfo(str, " :unique %s :sortClause ",
stringStringInfo(node->uniqueFlag));
appendStringInfo(str, " :unique %s :sortClause ",
stringStringInfo(node->uniqueFlag));
_outNode(str, node->sortClause);
appendStringInfo(str, " :rtable ");
@ -225,8 +221,8 @@ _outQuery(StringInfo str, Query *node)
_outNode(str, node->havingQual);
appendStringInfo(str, " :hasAggs %s :hasSubLinks %s :unionClause ",
node->hasAggs ? "true" : "false",
node->hasSubLinks ? "true" : "false");
node->hasAggs ? "true" : "false",
node->hasSubLinks ? "true" : "false");
_outNode(str, node->unionClause);
appendStringInfo(str, " :intersectClause ");
@ -255,9 +251,9 @@ _outSortClause(StringInfo str, SortClause *node)
static void
_outGroupClause(StringInfo str, GroupClause *node)
{
appendStringInfo(str, " GROUPCLAUSE :grpOpoid %u :tleGroupref %d",
node->grpOpoid,
node->tleGroupref);
appendStringInfo(str, " GROUPCLAUSE :grpOpoid %u :tleGroupref %d",
node->grpOpoid,
node->tleGroupref);
}
/*
@ -266,12 +262,12 @@ _outGroupClause(StringInfo str, GroupClause *node)
static void
_outPlanInfo(StringInfo str, Plan *node)
{
appendStringInfo(str,
":cost %g :size %d :width %d :state %s :qptargetlist ",
node->cost,
node->plan_size,
node->plan_width,
node->state ? "not-NULL" : "<>");
appendStringInfo(str,
":cost %g :size %d :width %d :state %s :qptargetlist ",
node->cost,
node->plan_size,
node->plan_width,
node->state ? "not-NULL" : "<>");
_outNode(str, node->targetlist);
appendStringInfo(str, " :qpqual ");
@ -331,9 +327,9 @@ _outAppend(StringInfo str, Append *node)
appendStringInfo(str, " :unionrtables ");
_outNode(str, node->unionrtables);
appendStringInfo(str,
" :inheritrelid %u :inheritrtable ",
node->inheritrelid);
appendStringInfo(str,
" :inheritrelid %u :inheritrtable ",
node->inheritrelid);
_outNode(str, node->inheritrtable);
}
@ -384,13 +380,13 @@ _outHashJoin(StringInfo str, HashJoin *node)
appendStringInfo(str, " :hashclauses ");
_outNode(str, node->hashclauses);
appendStringInfo(str,
" :hashjoinop %u ",
node->hashjoinop);
appendStringInfo(str,
" :hashjoinop %u ",
node->hashjoinop);
appendStringInfo(str,
" :hashdone %d ",
node->hashdone);
appendStringInfo(str,
" :hashdone %d ",
node->hashdone);
}
static void
@ -460,14 +456,14 @@ _outIndexScan(StringInfo str, IndexScan *node)
* Noname is a subclass of Plan
*/
static void
_outNoname(StringInfo str, Noname *node)
_outNoname(StringInfo str, Noname * node)
{
appendStringInfo(str, " NONAME ");
_outPlanInfo(str, (Plan *) node);
appendStringInfo(str, " :nonameid %u :keycount %d ",
node->nonameid,
node->keycount);
appendStringInfo(str, " :nonameid %u :keycount %d ",
node->nonameid,
node->keycount);
}
/*
@ -480,8 +476,8 @@ _outSort(StringInfo str, Sort *node)
_outPlanInfo(str, (Plan *) node);
appendStringInfo(str, " :nonameid %u :keycount %d ",
node->nonameid,
node->keycount);
node->nonameid,
node->keycount);
}
static void
@ -503,8 +499,8 @@ _outGroup(StringInfo str, Group *node)
/* the actual Group fields */
appendStringInfo(str, " :numCols %d :tuplePerGroup %s ",
node->numCols,
node->tuplePerGroup ? "true" : "false");
node->numCols,
node->tuplePerGroup ? "true" : "false");
}
/*
@ -517,8 +513,8 @@ _outUnique(StringInfo str, Unique *node)
_outPlanInfo(str, (Plan *) node);
appendStringInfo(str, " :nonameid %u :keycount %d ",
node->nonameid,
node->keycount);
node->nonameid,
node->keycount);
}
@ -548,18 +544,18 @@ static void
_outResdom(StringInfo str, Resdom *node)
{
appendStringInfo(str, " RESDOM :resno %d :restype %u :restypmod %d",
node->resno,
node->restype,
node->restypmod);
node->resno,
node->restype,
node->restypmod);
appendStringInfo(str, " :resname \"%s\" :reskey %d :reskeyop %u",
stringStringInfo(node->resname),
node->reskey,
node->reskeyop);
stringStringInfo(node->resname),
node->reskey,
node->reskeyop);
appendStringInfo(str, " :resgroupref %d :resjunk %s ",
node->resgroupref,
node->resjunk ? "true" : "false");
node->resgroupref,
node->resjunk ? "true" : "false");
}
static void
@ -568,14 +564,14 @@ _outFjoin(StringInfo str, Fjoin *node)
int i;
appendStringInfo(str, " FJOIN :initialized %s :nNodes %d ",
node->fj_initialized ? "true" : "false",
node->fj_nNodes);
node->fj_initialized ? "true" : "false",
node->fj_nNodes);
appendStringInfo(str, " :innerNode ");
_outNode(str, node->fj_innerNode);
appendStringInfo(str, " :results @ 0x%x :alwaysdone",
(int) node->fj_results);
appendStringInfo(str, " :results @ 0x%x :alwaysdone",
(int) node->fj_results);
for (i = 0; i < node->fj_nNodes; i++)
appendStringInfo(str, (node->fj_alwaysDone[i]) ? "true" : "false");
@ -590,7 +586,7 @@ _outExpr(StringInfo str, Expr *node)
char *opstr = NULL;
appendStringInfo(str, " EXPR :typeOid %u ",
node->typeOid);
node->typeOid);
switch (node->opType)
{
@ -626,17 +622,17 @@ _outExpr(StringInfo str, Expr *node)
static void
_outVar(StringInfo str, Var *node)
{
appendStringInfo(str,
" VAR :varno %d :varattno %d :vartype %u :vartypmod %d ",
node->varno,
node->varattno,
node->vartype,
node->vartypmod);
appendStringInfo(str,
" VAR :varno %d :varattno %d :vartype %u :vartypmod %d ",
node->varno,
node->varattno,
node->vartype,
node->vartypmod);
appendStringInfo(str, " :varlevelsup %u :varnoold %d :varoattno %d" ,
node->varlevelsup,
node->varnoold,
node->varoattno);
appendStringInfo(str, " :varlevelsup %u :varnoold %d :varoattno %d",
node->varlevelsup,
node->varnoold,
node->varoattno);
}
/*
@ -645,37 +641,37 @@ _outVar(StringInfo str, Var *node)
static void
_outConst(StringInfo str, Const *node)
{
appendStringInfo(str,
" CONST :consttype %u :constlen %d :constisnull %s :constvalue ",
node->consttype,
node->constlen,
node->constisnull ? "true" : "false");
appendStringInfo(str,
" CONST :consttype %u :constlen %d :constisnull %s :constvalue ",
node->consttype,
node->constlen,
node->constisnull ? "true" : "false");
if (node->constisnull)
appendStringInfo(str, "<>");
else
_outDatum(str, node->constvalue, node->consttype);
appendStringInfo(str, " :constbyval %s ",
node->constbyval ? "true" : "false");
appendStringInfo(str, " :constbyval %s ",
node->constbyval ? "true" : "false");
}
/*
* Aggref
*/
static void
_outAggref(StringInfo str, Aggref *node)
_outAggref(StringInfo str, Aggref * node)
{
appendStringInfo(str,
" AGGREG :aggname %s :basetype %u :aggtype %u :target ",
stringStringInfo(node->aggname),
node->basetype,
node->aggtype);
appendStringInfo(str,
" AGGREG :aggname %s :basetype %u :aggtype %u :target ",
stringStringInfo(node->aggname),
node->basetype,
node->aggtype);
_outNode(str, node->target);
appendStringInfo(str, ":aggno %d :usenulls %s",
node->aggno,
node->usenulls ? "true" : "false");
node->aggno,
node->usenulls ? "true" : "false");
}
/*
@ -684,10 +680,10 @@ _outAggref(StringInfo str, Aggref *node)
static void
_outSubLink(StringInfo str, SubLink *node)
{
appendStringInfo(str,
" SUBLINK :subLinkType %d :useor %s :lefthand ",
node->subLinkType,
node->useor ? "true" : "false");
appendStringInfo(str,
" SUBLINK :subLinkType %d :useor %s :lefthand ",
node->subLinkType,
node->useor ? "true" : "false");
_outNode(str, node->lefthand);
appendStringInfo(str, " :oper ");
@ -705,22 +701,18 @@ _outArray(StringInfo str, Array *node)
{
int i;
appendStringInfo(str,
" ARRAY :arrayelemtype %u :arrayelemlength %d :arrayelembyval %c ",
node->arrayelemtype,
node->arrayelemlength,
node->arrayelembyval ? 't' : 'f');
appendStringInfo(str,
" ARRAY :arrayelemtype %u :arrayelemlength %d :arrayelembyval %c ",
node->arrayelemtype,
node->arrayelemlength,
node->arrayelembyval ? 't' : 'f');
appendStringInfo(str, " :arrayndim %d :arraylow ", node->arrayndim);
for (i = 0; i < node->arrayndim; i++)
{
appendStringInfo(str, " %d ", node->arraylow.indx[i]);
}
appendStringInfo(str, " :arrayhigh ");
for (i = 0; i < node->arrayndim; i++)
{
appendStringInfo(str, " %d ", node->arrayhigh.indx[i]);
}
appendStringInfo(str, " :arraylen %d ", node->arraylen);
}
@ -730,14 +722,14 @@ _outArray(StringInfo str, Array *node)
static void
_outArrayRef(StringInfo str, ArrayRef *node)
{
appendStringInfo(str,
" ARRAYREF :refelemtype %u :refattrlength $d :refelemlength %d ",
node->refelemtype,
node->refattrlength,
node->refelemlength);
appendStringInfo(str,
" ARRAYREF :refelemtype %u :refattrlength $d :refelemlength %d ",
node->refelemtype,
node->refattrlength,
node->refelemlength);
appendStringInfo(str, " :refelembyval %c :refupperindex ",
node->refelembyval ? 't' : 'f');
appendStringInfo(str, " :refelembyval %c :refupperindex ",
node->refelembyval ? 't' : 'f');
_outNode(str, node->refupperindexpr);
appendStringInfo(str, " :reflowerindex ");
@ -756,15 +748,15 @@ _outArrayRef(StringInfo str, ArrayRef *node)
static void
_outFunc(StringInfo str, Func *node)
{
appendStringInfo(str,
" FUNC :funcid %u :functype %u :funcisindex %s :funcsize %d ",
node->funcid,
node->functype,
node->funcisindex ? "true" : "false",
node->funcsize);
appendStringInfo(str,
" FUNC :funcid %u :functype %u :funcisindex %s :funcsize %d ",
node->funcid,
node->functype,
node->funcisindex ? "true" : "false",
node->funcsize);
appendStringInfo(str, " :func_fcache @ 0x%x :func_tlist ",
(int) node->func_fcache);
(int) node->func_fcache);
_outNode(str, node->func_tlist);
appendStringInfo(str, " :func_planlist ");
@ -777,11 +769,11 @@ _outFunc(StringInfo str, Func *node)
static void
_outOper(StringInfo str, Oper *node)
{
appendStringInfo(str,
" OPER :opno %u :opid %u :opresulttype %u ",
node->opno,
node->opid,
node->opresulttype);
appendStringInfo(str,
" OPER :opno %u :opid %u :opresulttype %u ",
node->opno,
node->opid,
node->opresulttype);
}
/*
@ -790,12 +782,12 @@ _outOper(StringInfo str, Oper *node)
static void
_outParam(StringInfo str, Param *node)
{
appendStringInfo(str,
" PARAM :paramkind %d :paramid %d :paramname %s :paramtype %u ",
node->paramkind,
node->paramid,
stringStringInfo(node->paramname),
node->paramtype);
appendStringInfo(str,
" PARAM :paramkind %d :paramid %d :paramname %s :paramtype %u ",
node->paramkind,
node->paramid,
stringStringInfo(node->paramname),
node->paramtype);
appendStringInfo(str, " :param_tlist ");
_outNode(str, node->param_tlist);
@ -811,31 +803,31 @@ _outParam(StringInfo str, Param *node)
static void
_outEState(StringInfo str, EState *node)
{
appendStringInfo(str,
" ESTATE :direction %d :range_table ",
node->es_direction);
appendStringInfo(str,
" ESTATE :direction %d :range_table ",
node->es_direction);
_outNode(str, node->es_range_table);
appendStringInfo(str, " :result_relation_info @ 0x%x ",
(int) (node->es_result_relation_info));
(int) (node->es_result_relation_info));
}
/*
* Stuff from relation.h
*/
static void
_outRelOptInfo(StringInfo str, RelOptInfo *node)
_outRelOptInfo(StringInfo str, RelOptInfo * node)
{
appendStringInfo(str, " RELOPTINFO :relids ");
_outIntList(str, node->relids);
appendStringInfo(str,
" :indexed %s :pages %u :tuples %u :size %u :width %u :targetlist ",
node->indexed ? "true" : "false",
node->pages,
node->tuples,
node->size,
node->width);
appendStringInfo(str,
" :indexed %s :pages %u :tuples %u :size %u :width %u :targetlist ",
node->indexed ? "true" : "false",
node->pages,
node->tuples,
node->size,
node->width);
_outNode(str, node->targetlist);
appendStringInfo(str, " :pathlist ");
@ -847,10 +839,10 @@ _outRelOptInfo(StringInfo str, RelOptInfo *node)
* This can be changed later, if necessary.
*/
appendStringInfo(str,
" :cheapestpath @ 0x%x :pruneable %s :restrictinfo ",
(int) node->cheapestpath,
node->pruneable ? "true" : "false");
appendStringInfo(str,
" :cheapestpath @ 0x%x :pruneable %s :restrictinfo ",
(int) node->cheapestpath,
node->pruneable ? "true" : "false");
_outNode(str, node->restrictinfo);
appendStringInfo(str, " :joininfo ");
@ -876,18 +868,18 @@ _outTargetEntry(StringInfo str, TargetEntry *node)
static void
_outRangeTblEntry(StringInfo str, RangeTblEntry *node)
{
appendStringInfo(str,
" RTE :relname %s :refname %s :relid %u :inh %s :inFromCl %s :skipAcl %s",
stringStringInfo(node->relname),
stringStringInfo(node->refname),
node->relid,
node->inh ? "true" : "false",
node->inFromCl ? "true" : "false",
node->skipAcl ? "true" : "false");
appendStringInfo(str,
" RTE :relname %s :refname %s :relid %u :inh %s :inFromCl %s :skipAcl %s",
stringStringInfo(node->relname),
stringStringInfo(node->refname),
node->relid,
node->inh ? "true" : "false",
node->inFromCl ? "true" : "false",
node->skipAcl ? "true" : "false");
}
static void
_outRowMark(StringInfo str, RowMark *node)
_outRowMark(StringInfo str, RowMark * node)
{
appendStringInfo(str, " ROWMARK :rti %u :info %u", node->rti, node->info);
}
@ -899,17 +891,17 @@ static void
_outPathOrder(StringInfo str, PathOrder *node)
{
appendStringInfo(str, " PATHORDER :ordtype %d ",
node->ordtype);
node->ordtype);
if (node->ordtype == SORTOP_ORDER)
{
int i;
int i;
appendStringInfo(str, " :sortop ");
if (node->ord.sortop == NULL)
appendStringInfo(str, "<>");
else
{
for (i=0; node->ord.sortop[i] != 0; i++)
for (i = 0; node->ord.sortop[i] != 0; i++)
appendStringInfo(str, " %d ", node->ord.sortop[i]);
appendStringInfo(str, " %d ", 0);
}
@ -917,7 +909,7 @@ _outPathOrder(StringInfo str, PathOrder *node)
else
{
appendStringInfo(str, " :merge ");
_outNode(str,node->ord.merge);
_outNode(str, node->ord.merge);
}
}
@ -928,8 +920,8 @@ static void
_outPath(StringInfo str, Path *node)
{
appendStringInfo(str, " PATH :pathtype %d :cost %f :pathkeys ",
node->pathtype,
node->path_cost);
node->pathtype,
node->path_cost);
_outNode(str, node->pathkeys);
appendStringInfo(str, " :pathorder ");
@ -942,10 +934,10 @@ _outPath(StringInfo str, Path *node)
static void
_outIndexPath(StringInfo str, IndexPath *node)
{
appendStringInfo(str,
" INDEXPATH :pathtype %d :cost %f :pathkeys ",
node->path.pathtype,
node->path.path_cost);
appendStringInfo(str,
" INDEXPATH :pathtype %d :cost %f :pathkeys ",
node->path.pathtype,
node->path.path_cost);
_outNode(str, node->path.pathkeys);
appendStringInfo(str, " :pathorder ");
@ -962,17 +954,17 @@ _outIndexPath(StringInfo str, IndexPath *node)
* NestPath is a subclass of Path
*/
static void
_outNestPath(StringInfo str, NestPath *node)
_outNestPath(StringInfo str, NestPath * node)
{
appendStringInfo(str,
" NESTPATH :pathtype %d :cost %f :pathkeys ",
node->path.pathtype,
node->path.path_cost);
appendStringInfo(str,
" NESTPATH :pathtype %d :cost %f :pathkeys ",
node->path.pathtype,
node->path.path_cost);
_outNode(str, node->path.pathkeys);
appendStringInfo(str, " :pathorder ");
_outNode(str, node->path.pathorder);
appendStringInfo(str, " :pathinfo ");
_outNode(str, node->pathinfo);
@ -981,11 +973,11 @@ _outNestPath(StringInfo str, NestPath *node)
* For now, i'll just print the addresses.
*/
appendStringInfo(str,
" :outerjoinpath @ 0x%x :innerjoinpath @ 0x%x :outjoincost %f :joinid ",
(int) node->outerjoinpath,
(int) node->innerjoinpath,
node->path.outerjoincost);
appendStringInfo(str,
" :outerjoinpath @ 0x%x :innerjoinpath @ 0x%x :outjoincost %f :joinid ",
(int) node->outerjoinpath,
(int) node->innerjoinpath,
node->path.outerjoincost);
_outIntList(str, node->path.joinid);
}
@ -995,15 +987,15 @@ _outNestPath(StringInfo str, NestPath *node)
static void
_outMergePath(StringInfo str, MergePath *node)
{
appendStringInfo(str,
" MERGEPATH :pathtype %d :cost %f :pathkeys ",
node->jpath.path.pathtype,
node->jpath.path.path_cost);
appendStringInfo(str,
" MERGEPATH :pathtype %d :cost %f :pathkeys ",
node->jpath.path.pathtype,
node->jpath.path.path_cost);
_outNode(str, node->jpath.path.pathkeys);
appendStringInfo(str, " :pathorder ");
_outNode(str, node->jpath.path.pathorder);
appendStringInfo(str, " :pathinfo ");
_outNode(str, node->jpath.pathinfo);
@ -1012,11 +1004,11 @@ _outMergePath(StringInfo str, MergePath *node)
* For now, i'll just print the addresses.
*/
appendStringInfo(str,
" :outerjoinpath @ 0x%x :innerjoinpath @ 0x%x :outerjoincost %f :joinid ",
(int) node->jpath.outerjoinpath,
(int) node->jpath.innerjoinpath,
(int) node->jpath.path.outerjoincost);
appendStringInfo(str,
" :outerjoinpath @ 0x%x :innerjoinpath @ 0x%x :outerjoincost %f :joinid ",
(int) node->jpath.outerjoinpath,
(int) node->jpath.innerjoinpath,
(int) node->jpath.path.outerjoincost);
_outIntList(str, node->jpath.path.joinid);
appendStringInfo(str, " :path_mergeclauses ");
@ -1035,10 +1027,10 @@ _outMergePath(StringInfo str, MergePath *node)
static void
_outHashPath(StringInfo str, HashPath *node)
{
appendStringInfo(str,
" HASHPATH :pathtype %d :cost %f :pathkeys ",
node->jpath.path.pathtype,
node->jpath.path.path_cost);
appendStringInfo(str,
" HASHPATH :pathtype %d :cost %f :pathkeys ",
node->jpath.path.pathtype,
node->jpath.path.path_cost);
_outNode(str, node->jpath.path.pathkeys);
appendStringInfo(str, " :pathorder ");
@ -1052,11 +1044,11 @@ _outHashPath(StringInfo str, HashPath *node)
* For now, i'll just print the addresses.
*/
appendStringInfo(str,
" :outerjoinpath @ 0x%x :innerjoinpath @ 0x%x :outerjoincost %f :joinid ",
(int) node->jpath.outerjoinpath,
(int) node->jpath.innerjoinpath,
node->jpath.path.outerjoincost);
appendStringInfo(str,
" :outerjoinpath @ 0x%x :innerjoinpath @ 0x%x :outerjoincost %f :joinid ",
(int) node->jpath.outerjoinpath,
(int) node->jpath.innerjoinpath,
node->jpath.path.outerjoincost);
_outIntList(str, node->jpath.path.joinid);
appendStringInfo(str, " :path_hashclauses ");
@ -1075,10 +1067,10 @@ _outHashPath(StringInfo str, HashPath *node)
static void
_outOrderKey(StringInfo str, OrderKey *node)
{
appendStringInfo(str,
" ORDERKEY :attribute_number %d :array_index %d ",
node->attribute_number,
node->array_index);
appendStringInfo(str,
" ORDERKEY :attribute_number %d :array_index %d ",
node->attribute_number,
node->array_index);
}
/*
@ -1101,31 +1093,31 @@ _outJoinKey(StringInfo str, JoinKey *node)
static void
_outMergeOrder(StringInfo str, MergeOrder *node)
{
appendStringInfo(str,
" MERGEORDER :join_operator %u :left_operator %u :right_operator %u ",
node->join_operator,
node->left_operator,
node->right_operator);
appendStringInfo(str,
" MERGEORDER :join_operator %u :left_operator %u :right_operator %u ",
node->join_operator,
node->left_operator,
node->right_operator);
appendStringInfo(str,
" :left_type %u :right_type %u ",
node->left_type,
node->right_type);
appendStringInfo(str,
" :left_type %u :right_type %u ",
node->left_type,
node->right_type);
}
/*
* RestrictInfo is a subclass of Node.
*/
static void
_outRestrictInfo(StringInfo str, RestrictInfo *node)
_outRestrictInfo(StringInfo str, RestrictInfo * node)
{
appendStringInfo(str, " RESTRICTINFO :clause ");
_outNode(str, node->clause);
appendStringInfo(str,
" :selectivity %f :notclause %s :indexids ",
node->selectivity,
node->notclause ? "true" : "false");
appendStringInfo(str,
" :selectivity %f :notclause %s :indexids ",
node->selectivity,
node->notclause ? "true" : "false");
_outNode(str, node->indexids);
appendStringInfo(str, " :mergejoinorder ");
@ -1152,7 +1144,7 @@ _outJoinMethod(StringInfo str, JoinMethod *node)
* HashInfo is a subclass of JoinMethod.
*/
static void
_outHashInfo(StringInfo str, HashInfo *node)
_outHashInfo(StringInfo str, HashInfo * node)
{
appendStringInfo(str, " HASHINFO :hashop %u :jmkeys ", node->hashop);
_outNode(str, node->jmethod.jmkeys);
@ -1165,7 +1157,7 @@ _outHashInfo(StringInfo str, HashInfo *node)
* JoinInfo is a subclass of Node.
*/
static void
_outJoinInfo(StringInfo str, JoinInfo *node)
_outJoinInfo(StringInfo str, JoinInfo * node)
{
appendStringInfo(str, " JINFO :unjoined_relids ");
_outIntList(str, node->unjoined_relids);
@ -1174,8 +1166,8 @@ _outJoinInfo(StringInfo str, JoinInfo *node)
_outNode(str, node->jinfo_restrictinfo);
appendStringInfo(str, " :mergejoinable %s :hashjoinable %s ",
node->mergejoinable ? "true" : "false",
node->hashjoinable ? "true" : "false");
node->mergejoinable ? "true" : "false",
node->hashjoinable ? "true" : "false");
}
/*
@ -1184,9 +1176,9 @@ _outJoinInfo(StringInfo str, JoinInfo *node)
static void
_outDatum(StringInfo str, Datum value, Oid type)
{
char *s;
char *s;
Size length,
typeLength;
typeLength;
bool byValue;
int i;
@ -1213,6 +1205,7 @@ _outDatum(StringInfo str, Datum value, Oid type)
appendStringInfo(str, " 0 [ ] ");
else
{
/*
* length is unsigned - very bad to do < comparison to -1
* without casting it to int first!! -mer 8 Jan 1991
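That warning is easy to reproduce; a tiny self-contained demonstration of the unsigned-versus-negative-one comparison pitfall (the variable name is made up):

#include <stdio.h>

int
main(void)
{
	unsigned int length = 5;

	/*
	 * Under the usual arithmetic conversions -1 becomes UINT_MAX, so the
	 * comparison below is true even though 5 < -1 is false mathematically.
	 */
	if (length < -1)
		printf("unsigned compare: 5 < -1 evaluates to true\n");

	/* casting to int first gives the intended answer */
	if (!((int) length < -1))
		printf("cast to int first: 5 < -1 evaluates to false\n");

	return 0;
}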
@ -1221,9 +1214,7 @@ _outDatum(StringInfo str, Datum value, Oid type)
length = VARSIZE(s);
appendStringInfo(str, " %d [ ", length);
for (i = 0; i < length; i++)
{
appendStringInfo(str, " %d ", (int) (s[i]));
}
appendStringInfo(str, "] ");
}
}
@ -1239,19 +1230,19 @@ _outIter(StringInfo str, Iter *node)
static void
_outStream(StringInfo str, Stream *node)
{
appendStringInfo(str,
" STREAM :pathptr @ 0x%x :cinfo @ 0x%x :clausetype %d :upstream @ 0x%x ",
(int) node->pathptr,
(int) node->cinfo,
(int) node->clausetype,
(int) node->upstream);
appendStringInfo(str,
" STREAM :pathptr @ 0x%x :cinfo @ 0x%x :clausetype %d :upstream @ 0x%x ",
(int) node->pathptr,
(int) node->cinfo,
(int) node->clausetype,
(int) node->upstream);
appendStringInfo(str,
" :downstream @ 0x%x :groupup %d :groupcost %f :groupsel %f ",
(int) node->downstream,
node->groupup,
node->groupcost,
node->groupsel);
appendStringInfo(str,
" :downstream @ 0x%x :groupup %d :groupcost %f :groupsel %f ",
(int) node->downstream,
node->groupup,
node->groupcost,
node->groupsel);
}
static void
@ -1289,7 +1280,7 @@ _outValue(StringInfo str, Value *value)
{
switch (value->type)
{
case T_String:
case T_String:
appendStringInfo(str, " \"%s\" ", stringStringInfo(value->val.str));
break;
case T_Integer:
@ -1340,7 +1331,7 @@ _outAConst(StringInfo str, A_Const *node)
static void
_outConstraint(StringInfo str, Constraint *node)
{
appendStringInfo(str," %s :type", stringStringInfo(node->name));
appendStringInfo(str, " %s :type", stringStringInfo(node->name));
switch (node->contype)
{
@ -1374,7 +1365,7 @@ _outConstraint(StringInfo str, Constraint *node)
}
static void
_outCaseExpr(StringInfo str, CaseExpr *node)
_outCaseExpr(StringInfo str, CaseExpr * node)
{
appendStringInfo(str, "CASE ");
_outNode(str, node->args);
@ -1386,7 +1377,7 @@ _outCaseExpr(StringInfo str, CaseExpr *node)
}
static void
_outCaseWhen(StringInfo str, CaseWhen *node)
_outCaseWhen(StringInfo str, CaseWhen * node)
{
appendStringInfo(str, " WHEN ");
_outNode(str, node->expr);
@ -1653,7 +1644,7 @@ _outNode(StringInfo str, void *obj)
char *
nodeToString(void *obj)
{
StringInfoData str;
StringInfoData str;
/* see stringinfo.h for an explanation of this maneuver */
initStringInfo(&str);


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/nodes/print.c,v 1.27 1999/05/10 00:45:13 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/print.c,v 1.28 1999/05/25 16:09:10 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@ -218,17 +218,19 @@ print_expr(Node *expr, List *rtable)
void
print_pathkeys(List *pathkeys, List *rtable)
{
List *i, *k;
List *i,
*k;
printf("(");
foreach(i, pathkeys)
{
List *pathkey = lfirst(i);
List *pathkey = lfirst(i);
printf("(");
foreach(k, pathkey)
{
Node *var = lfirst(k);
print_expr(var, rtable);
if (lnext(k))
printf(", ");
@ -241,7 +243,7 @@ print_pathkeys(List *pathkeys, List *rtable)
}
/*
* print_tl
* print_tl
* print targetlist in a more legible way.
*/
void


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/nodes/readfuncs.c,v 1.63 1999/05/18 21:34:29 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/readfuncs.c,v 1.64 1999/05/25 16:09:11 momjian Exp $
*
* NOTES
* Most of the read functions for plan nodes are tested. (In fact, they
@ -169,9 +169,9 @@ _readQuery()
local_node->unionClause = nodeRead(true);
/***S*I***/
token = lsptok(NULL, &length); /* skip :intersectClause */
local_node->intersectClause = nodeRead(true);
token = lsptok(NULL, &length); /* skip :intersectClause */
local_node->intersectClause = nodeRead(true);
token = lsptok(NULL, &length); /* skip :limitOffset */
local_node->limitOffset = nodeRead(true);
@ -544,7 +544,7 @@ _readIndexScan()
local_node->indxqual = nodeRead(true); /* now read it */
token = lsptok(NULL, &length); /* eat :indxqualorig */
local_node->indxqualorig = nodeRead(true); /* now read it */
local_node->indxqualorig = nodeRead(true); /* now read it */
return local_node;
}
@ -1407,7 +1407,7 @@ _readRangeTblEntry()
static RowMark *
_readRowMark()
{
RowMark *local_node = makeNode(RowMark);
RowMark *local_node = makeNode(RowMark);
char *token;
int length;
@ -1431,7 +1431,7 @@ _readRowMark()
static PathOrder *
_readPathOrder()
{
PathOrder *local_node;
PathOrder *local_node;
char *token;
int length;
@ -1443,17 +1443,18 @@ _readPathOrder()
if (local_node->ordtype == SORTOP_ORDER)
{
token = lsptok(NULL, &length); /* get :sortop */
token = lsptok(NULL, &length); /* get :sortop */
if (length == 0)
local_node->ord.sortop = NULL;
else
{
int i = -1;
local_node->ord.sortop = palloc(sizeof(Oid) * (INDEX_MAX_KEYS+1));
do {
int i = -1;
local_node->ord.sortop = palloc(sizeof(Oid) * (INDEX_MAX_KEYS + 1));
do
{
i++;
Assert(i <= INDEX_MAX_KEYS);
token = lsptok(NULL, &length); /* now read it */
@ -1463,7 +1464,7 @@ _readPathOrder()
}
else
{
token = lsptok(NULL, &length); /* get :merge */
token = lsptok(NULL, &length); /* get :merge */
local_node->ord.merge = nodeRead(true); /* now read it */
}
@ -1494,10 +1495,10 @@ _readPath()
local_node->path_cost = (Cost) atof(token);
token = lsptok(NULL, &length); /* get :pathorder */
local_node->pathorder = nodeRead(true); /* now read it */
local_node->pathorder = nodeRead(true); /* now read it */
token = lsptok(NULL, &length); /* get :pathkeys */
local_node->pathkeys = nodeRead(true); /* now read it */
local_node->pathkeys = nodeRead(true); /* now read it */
return local_node;
}
@ -1526,10 +1527,10 @@ _readIndexPath()
local_node->path.path_cost = (Cost) atof(token);
token = lsptok(NULL, &length); /* get :pathorder */
local_node->path.pathorder = nodeRead(true); /* now read it */
local_node->path.pathorder = nodeRead(true); /* now read it */
token = lsptok(NULL, &length); /* get :pathkeys */
local_node->path.pathkeys = nodeRead(true); /* now read it */
local_node->path.pathkeys = nodeRead(true); /* now read it */
token = lsptok(NULL, &length); /* get :indexid */
local_node->indexid = toIntList(nodeRead(true));
@ -1568,7 +1569,7 @@ _readNestPath()
local_node->path.pathorder = nodeRead(true); /* now read it */
token = lsptok(NULL, &length); /* get :pathkeys */
local_node->path.pathkeys = nodeRead(true); /* now read it */
local_node->path.pathkeys = nodeRead(true); /* now read it */
token = lsptok(NULL, &length); /* get :pathinfo */
local_node->pathinfo = nodeRead(true); /* now read it */
@ -1630,13 +1631,13 @@ _readMergePath()
local_node->jpath.path.path_cost = (Cost) atof(token);
token = lsptok(NULL, &length); /* get :pathorder */
local_node->jpath.path.pathorder = nodeRead(true); /* now read it */
local_node->jpath.path.pathorder = nodeRead(true); /* now read it */
token = lsptok(NULL, &length); /* get :pathkeys */
local_node->jpath.path.pathkeys = nodeRead(true); /* now read it */
local_node->jpath.path.pathkeys = nodeRead(true); /* now read it */
token = lsptok(NULL, &length); /* get :pathinfo */
local_node->jpath.pathinfo = nodeRead(true); /* now read it */
local_node->jpath.pathinfo = nodeRead(true); /* now read it */
/*
* Not sure if these are nodes; they're declared as "struct path *".
@ -1664,7 +1665,7 @@ _readMergePath()
local_node->jpath.path.outerjoincost = (Cost) atof(token);
token = lsptok(NULL, &length); /* get :joinid */
local_node->jpath.path.joinid = toIntList(nodeRead(true)); /* now read it */
local_node->jpath.path.joinid = toIntList(nodeRead(true)); /* now read it */
token = lsptok(NULL, &length); /* get :path_mergeclauses */
local_node->path_mergeclauses = nodeRead(true); /* now read it */
@ -1704,13 +1705,13 @@ _readHashPath()
local_node->jpath.path.path_cost = (Cost) atof(token);
token = lsptok(NULL, &length); /* get :pathorder */
local_node->jpath.path.pathorder = nodeRead(true); /* now read it */
local_node->jpath.path.pathorder = nodeRead(true); /* now read it */
token = lsptok(NULL, &length); /* get :pathkeys */
local_node->jpath.path.pathkeys = nodeRead(true); /* now read it */
local_node->jpath.path.pathkeys = nodeRead(true); /* now read it */
token = lsptok(NULL, &length); /* get :pathinfo */
local_node->jpath.pathinfo = nodeRead(true); /* now read it */
local_node->jpath.pathinfo = nodeRead(true); /* now read it */
/*
* Not sure if these are nodes; they're declared as "struct path *".
@ -1738,7 +1739,7 @@ _readHashPath()
local_node->jpath.path.outerjoincost = (Cost) atof(token);
token = lsptok(NULL, &length); /* get :joinid */
local_node->jpath.path.joinid = toIntList(nodeRead(true)); /* now read it */
local_node->jpath.path.joinid = toIntList(nodeRead(true)); /* now read it */
token = lsptok(NULL, &length); /* get :path_hashclauses */
local_node->path_hashclauses = nodeRead(true); /* now read it */
@ -1924,7 +1925,7 @@ _readJoinMethod()
static HashInfo *
_readHashInfo()
{
HashInfo *local_node;
HashInfo *local_node;
char *token;
int length;
@ -1960,10 +1961,10 @@ _readJoinInfo()
local_node = makeNode(JoinInfo);
token = lsptok(NULL, &length); /* get :unjoined_relids */
local_node->unjoined_relids = toIntList(nodeRead(true)); /* now read it */
local_node->unjoined_relids = toIntList(nodeRead(true)); /* now read it */
token = lsptok(NULL, &length); /* get :jinfo_restrictinfo */
local_node->jinfo_restrictinfo = nodeRead(true); /* now read it */
local_node->jinfo_restrictinfo = nodeRead(true); /* now read it */
token = lsptok(NULL, &length); /* get :mergejoinable */


@ -5,7 +5,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: geqo_eval.c,v 1.37 1999/05/17 00:25:34 tgl Exp $
* $Id: geqo_eval.c,v 1.38 1999/05/25 16:09:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -56,7 +56,7 @@ static MemoryContext geqo_eval_context;
/*
* geqo_eval_startup:
* Must be called during geqo_main startup (before geqo_eval may be called)
* Must be called during geqo_main startup (before geqo_eval may be called)
*
* The main thing we need to do here is prepare a private memory context for
* allocation of temp storage used while constructing a path in geqo_eval().
@ -70,9 +70,10 @@ void
geqo_eval_startup(void)
{
#define GEQO_PORTAL_NAME "<geqo workspace>"
Portal geqo_portal = GetPortalByName(GEQO_PORTAL_NAME);
Portal geqo_portal = GetPortalByName(GEQO_PORTAL_NAME);
if (!PortalIsValid(geqo_portal)) {
if (!PortalIsValid(geqo_portal))
{
/* First time through (within current transaction, that is) */
geqo_portal = CreatePortal(GEQO_PORTAL_NAME);
Assert(PortalIsValid(geqo_portal));
@ -89,15 +90,18 @@ geqo_eval_startup(void)
Cost
geqo_eval(Query *root, Gene *tour, int num_gene)
{
MemoryContext oldcxt;
RelOptInfo *joinrel;
Cost fitness;
List *savelist;
MemoryContext oldcxt;
RelOptInfo *joinrel;
Cost fitness;
List *savelist;
/* preserve root->join_rel_list, which gimme_tree changes */
savelist = root->join_rel_list;
/* create a temporary allocation context for the path construction work */
/*
* create a temporary allocation context for the path construction
* work
*/
oldcxt = MemoryContextSwitchTo(geqo_eval_context);
StartPortalAllocMode(DefaultAllocMode, 0);
@ -118,7 +122,7 @@ geqo_eval(Query *root, Gene *tour, int num_gene)
}
/*
* gimme_tree
* gimme_tree
* this program presumes that only LEFT-SIDED TREES are considered!
*
* 'old_rel' is the preceding join
@ -126,7 +130,7 @@ geqo_eval(Query *root, Gene *tour, int num_gene)
* Returns a new join relation incorporating all joins in a left-sided tree.
*/
RelOptInfo *
gimme_tree(Query *root, Gene *tour, int rel_count, int num_gene, RelOptInfo *old_rel)
gimme_tree(Query *root, Gene *tour, int rel_count, int num_gene, RelOptInfo * old_rel)
{
RelOptInfo *inner_rel; /* current relation */
int base_rel_index;
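To make the "left-sided trees only" remark above concrete: a tour such as (3 1 2) -- the same example used in the code just below -- is folded left to right into ((R3 JOIN R1) JOIN R2). A throwaway sketch of that folding, with invented relation names:

#include <stdio.h>

/* Fold a tour of relation ids into a left-deep join expression, e.g.
 * tour {3, 1, 2} -> "((R3 JOIN R1) JOIN R2)". */
int
main(void)
{
	int			tour[] = {3, 1, 2};	/* example tour from the code comment */
	int			num_gene = 3;
	char		expr[256];
	char		tmp[256];
	int			i;

	snprintf(expr, sizeof(expr), "R%d", tour[0]);
	for (i = 1; i < num_gene; i++)
	{
		/* each step joins the running left-deep tree with the next relation */
		snprintf(tmp, sizeof(tmp), "(%s JOIN R%d)", expr, tour[i]);
		snprintf(expr, sizeof(expr), "%s", tmp);
	}
	printf("%s\n", expr);			/* prints ((R3 JOIN R1) JOIN R2) */
	return 0;
}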
@ -139,7 +143,7 @@ gimme_tree(Query *root, Gene *tour, int rel_count, int num_gene, RelOptInfo *old
/* tour[0] = 3; tour[1] = 1; tour[2] = 2 */
base_rel_index = (int) tour[rel_count];
inner_rel = (RelOptInfo *) nth(base_rel_index-1, root->base_rel_list);
inner_rel = (RelOptInfo *) nth(base_rel_index - 1, root->base_rel_list);
if (rel_count == 0)
{ /* processing first join with
@ -151,15 +155,17 @@ gimme_tree(Query *root, Gene *tour, int rel_count, int num_gene, RelOptInfo *old
{ /* tree main part */
if (!(new_rels = make_rels_by_clause_joins(root, old_rel,
old_rel->joininfo,
inner_rel->relids)))
inner_rel->relids)))
{
new_rels = make_rels_by_clauseless_joins(old_rel,
lcons(inner_rel,NIL));
/* we don't do bushy plans in geqo, do we? bjm 02/18/1999
new_rels = append(new_rels,
make_rels_by_clauseless_joins(old_rel,
lcons(old_rel,NIL));
*/
lcons(inner_rel, NIL));
/*
* we don't do bushy plans in geqo, do we? bjm 02/18/1999
* new_rels = append(new_rels,
* make_rels_by_clauseless_joins(old_rel,
* lcons(old_rel,NIL));
*/
}
/* process new_rel->pathlist */
@ -202,5 +208,5 @@ gimme_tree(Query *root, Gene *tour, int rel_count, int num_gene, RelOptInfo *old
}
}
return old_rel; /* tree finished ... */
return old_rel; /* tree finished ... */
}


@ -6,7 +6,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: geqo_main.c,v 1.15 1999/05/17 00:25:33 tgl Exp $
* $Id: geqo_main.c,v 1.16 1999/05/25 16:09:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -77,24 +77,28 @@ geqo(Query *root)
status_interval;
Gene *best_tour;
RelOptInfo *best_rel;
#if defined(ERX)
Edge *edge_table; /* list of edges */
int edge_failures = 0;
float difference;
#endif
#if defined(CX) || defined(PX) || defined(OX1) || defined(OX2)
City *city_table; /* list of cities */
#endif
#if defined(CX)
int cycle_diffs = 0;
int mutations = 0;
#endif
/* set tour size */
number_of_rels = length(root->base_rel_list);
/* set GA parameters */
geqo_params(number_of_rels); /* read "$PGDATA/pg_geqo" file */
geqo_params(number_of_rels);/* read "$PGDATA/pg_geqo" file */
pool_size = PoolSize;
number_generations = Generations;
status_interval = 10;


@ -5,7 +5,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: geqo_misc.c,v 1.17 1999/02/13 23:16:09 momjian Exp $
* $Id: geqo_misc.c,v 1.18 1999/05/25 16:09:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -261,7 +261,7 @@ geqo_print_path(Query *root, Path *path, int indent)
}
void
geqo_print_rel(Query *root, RelOptInfo *rel)
geqo_print_rel(Query *root, RelOptInfo * rel)
{
List *l;


@ -5,7 +5,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: geqo_pool.c,v 1.11 1999/02/13 23:16:12 momjian Exp $
* $Id: geqo_pool.c,v 1.12 1999/05/25 16:09:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -109,7 +109,7 @@ random_init_pool(Query *root, Pool *pool, int strt, int stp)
* "geqo_recombination.c"
* */
pool->data[i].worth = geqo_eval(root, chromo[i].string, pool->string_length); /* "from geqo_eval.c" */
pool->data[i].worth = geqo_eval(root, chromo[i].string, pool->string_length); /* "from geqo_eval.c" */
}
}


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/geqo/Attic/minspantree.c,v 1.10 1999/02/13 23:16:13 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/geqo/Attic/minspantree.c,v 1.11 1999/05/25 16:09:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -41,7 +41,7 @@
*/
void
minspantree(Query *root, List *join_rels, RelOptInfo *garel)
minspantree(Query *root, List *join_rels, RelOptInfo * garel)
{
int number_of_rels = length(root->base_rel_list);
int number_of_joins = length(join_rels);


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/_deadcode/Attic/predmig.c,v 1.1 1999/02/18 19:58:53 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/_deadcode/Attic/predmig.c,v 1.2 1999/05/25 16:09:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -257,7 +257,7 @@ xfunc_llel_chains(Stream root, Stream bottom)
*/
Assert(xfunc_num_relids(pathstream) > xfunc_num_relids(tmpstream));
progress = xfunc_prdmig_pullup(origstream, tmpstream,
(JoinPath) get_pathptr(pathstream));
(JoinPath) get_pathptr(pathstream));
}
if (get_downstream(tmpstream))
pathstream = (Stream) xfunc_get_downjoin((Stream) get_downstream(tmpstream));
@ -269,7 +269,7 @@ xfunc_llel_chains(Stream root, Stream bottom)
}
/*
** xfunc_complete_stream
** xfunc_complete_stream
** Given a stream composed of join nodes only, make a copy containing the
** join nodes along with the associated restriction nodes.
*/
@ -313,7 +313,7 @@ xfunc_complete_stream(Stream stream)
static bool
xfunc_prdmig_pullup(Stream origstream, Stream pullme, JoinPath joinpath)
{
RestrictInfo restrictinfo = get_cinfo(pullme);
RestrictInfo restrictinfo = get_cinfo(pullme);
bool progress = false;
Stream upjoin,
orignode,
@ -347,10 +347,10 @@ xfunc_prdmig_pullup(Stream origstream, Stream pullme, JoinPath joinpath)
else
whichchild = INNER;
restrictinfo = xfunc_pullup((Path) get_pathptr((Stream) get_downstream(upjoin)),
(JoinPath) get_pathptr(upjoin),
restrictinfo,
whichchild,
get_clausetype(orignode));
(JoinPath) get_pathptr(upjoin),
restrictinfo,
whichchild,
get_clausetype(orignode));
set_pathptr(pullme, get_pathptr(upjoin));
/* pullme has been moved into locrestrictinfo */
set_clausetype(pullme, XFUNC_LOCPRD);
@ -390,7 +390,7 @@ xfunc_prdmig_pullup(Stream origstream, Stream pullme, JoinPath joinpath)
}
/*
** xfunc_form_groups
** xfunc_form_groups
** A group is a pair of stream nodes a,b such that a is constrained to
** precede b (for instance if a and b are both joins), but rank(a) > rank(b).
** In such a situation, Monma and Sidney prove that no clauses should end
@ -487,10 +487,10 @@ xfunc_form_groups(Query *queryInfo, Stream root, Stream bottom)
}
/* ------------------- UTILITY FUNCTIONS ------------------------- */
/* ------------------- UTILITY FUNCTIONS ------------------------- */
/*
** xfunc_free_stream
** xfunc_free_stream
** walk down a stream and pfree it
*/
static void
@ -525,7 +525,7 @@ xfunc_add_clauses(Stream current)
foreach(temp, get_loc_restrictinfo((Path) get_pathptr(current)))
{
topnode = xfunc_streaminsert((RestrictInfo) lfirst(temp), topnode,
XFUNC_LOCPRD);
XFUNC_LOCPRD);
}
/* and add in the join clauses */
@ -536,7 +536,7 @@ xfunc_add_clauses(Stream current)
{
if (!equal(get_clause((RestrictInfo) lfirst(temp)), primjoin))
topnode = xfunc_streaminsert((RestrictInfo) lfirst(temp), topnode,
XFUNC_JOINPRD);
XFUNC_JOINPRD);
}
}
return topnode;
@ -623,7 +623,7 @@ xfunc_num_relids(Stream node)
}
/*
** xfunc_get_downjoin
** xfunc_get_downjoin
** Given a stream node, find the next lowest node which points to a
** join predicate or a scan node.
*/
@ -642,7 +642,7 @@ xfunc_get_downjoin(Stream node)
}
/*
** xfunc_get_upjoin
** xfunc_get_upjoin
** same as above, but upwards.
*/
static StreamPtr
@ -660,7 +660,7 @@ xfunc_get_upjoin(Stream node)
}
/*
** xfunc_stream_qsort
** xfunc_stream_qsort
** Given a stream, sort by group rank the elements in the stream from the
** node "bottom" up. DESTRUCTIVELY MODIFIES STREAM! Returns new root.
*/


@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/_deadcode/Attic/xfunc.c,v 1.2 1999/05/10 00:45:14 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/_deadcode/Attic/xfunc.c,v 1.3 1999/05/25 16:09:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -49,7 +49,7 @@ static int xfunc_card_unreferenced(Query *queryInfo,
*/
/*
** xfunc_trypullup
** xfunc_trypullup
** Preliminary pullup of predicates, to allow for maximal pruning.
** Given a relation, check each of its paths and see if you can
** pullup clauses from its inner and outer.
@ -59,7 +59,7 @@ void
xfunc_trypullup(RelOptInfo rel)
{
LispValue y; /* list ptr */
RestrictInfo maxcinfo; /* The RestrictInfo to pull up, as
RestrictInfo maxcinfo; /* The RestrictInfo to pull up, as
* calculated by xfunc_shouldpull() */
JoinPath curpath; /* current path in list */
int progress; /* has progress been made this time
@ -81,7 +81,7 @@ xfunc_trypullup(RelOptInfo rel)
{
/* No, the following should NOT be '==' !! */
if (clausetype = xfunc_shouldpull((Path) get_innerjoinpath(curpath),
curpath, INNER, &maxcinfo))
curpath, INNER, &maxcinfo))
{
xfunc_pullup((Path) get_innerjoinpath(curpath),
@ -96,7 +96,7 @@ xfunc_trypullup(RelOptInfo rel)
/* No, the following should NOT be '==' !! */
if (clausetype = xfunc_shouldpull((Path) get_outerjoinpath(curpath),
curpath, OUTER, &maxcinfo))
curpath, OUTER, &maxcinfo))
{
xfunc_pullup((Path) get_outerjoinpath(curpath),
@ -127,7 +127,7 @@ xfunc_trypullup(RelOptInfo rel)
}
/*
** xfunc_shouldpull
** xfunc_shouldpull
** find clause with highest rank, and decide whether to pull it up
** from child to parent. Currently we only pullup secondary join clauses
** that are in the pathrestrictinfo. Secondary hash and sort clauses are
@ -144,12 +144,12 @@ xfunc_shouldpull(Query *queryInfo,
Path childpath,
JoinPath parentpath,
int whichchild,
RestrictInfo *maxcinfopt) /* Out: pointer to clause
RestrictInfo * maxcinfopt) /* Out: pointer to clause
* to pullup */
{
LispValue clauselist,
tmplist; /* lists of clauses */
RestrictInfo maxcinfo; /* clause to pullup */
RestrictInfo maxcinfo; /* clause to pullup */
LispValue primjoinclause /* primary join clause */
= xfunc_primary_join(parentpath);
Cost tmprank,
@ -225,7 +225,7 @@ xfunc_shouldpull(Query *queryInfo,
|| (!is_join(childpath)
&& (whichchild == INNER)
&& IsA(parentpath, NestPath)
&&!IsA(parentpath, HashPath)
&& !IsA(parentpath, HashPath)
&&!IsA(parentpath, MergePath)))))
{
@ -250,7 +250,7 @@ xfunc_shouldpull(Query *queryInfo,
/*
** xfunc_pullup
** xfunc_pullup
** move clause from child pathnode to parent pathnode. This operation
** makes the child pathnode produce a larger relation than it used to.
** This means that we must construct a new RelOptInfo just for the childpath,
@ -264,7 +264,7 @@ RestrictInfo
xfunc_pullup(Query *queryInfo,
Path childpath,
JoinPath parentpath,
RestrictInfo cinfo, /* clause to pull up */
RestrictInfo cinfo,/* clause to pull up */
int whichchild, /* whether child is INNER or OUTER of join */
int clausetype) /* whether clause to pull is join or local */
{
@ -272,22 +272,22 @@ xfunc_pullup(Query *queryInfo,
RelOptInfo newrel;
Cost pulled_selec;
Cost cost;
RestrictInfo newinfo;
RestrictInfo newinfo;
/* remove clause from childpath */
newkid = (Path) copyObject((Node) childpath);
if (clausetype == XFUNC_LOCPRD)
{
set_locrestrictinfo(newkid,
xfunc_LispRemove((LispValue) cinfo,
(List) get_loc_restrictinfo(newkid)));
xfunc_LispRemove((LispValue) cinfo,
(List) get_loc_restrictinfo(newkid)));
}
else
{
set_pathrestrictinfo
((JoinPath) newkid,
xfunc_LispRemove((LispValue) cinfo,
(List) get_pathrestrictinfo((JoinPath) newkid)));
(List) get_pathrestrictinfo((JoinPath) newkid)));
}
/*
@ -328,8 +328,8 @@ xfunc_pullup(Query *queryInfo,
/* add clause to parentpath, and fix up its cost. */
set_locrestrictinfo(parentpath,
lispCons((LispValue) newinfo,
(LispValue) get_loc_restrictinfo(parentpath)));
lispCons((LispValue) newinfo,
(LispValue) get_loc_restrictinfo(parentpath)));
/* put new childpath into the path tree */
if (whichchild == INNER)
set_innerjoinpath(parentpath, (pathPtr) newkid);
@ -386,7 +386,7 @@ LispValue clause;
}
/*
** xfunc_join_expense
** xfunc_join_expense
** Find global expense of a join clause
*/
Cost
@ -457,7 +457,7 @@ xfunc_local_expense(LispValue clause)
}
/*
** xfunc_func_expense
** xfunc_func_expense
** given a Func or Oper and its args, find its expense.
** Note: in Stonebraker's SIGMOD '91 paper, he uses a more complicated metric
** than the one here. We can ignore the expected number of tuples for
@ -581,7 +581,7 @@ xfunc_func_expense(LispValue node, LispValue args)
}
/*
** xfunc_width
** xfunc_width
** recursively find the width of a expression
*/
@ -694,7 +694,7 @@ xfunc_width(LispValue clause)
*/
Assert(length(get_func_tlist(func)) == 1); /* sanity */
retval = xfunc_width((LispValue)
get_expr(lfirst(get_func_tlist(func))));
get_expr(lfirst(get_func_tlist(func))));
goto exit;
}
else
@ -771,8 +771,8 @@ xfunc_card_product(Query *queryInfo, Relids relids)
{
if (!xfunc_expense(queryInfo, get_clause((RestrictInfo) lfirst(cinfonode))))
tuples *= compute_clause_selec(queryInfo,
get_clause((RestrictInfo) lfirst(cinfonode)),
LispNil);
get_clause((RestrictInfo) lfirst(cinfonode)),
LispNil);
}
if (retval == 0)
@ -857,7 +857,7 @@ LispValue
xfunc_primary_join(JoinPath pathnode)
{
LispValue joinclauselist = get_pathrestrictinfo(pathnode);
RestrictInfo mincinfo;
RestrictInfo mincinfo;
LispValue tmplist;
LispValue minclause = LispNil;
Cost minrank,
@ -931,7 +931,7 @@ xfunc_get_path_cost(Query *queryInfo, Path pathnode)
*/
if (XfuncMode != XFUNC_OFF)
set_locrestrictinfo(pathnode, lisp_qsort(get_loc_restrictinfo(pathnode),
xfunc_cinfo_compare));
xfunc_cinfo_compare));
for (tmplist = get_loc_restrictinfo(pathnode), selec = 1.0;
tmplist != LispNil;
tmplist = lnext(tmplist))
@ -939,7 +939,7 @@ xfunc_get_path_cost(Query *queryInfo, Path pathnode)
cost += (Cost) (xfunc_local_expense(get_clause((RestrictInfo) lfirst(tmplist)))
* (Cost) get_tuples(get_parent(pathnode)) * selec);
selec *= compute_clause_selec(queryInfo,
get_clause((RestrictInfo) lfirst(tmplist)),
get_clause((RestrictInfo) lfirst(tmplist)),
LispNil);
}
@ -951,8 +951,8 @@ xfunc_get_path_cost(Query *queryInfo, Path pathnode)
{
if (XfuncMode != XFUNC_OFF)
set_pathrestrictinfo((JoinPath) pathnode, lisp_qsort
(get_pathrestrictinfo((JoinPath) pathnode),
xfunc_cinfo_compare));
(get_pathrestrictinfo((JoinPath) pathnode),
xfunc_cinfo_compare));
for (tmplist = get_pathrestrictinfo((JoinPath) pathnode), selec = 1.0;
tmplist != LispNil;
tmplist = lnext(tmplist))
@ -960,7 +960,7 @@ xfunc_get_path_cost(Query *queryInfo, Path pathnode)
cost += (Cost) (xfunc_local_expense(get_clause((RestrictInfo) lfirst(tmplist)))
* (Cost) get_tuples(get_parent(pathnode)) * selec);
selec *= compute_clause_selec(queryInfo,
get_clause((RestrictInfo) lfirst(tmplist)),
get_clause((RestrictInfo) lfirst(tmplist)),
LispNil);
}
}
@ -1071,7 +1071,7 @@ xfunc_total_path_cost(JoinPath pathnode)
/*
** xfunc_expense_per_tuple
** xfunc_expense_per_tuple
** return the expense of the join *per-tuple* of the input relation.
** The cost model here is that a join costs
** k*card(outer)*card(inner) + l*card(outer) + m*card(inner) + n
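A toy evaluation of that cost formula may help; every number below (the coefficients k, l, m, n and both cardinalities) is invented purely to show how the terms combine, and dividing by the outer cardinality is just one plausible reading of "per-tuple":

#include <stdio.h>

/* Toy evaluation of the quoted join cost model:
 *   cost = k*card(outer)*card(inner) + l*card(outer) + m*card(inner) + n
 * All values are illustrative assumptions, not measured constants. */
int
main(void)
{
	double		k = 0.002, l = 0.01, m = 0.01, n = 1.0;	/* assumed coefficients */
	double		card_outer = 1000.0, card_inner = 200.0;	/* assumed cardinalities */
	double		cost;

	cost = k * card_outer * card_inner
		+ l * card_outer
		+ m * card_inner
		+ n;

	printf("total cost = %.2f, e.g. per outer-input tuple = %.4f\n",
		   cost, cost / card_outer);
	return 0;
}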
@ -1124,7 +1124,7 @@ xfunc_expense_per_tuple(JoinPath joinnode, int whichchild)
}
/*
** xfunc_fixvars
** xfunc_fixvars
** After pulling up a clause, we must walk its expression tree, fixing Var
** nodes to point to the correct varno (either INNER or OUTER, depending
** on which child the clause was pulled from), and the right varattno in the
@ -1189,8 +1189,8 @@ xfunc_fixvars(LispValue clause, /* clause being pulled up */
int
xfunc_cinfo_compare(void *arg1, void *arg2)
{
RestrictInfo info1 = *(RestrictInfo *) arg1;
RestrictInfo info2 = *(RestrictInfo *) arg2;
RestrictInfo info1 = *(RestrictInfo *) arg1;
RestrictInfo info2 = *(RestrictInfo *) arg2;
LispValue clause1 = (LispValue) get_clause(info1),
clause2 = (LispValue) get_clause(info2);
@ -1223,7 +1223,7 @@ xfunc_clause_compare(void *arg1, void *arg2)
}
/*
** xfunc_disjunct_sort
** xfunc_disjunct_sort
** given a list of clauses, for each clause sort the disjuncts by cost
** (this assumes the predicates have been converted to Conjunctive NF)
** Modifies the clause list!
@ -1287,7 +1287,7 @@ xfunc_disjunct_compare(Query *queryInfo, void *arg1, void *arg2)
/* ------------------------ UTILITY FUNCTIONS ------------------------------- */
/*
** xfunc_func_width
** xfunc_func_width
** Given a function OID and operands, find the width of the return value.
*/
int
@ -1349,7 +1349,7 @@ exit:
}
/*
** xfunc_tuple_width
** xfunc_tuple_width
** Return the sum of the lengths of all the attributes of a given relation
*/
int
@ -1371,7 +1371,7 @@ xfunc_tuple_width(Relation rd)
}
/*
** xfunc_num_join_clauses
** xfunc_num_join_clauses
** Find the number of join clauses associated with this join path
*/
int
@ -1388,7 +1388,7 @@ xfunc_num_join_clauses(JoinPath path)
}
/*
** xfunc_LispRemove
** xfunc_LispRemove
** Just like LispRemove, but it whines if the item to be removed ain't there
*/
LispValue
@ -1419,11 +1419,11 @@ do { \
} while(0)
/*
** xfunc_copyrel
** xfunc_copyrel
** Just like _copyRel, but doesn't copy the paths
*/
bool
xfunc_copyrel(RelOptInfo from, RelOptInfo *to)
xfunc_copyrel(RelOptInfo from, RelOptInfo * to)
{
RelOptInfo newnode;


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.44 1999/02/22 05:26:18 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.45 1999/05/25 16:09:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -45,10 +45,10 @@ int32 _use_geqo_rels_ = GEQO_RELS;
static void set_base_rel_pathlist(Query *root, List *rels);
static RelOptInfo *make_one_rel_by_joins(Query *root, List *rels,
int levels_needed);
int levels_needed);
#ifdef OPTIMIZER_DEBUG
static void debug_print_rel(Query *root, RelOptInfo *rel);
static void debug_print_rel(Query *root, RelOptInfo * rel);
#endif
@ -76,6 +76,7 @@ make_one_rel(Query *root, List *rels)
if (levels_needed <= 1)
{
/*
* Unsorted single relation, no more processing is required.
*/
@ -83,6 +84,7 @@ make_one_rel(Query *root, List *rels)
}
else
{
/*
* This means that joins or sorts are required. set selectivities
* of clauses that have not been set by an index.
@ -117,17 +119,17 @@ set_base_rel_pathlist(Query *root, List *rels)
sequential_scan_list = lcons(create_seqscan_path(rel), NIL);
rel_index_scan_list = create_index_paths(root,
rel,
find_relation_indices(root, rel),
rel->restrictinfo,
rel->joininfo);
rel,
find_relation_indices(root, rel),
rel->restrictinfo,
rel->joininfo);
or_index_scan_list = create_or_index_paths(root, rel, rel->restrictinfo);
rel->pathlist = add_pathlist(rel,
sequential_scan_list,
nconc(rel_index_scan_list,
or_index_scan_list));
or_index_scan_list));
set_cheapest(rel, rel->pathlist);
@ -171,13 +173,14 @@ make_one_rel_by_joins(Query *root, List *rels, int levels_needed)
*******************************************/
if ((_use_geqo_) && length(root->base_rel_list) >= _use_geqo_rels_)
return geqo(root);
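This is the hand-off to the genetic optimizer once the base-relation count reaches _use_geqo_rels_. The pressure behind that cutoff is combinatorial: n relations admit n! left-deep join orders, which a few lines of arithmetic make vivid (the loop bound of 12 is arbitrary):

#include <stdio.h>

/* Left-deep join orders for n relations = n! permutations; this is why an
 * exhaustive search stops being practical past a modest relation count. */
int
main(void)
{
	double		orders = 1.0;
	int			n;

	for (n = 1; n <= 12; n++)
	{
		orders *= n;
		printf("%2d relations: %.0f left-deep join orders\n", n, orders);
	}
	return 0;
}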
/*******************************************
* rest will be deprecated in case of GEQO *
*******************************************/
while (--levels_needed)
{
/*
* Determine all possible pairs of relations to be joined at this
* level. Determine paths for joining these relation pairs and
@ -193,6 +196,7 @@ make_one_rel_by_joins(Query *root, List *rels, int levels_needed)
root->join_rel_list = rels = joined_rels;
#ifdef NOT_USED
/*
* * for each expensive predicate in each path in each distinct
* rel, * consider doing pullup -- JMH
@ -351,7 +355,7 @@ print_path(Query *root, Path *path, int indent)
}
static void
debug_print_rel(Query *root, RelOptInfo *rel)
debug_print_rel(Query *root, RelOptInfo * rel)
{
List *l;

Some files were not shown because too many files have changed in this diff.