
pgindent run.

Bruce Momjian
2002-09-04 20:31:48 +00:00
parent c91ceec21d
commit e50f52a074
446 changed files with 14942 additions and 13363 deletions

src/backend/access/common/heaptuple.c

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.81 2002/09/02 01:05:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.82 2002/09/04 20:31:08 momjian Exp $
*
* NOTES
* The old interface functions have been converted to macros
@@ -326,9 +326,9 @@ nocachegetattr(HeapTuple tuple,
/*
* If slow is false, and we got here, we know that we have a tuple
* with no nulls or var-widths before the target attribute. If possible,
* we also want to initialize the remainder of the attribute cached
* offset values.
* with no nulls or var-widths before the target attribute. If
* possible, we also want to initialize the remainder of the attribute
* cached offset values.
*/
if (!slow)
{
@@ -702,8 +702,8 @@ heap_modifytuple(HeapTuple tuple,
nulls);
/*
* copy the identification info of the old tuple: t_ctid, t_self,
* and OID (if any)
* copy the identification info of the old tuple: t_ctid, t_self, and
* OID (if any)
*/
newTuple->t_data->t_ctid = tuple->t_data->t_ctid;
newTuple->t_self = tuple->t_self;

src/backend/access/common/indextuple.c

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.59 2002/08/25 17:20:00 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.60 2002/09/04 20:31:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -319,9 +319,9 @@ nocache_index_getattr(IndexTuple tup,
/*
* If slow is false, and we got here, we know that we have a tuple
* with no nulls or var-widths before the target attribute. If possible,
* we also want to initialize the remainder of the attribute cached
* offset values.
* with no nulls or var-widths before the target attribute. If
* possible, we also want to initialize the remainder of the attribute
* cached offset values.
*/
if (!slow)
{

src/backend/access/common/printtup.c

@@ -9,7 +9,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.64 2002/08/24 15:00:46 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.65 2002/09/04 20:31:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -23,7 +23,7 @@
static void printtup_setup(DestReceiver *self, int operation,
const char *portalName, TupleDesc typeinfo);
const char *portalName, TupleDesc typeinfo);
static void printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self);
static void printtup_internal(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self);
static void printtup_cleanup(DestReceiver *self);
@@ -88,8 +88,8 @@ printtup_setup(DestReceiver *self, int operation,
pq_puttextmessage('P', portalName);
/*
* if this is a retrieve, then we send back the tuple
* descriptor of the tuples.
* if this is a retrieve, then we send back the tuple descriptor of
* the tuples.
*/
if (operation == CMD_SELECT)
{
@@ -100,7 +100,7 @@ printtup_setup(DestReceiver *self, int operation,
pq_beginmessage(&buf);
pq_sendbyte(&buf, 'T'); /* tuple descriptor message type */
pq_sendint(&buf, natts, 2); /* # of attrs in tuples */
pq_sendint(&buf, natts, 2); /* # of attrs in tuples */
for (i = 0; i < natts; ++i)
{

src/backend/access/common/tupdesc.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.88 2002/09/02 01:05:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.89 2002/09/04 20:31:09 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
@@ -114,8 +114,8 @@ CreateTupleDescCopy(TupleDesc tupdesc)
{
desc->attrs[i] = (Form_pg_attribute) palloc(ATTRIBUTE_TUPLE_SIZE);
memcpy(desc->attrs[i],
tupdesc->attrs[i],
ATTRIBUTE_TUPLE_SIZE);
tupdesc->attrs[i],
ATTRIBUTE_TUPLE_SIZE);
desc->attrs[i]->attnotnull = false;
desc->attrs[i]->atthasdef = false;
}
@@ -148,8 +148,8 @@ CreateTupleDescCopyConstr(TupleDesc tupdesc)
{
desc->attrs[i] = (Form_pg_attribute) palloc(ATTRIBUTE_TUPLE_SIZE);
memcpy(desc->attrs[i],
tupdesc->attrs[i],
ATTRIBUTE_TUPLE_SIZE);
tupdesc->attrs[i],
ATTRIBUTE_TUPLE_SIZE);
}
if (constr)
{
@@ -425,9 +425,8 @@ TupleDescInitEntry(TupleDesc desc,
*
* (Why not just make the atttypid point to the OID type, instead of the
* type the query returns? Because the executor uses the atttypid to
* tell the front end what type will be returned,
* and in the end the type returned will be the result of the query,
* not an OID.)
* tell the front end what type will be returned, and in the end the
* type returned will be the result of the query, not an OID.)
*
* (Why not wait until the return type of the set is known (i.e., the
* recursive call to the executor to execute the set has returned)

src/backend/access/gist/gist.c

@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.95 2002/06/20 20:29:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.96 2002/09/04 20:31:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -294,6 +294,7 @@ gistinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
#ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4);
bool checkUnique = PG_GETARG_BOOL(5);
@@ -494,13 +495,13 @@ gistlayerinsert(Relation r, BlockNumber blkno,
/* key is modified, so old version must be deleted */
ItemPointerSet(&oldtid, blkno, child);
gistdelete(r, &oldtid);
/*
* if child was splitted, new key for child will be inserted
* in the end list of child, so we must say to any scans
* that page is changed beginning from 'child' offset
* if child was splitted, new key for child will be inserted in
* the end list of child, so we must say to any scans that page is
* changed beginning from 'child' offset
*/
if ( ret & SPLITED )
if (ret & SPLITED)
gistadjscans(r, GISTOP_SPLIT, blkno, child);
}
@@ -615,7 +616,7 @@ gistwritebuffer(Relation r, Page page, IndexTuple *itup,
static int
gistnospace(Page page, IndexTuple *itvec, int len)
{
unsigned int size = 0;
unsigned int size = 0;
int i;
for (i = 0; i < len; i++)
@@ -679,7 +680,7 @@ gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate)
needfree = (bool *) palloc(((len == 1) ? 2 : len) * sizeof(bool));
/* workaround for 64-bit: ensure GISTENTRY array is maxaligned */
storage = (char*)palloc( ((len == 1) ? 2 : len) * sizeof(GISTENTRY) + MAXALIGN(VARHDRSZ));
storage = (char *) palloc(((len == 1) ? 2 : len) * sizeof(GISTENTRY) + MAXALIGN(VARHDRSZ));
evec = (bytea *) (storage + MAXALIGN(VARHDRSZ) - VARHDRSZ);
for (j = 0; j < r->rd_att->natts; j++)
@@ -786,7 +787,7 @@ gistgetadjusted(Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *gis
int j;
/* workaround for 64-bit: ensure GISTENTRY array is maxaligned */
storage = (char*) palloc( 2 * sizeof(GISTENTRY) + MAXALIGN(VARHDRSZ));
storage = (char *) palloc(2 * sizeof(GISTENTRY) + MAXALIGN(VARHDRSZ));
evec = (bytea *) (storage + MAXALIGN(VARHDRSZ) - VARHDRSZ);
VARATT_SIZEP(evec) = 2 * sizeof(GISTENTRY) + VARHDRSZ;
ev0p = &((GISTENTRY *) VARDATA(evec))[0];
@@ -911,7 +912,7 @@ gistunionsubkey(Relation r, GISTSTATE *giststate, IndexTuple *itvec, GIST_SPLITV
needfree = (bool *) palloc(((len == 1) ? 2 : len) * sizeof(bool));
/* workaround for 64-bit: ensure GISTENTRY array is maxaligned */
storage = (char*)palloc( ((len == 1) ? 2 : len) * sizeof(GISTENTRY) + MAXALIGN(VARHDRSZ));
storage = (char *) palloc(((len == 1) ? 2 : len) * sizeof(GISTENTRY) + MAXALIGN(VARHDRSZ));
evec = (bytea *) (storage + MAXALIGN(VARHDRSZ) - VARHDRSZ);
for (j = 1; j < r->rd_att->natts; j++)
@@ -1098,7 +1099,7 @@ gistadjsubkey(Relation r,
v->spl_nright = curlen;
/* workaround for 64-bit: ensure GISTENTRY array is maxaligned */
storage = (char*)palloc( 2 * sizeof(GISTENTRY) + MAXALIGN(VARHDRSZ));
storage = (char *) palloc(2 * sizeof(GISTENTRY) + MAXALIGN(VARHDRSZ));
evec = (bytea *) (storage + MAXALIGN(VARHDRSZ) - VARHDRSZ);
VARATT_SIZEP(evec) = 2 * sizeof(GISTENTRY) + VARHDRSZ;
ev0p = &((GISTENTRY *) VARDATA(evec))[0];
@@ -1276,7 +1277,7 @@ gistSplit(Relation r,
/* workaround for 64-bit: ensure GISTENTRY array is maxaligned */
storage = palloc(MAXALIGN(VARHDRSZ) + (*len + 1) * sizeof(GISTENTRY));
entryvec = (bytea *) (storage + MAXALIGN(VARHDRSZ) - VARHDRSZ);
decompvec = (bool *) palloc( (*len + 1) * sizeof(bool));
decompvec = (bool *) palloc((*len + 1) * sizeof(bool));
VARATT_SIZEP(entryvec) = (*len + 1) * sizeof(GISTENTRY) + VARHDRSZ;
for (i = 1; i <= *len; i++)
{
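
An aside on the "workaround for 64-bit" lines that pgindent re-wrapped above: palloc() hands back maxaligned storage, and the code then offsets the bytea pointer by MAXALIGN(VARHDRSZ) - VARHDRSZ so that VARDATA(evec), where the GISTENTRY array actually starts, lands back on a maxaligned address. Below is a minimal standalone sketch of that address arithmetic, not part of this commit; it assumes MAXIMUM_ALIGNOF is 8 and VARHDRSZ is 4, and uses malloc() as a stand-in for palloc().

    /* Illustrative only -- not from the commit. */
    #include <stdio.h>
    #include <stdlib.h>

    #define MAXIMUM_ALIGNOF 8       /* assumed typical 64-bit value */
    #define MAXALIGN(LEN)   (((size_t) (LEN) + (MAXIMUM_ALIGNOF - 1)) & ~((size_t) (MAXIMUM_ALIGNOF - 1)))
    #define VARHDRSZ        4       /* assumed varlena header size */

    int
    main(void)
    {
        /* stand-in for: storage = palloc(len * sizeof(GISTENTRY) + MAXALIGN(VARHDRSZ)) */
        char   *storage = malloc(64 * 16 + MAXALIGN(VARHDRSZ));
        /* the bytea header is placed so that it ends on an aligned boundary... */
        char   *evec = storage + MAXALIGN(VARHDRSZ) - VARHDRSZ;
        /* ...hence the entry array right after the header starts aligned */
        char   *entries = evec + VARHDRSZ;      /* what VARDATA(evec) would return */

        printf("entry-array offset = %zu, multiple of %d: %s\n",
               (size_t) (entries - storage), MAXIMUM_ALIGNOF,
               ((entries - storage) % MAXIMUM_ALIGNOF == 0) ? "yes" : "no");
        free(storage);
        return 0;
    }

Since the start of the allocation is already maxaligned, the entry array sits at offset MAXALIGN(VARHDRSZ), which is a multiple of the alignment by construction.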

src/backend/access/gist/gistget.c

@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/gist/gistget.c,v 1.34 2002/06/20 20:29:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/gist/gistget.c,v 1.35 2002/09/04 20:31:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,7 +33,7 @@ gistgettuple(PG_FUNCTION_ARGS)
{
IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
bool res;
bool res;
/* if we have it cached in the scan desc, just return the value */
if (gistscancache(s, dir))

src/backend/access/hash/hash.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.59 2002/06/20 20:29:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.60 2002/09/04 20:31:09 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@@ -164,6 +164,7 @@ hashinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
#ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4);
bool checkUnique = PG_GETARG_BOOL(5);
@@ -213,7 +214,7 @@ hashgettuple(PG_FUNCTION_ARGS)
HashScanOpaque so = (HashScanOpaque) scan->opaque;
Page page;
OffsetNumber offnum;
bool res;
bool res;
/*
* If we've already initialized this scan, we can just advance it in
@@ -228,18 +229,21 @@ hashgettuple(PG_FUNCTION_ARGS)
if (scan->kill_prior_tuple)
{
/*
* Yes, so mark it by setting the LP_DELETE bit in the item flags.
* Yes, so mark it by setting the LP_DELETE bit in the item
* flags.
*/
offnum = ItemPointerGetOffsetNumber(&(scan->currentItemData));
page = BufferGetPage(so->hashso_curbuf);
PageGetItemId(page, offnum)->lp_flags |= LP_DELETE;
/*
* Since this can be redone later if needed, it's treated the
* same as a commit-hint-bit status update for heap tuples:
* we mark the buffer dirty but don't make a WAL log entry.
* same as a commit-hint-bit status update for heap tuples: we
* mark the buffer dirty but don't make a WAL log entry.
*/
SetBufferCommitInfoNeedsSave(so->hashso_curbuf);
}
/*
* Now continue the scan.
*/

src/backend/access/hash/hashfunc.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.34 2002/06/20 20:29:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.35 2002/09/04 20:31:09 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@@ -96,7 +96,8 @@ hashname(PG_FUNCTION_ARGS)
char *key = NameStr(*PG_GETARG_NAME(0));
int keylen = strlen(key);
Assert(keylen < NAMEDATALEN); /* else it's not truncated correctly */
Assert(keylen < NAMEDATALEN); /* else it's not truncated
* correctly */
return hash_any((unsigned char *) key, keylen);
}
@@ -134,9 +135,9 @@ hashvarlena(PG_FUNCTION_ARGS)
* high bits or all three low bits, whether the original value of a,b,c
* is almost all zero or is uniformly distributed,
* - If mix() is run forward or backward, at least 32 bits in a,b,c
* have at least 1/4 probability of changing.
* have at least 1/4 probability of changing.
* - If mix() is run forward, every bit of c will change between 1/3 and
* 2/3 of the time. (Well, 22/100 and 78/100 for some 2-bit deltas.)
* 2/3 of the time. (Well, 22/100 and 78/100 for some 2-bit deltas.)
*----------
*/
#define mix(a,b,c) \
@@ -147,17 +148,17 @@ hashvarlena(PG_FUNCTION_ARGS)
a -= b; a -= c; a ^= (c>>12); \
b -= c; b -= a; b ^= (a<<16); \
c -= a; c -= b; c ^= (b>>5); \
a -= b; a -= c; a ^= (c>>3); \
a -= b; a -= c; a ^= (c>>3); \
b -= c; b -= a; b ^= (a<<10); \
c -= a; c -= b; c ^= (b>>15); \
}
/*
* hash_any() -- hash a variable-length key into a 32-bit value
* k : the key (the unaligned variable-length array of bytes)
* len : the length of the key, counting by bytes
* k : the key (the unaligned variable-length array of bytes)
* len : the length of the key, counting by bytes
*
* Returns a uint32 value. Every bit of the key affects every bit of
* Returns a uint32 value. Every bit of the key affects every bit of
* the return value. Every 1-bit and 2-bit delta achieves avalanche.
* About 6*len+35 instructions. The best hash table sizes are powers
* of 2. There is no need to do mod a prime (mod is sooo slow!).
@@ -166,7 +167,10 @@ hashvarlena(PG_FUNCTION_ARGS)
Datum
hash_any(register const unsigned char *k, register int keylen)
{
register uint32 a,b,c,len;
register uint32 a,
b,
c,
len;
/* Set up the internal state */
len = keylen;
@@ -176,32 +180,44 @@ hash_any(register const unsigned char *k, register int keylen)
/* handle most of the key */
while (len >= 12)
{
a += (k[0] +((uint32)k[1]<<8) +((uint32)k[2]<<16) +((uint32)k[3]<<24));
b += (k[4] +((uint32)k[5]<<8) +((uint32)k[6]<<16) +((uint32)k[7]<<24));
c += (k[8] +((uint32)k[9]<<8) +((uint32)k[10]<<16)+((uint32)k[11]<<24));
mix(a,b,c);
k += 12; len -= 12;
a += (k[0] + ((uint32) k[1] << 8) + ((uint32) k[2] << 16) + ((uint32) k[3] << 24));
b += (k[4] + ((uint32) k[5] << 8) + ((uint32) k[6] << 16) + ((uint32) k[7] << 24));
c += (k[8] + ((uint32) k[9] << 8) + ((uint32) k[10] << 16) + ((uint32) k[11] << 24));
mix(a, b, c);
k += 12;
len -= 12;
}
/* handle the last 11 bytes */
c += keylen;
switch (len) /* all the case statements fall through */
{
case 11: c+=((uint32)k[10]<<24);
case 10: c+=((uint32)k[9]<<16);
case 9 : c+=((uint32)k[8]<<8);
case 11:
c += ((uint32) k[10] << 24);
case 10:
c += ((uint32) k[9] << 16);
case 9:
c += ((uint32) k[8] << 8);
/* the first byte of c is reserved for the length */
case 8 : b+=((uint32)k[7]<<24);
case 7 : b+=((uint32)k[6]<<16);
case 6 : b+=((uint32)k[5]<<8);
case 5 : b+=k[4];
case 4 : a+=((uint32)k[3]<<24);
case 3 : a+=((uint32)k[2]<<16);
case 2 : a+=((uint32)k[1]<<8);
case 1 : a+=k[0];
case 8:
b += ((uint32) k[7] << 24);
case 7:
b += ((uint32) k[6] << 16);
case 6:
b += ((uint32) k[5] << 8);
case 5:
b += k[4];
case 4:
a += ((uint32) k[3] << 24);
case 3:
a += ((uint32) k[2] << 16);
case 2:
a += ((uint32) k[1] << 8);
case 1:
a += k[0];
/* case 0: nothing left to add */
}
mix(a,b,c);
mix(a, b, c);
/* report the result */
return UInt32GetDatum(c);
}
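
For readers of the re-indented hunk above, the overall shape of hash_any() is easier to see outside diff form: the key is consumed twelve bytes at a time with mix() applied after each block, the zero to eleven leftover bytes are folded in through the fall-through switch, and a final mix() leaves the result in c. The sketch below restates that structure as a standalone program; the first three mix() rounds and the two seed constants sit above the hunk shown, so they are filled in here from Bob Jenkins' published lookup2 code and should be treated as assumptions, as should the hash_bytes() name.

    /* Illustrative restatement, not the committed source. */
    #include <stdio.h>
    #include <string.h>

    typedef unsigned int uint32;

    #define mix(a,b,c) \
    { \
      a -= b; a -= c; a ^= (c>>13); \
      b -= c; b -= a; b ^= (a<<8); \
      c -= a; c -= b; c ^= (b>>13); \
      a -= b; a -= c; a ^= (c>>12); \
      b -= c; b -= a; b ^= (a<<16); \
      c -= a; c -= b; c ^= (b>>5); \
      a -= b; a -= c; a ^= (c>>3); \
      b -= c; b -= a; b ^= (a<<10); \
      c -= a; c -= b; c ^= (b>>15); \
    }

    static uint32
    hash_bytes(const unsigned char *k, int keylen)
    {
        uint32 a, b, c, len;

        len = keylen;
        a = b = 0x9e3779b9;     /* golden-ratio seed, per lookup2 (assumed) */
        c = 3923095;            /* arbitrary initial value (assumed) */

        /* consume the key twelve bytes at a time, mixing after each block */
        while (len >= 12)
        {
            a += (k[0] + ((uint32) k[1] << 8) + ((uint32) k[2] << 16) + ((uint32) k[3] << 24));
            b += (k[4] + ((uint32) k[5] << 8) + ((uint32) k[6] << 16) + ((uint32) k[7] << 24));
            c += (k[8] + ((uint32) k[9] << 8) + ((uint32) k[10] << 16) + ((uint32) k[11] << 24));
            mix(a, b, c);
            k += 12;
            len -= 12;
        }

        /* fold in the 0..11 remaining bytes; the low byte of c holds keylen */
        c += keylen;
        switch (len)            /* all cases fall through intentionally */
        {
            case 11: c += ((uint32) k[10] << 24);
            case 10: c += ((uint32) k[9] << 16);
            case 9:  c += ((uint32) k[8] << 8);
            case 8:  b += ((uint32) k[7] << 24);
            case 7:  b += ((uint32) k[6] << 16);
            case 6:  b += ((uint32) k[5] << 8);
            case 5:  b += k[4];
            case 4:  a += ((uint32) k[3] << 24);
            case 3:  a += ((uint32) k[2] << 16);
            case 2:  a += ((uint32) k[1] << 8);
            case 1:  a += k[0];
                /* case 0: nothing left to add */
        }
        mix(a, b, c);
        return c;
    }

    int
    main(void)
    {
        const char *key = "postgres";

        printf("hash = %u\n", hash_bytes((const unsigned char *) key, (int) strlen(key)));
        return 0;
    }

As the surrounding comments note, every input bit affects every output bit, so callers can mask the result down to a power-of-two table size instead of taking it modulo a prime.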

src/backend/access/hash/hashscan.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashscan.c,v 1.28 2002/06/20 20:29:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashscan.c,v 1.29 2002/09/04 20:31:09 momjian Exp $
*
* NOTES
* Because we can be doing an index scan on a relation while we
@@ -45,7 +45,7 @@ static HashScanList HashScans = (HashScanList) NULL;
static void _hash_scandel(IndexScanDesc scan,
BlockNumber blkno, OffsetNumber offno);
BlockNumber blkno, OffsetNumber offno);
/*
@@ -158,7 +158,7 @@ _hash_scandel(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno)
* then step backwards (affecting current), then exchange again.
*/
ItemPointerData tmpitem;
Buffer tmpbuf;
Buffer tmpbuf;
tmpitem = *mark;
*mark = *current;

src/backend/access/heap/heapam.c

@@ -8,12 +8,12 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.147 2002/09/02 01:05:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.148 2002/09/04 20:31:09 momjian Exp $
*
*
* INTERFACE ROUTINES
* relation_open - open any relation by relation OID
* relation_openrv - open any relation specified by a RangeVar
* relation_openrv - open any relation specified by a RangeVar
* relation_openr - open a system relation by name
* relation_close - close any relation
* heap_open - open a heap relation by relation OID
@@ -306,7 +306,7 @@ heapgettup(Relation relation,
{
if (ItemIdIsUsed(lpp))
{
bool valid;
bool valid;
tuple->t_datamcxt = NULL;
tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
@@ -985,8 +985,8 @@ heap_fetch(Relation relation,
*userbuf = buffer;
/*
* Count the successful fetch in *pgstat_info if given,
* otherwise in the relation's default statistics area.
* Count the successful fetch in *pgstat_info if given, otherwise
* in the relation's default statistics area.
*/
if (pgstat_info != NULL)
pgstat_count_heap_fetch(pgstat_info);
@@ -1120,6 +1120,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
/* this is redundant with an Assert in HeapTupleSetOid */
Assert(tup->t_data->t_infomask & HEAP_HASOID);
#endif
/*
* If the object id of this tuple has already been assigned, trust
* the caller. There are a couple of ways this can happen. At
@@ -1224,10 +1225,10 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
WriteBuffer(buffer);
/*
* If tuple is cachable, mark it for invalidation from the caches in case
* we abort. Note it is OK to do this after WriteBuffer releases the
* buffer, because the "tup" data structure is all in local memory,
* not in the shared buffer.
* If tuple is cachable, mark it for invalidation from the caches in
* case we abort. Note it is OK to do this after WriteBuffer releases
* the buffer, because the "tup" data structure is all in local
* memory, not in the shared buffer.
*/
CacheInvalidateHeapTuple(relation, tup);
@@ -1379,6 +1380,7 @@ l1:
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
#ifdef TUPLE_TOASTER_ACTIVE
/*
* If the relation has toastable attributes, we need to delete no
* longer needed items there too. We have to do this before
@@ -1728,10 +1730,10 @@ l2:
WriteBuffer(buffer);
/*
* If new tuple is cachable, mark it for invalidation from the caches in
* case we abort. Note it is OK to do this after WriteBuffer releases
* the buffer, because the "newtup" data structure is all in local
* memory, not in the shared buffer.
* If new tuple is cachable, mark it for invalidation from the caches
* in case we abort. Note it is OK to do this after WriteBuffer
* releases the buffer, because the "newtup" data structure is all in
* local memory, not in the shared buffer.
*/
CacheInvalidateHeapTuple(relation, newtup);
@@ -2045,16 +2047,16 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
xlhdr.hdr.mask = newtup->t_data->t_infomask;
if (move) /* remember xmin & xmax */
{
TransactionId xid[2]; /* xmax, xmin */
TransactionId xid[2]; /* xmax, xmin */
if (newtup->t_data->t_infomask & (HEAP_XMAX_INVALID |
HEAP_MARKED_FOR_UPDATE))
HEAP_MARKED_FOR_UPDATE))
xid[0] = InvalidTransactionId;
else
xid[0] = HeapTupleHeaderGetXmax(newtup->t_data);
xid[1] = HeapTupleHeaderGetXmin(newtup->t_data);
memcpy((char *) &xlhdr + hsize,
(char *) xid,
(char *) xid,
2 * sizeof(TransactionId));
hsize += 2 * sizeof(TransactionId);
}
@@ -2143,7 +2145,7 @@ heap_xlog_clean(bool redo, XLogRecPtr lsn, XLogRecord *record)
PageRepairFragmentation(page, NULL);
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID); /* prev sui */
PageSetSUI(page, ThisStartUpID); /* prev sui */
UnlockAndWriteBuffer(buffer);
}
@@ -2463,11 +2465,11 @@ newsame:;
if (move)
{
TransactionId xid[2]; /* xmax, xmin */
TransactionId xid[2]; /* xmax, xmin */
hsize = SizeOfHeapUpdate + SizeOfHeapHeader;
memcpy((char *) xid,
(char *) xlrec + hsize, 2 * sizeof(TransactionId));
(char *) xlrec + hsize, 2 * sizeof(TransactionId));
htup->t_infomask = xlhdr.mask;
htup->t_infomask &= ~(HEAP_XMIN_COMMITTED |
HEAP_XMIN_INVALID |

src/backend/access/heap/tuptoaster.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.35 2002/09/02 01:05:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.36 2002/09/04 20:31:09 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -46,7 +46,7 @@ static void toast_insert_or_update(Relation rel, HeapTuple newtup,
static Datum toast_save_datum(Relation rel, Datum value);
static varattrib *toast_fetch_datum(varattrib *attr);
static varattrib *toast_fetch_datum_slice(varattrib *attr,
int32 sliceoffset, int32 length);
int32 sliceoffset, int32 length);
/* ----------
@@ -165,73 +165,68 @@ heap_tuple_untoast_attr(varattrib *attr)
/* ----------
* heap_tuple_untoast_attr_slice -
*
* Public entry point to get back part of a toasted value
* from compression or external storage.
* Public entry point to get back part of a toasted value
* from compression or external storage.
* ----------
*/
varattrib *
varattrib *
heap_tuple_untoast_attr_slice(varattrib *attr, int32 sliceoffset, int32 slicelength)
{
varattrib *preslice;
varattrib *result;
int32 attrsize;
int32 attrsize;
if (VARATT_IS_COMPRESSED(attr))
{
varattrib *tmp;
varattrib *tmp;
if (VARATT_IS_EXTERNAL(attr))
{
tmp = toast_fetch_datum(attr);
}
else
{
tmp = attr; /* compressed in main tuple */
tmp = attr; /* compressed in main tuple */
}
preslice = (varattrib *) palloc(attr->va_content.va_external.va_rawsize
+ VARHDRSZ);
VARATT_SIZEP(preslice) = attr->va_content.va_external.va_rawsize + VARHDRSZ;
pglz_decompress((PGLZ_Header *) tmp, VARATT_DATA(preslice));
if (tmp != attr)
if (tmp != attr)
pfree(tmp);
}
else
else
{
/* Plain value */
if (VARATT_IS_EXTERNAL(attr))
{
{
/* fast path */
return (toast_fetch_datum_slice(attr, sliceoffset, slicelength));
}
else
{
preslice = attr;
}
}
/* slicing of datum for compressed cases and plain value */
attrsize = VARSIZE(preslice) - VARHDRSZ;
if (sliceoffset >= attrsize)
if (sliceoffset >= attrsize)
{
sliceoffset = 0;
slicelength = 0;
}
if (((sliceoffset + slicelength) > attrsize) || slicelength < 0)
{
slicelength = attrsize - sliceoffset;
}
result = (varattrib *) palloc(slicelength + VARHDRSZ);
VARATT_SIZEP(result) = slicelength + VARHDRSZ;
memcpy(VARDATA(result), VARDATA(preslice) + sliceoffset, slicelength);
if (preslice != attr) pfree(preslice);
if (preslice != attr)
pfree(preslice);
return result;
}
@@ -1053,9 +1048,9 @@ toast_fetch_datum(varattrib *attr)
/*
* Read the chunks by index
*
* Note that because the index is actually on (valueid, chunkidx)
* we will see the chunks in chunkidx order, even though we didn't
* explicitly ask for it.
* Note that because the index is actually on (valueid, chunkidx) we will
* see the chunks in chunkidx order, even though we didn't explicitly
* ask for it.
*/
nextidx = 0;
@@ -1146,45 +1141,44 @@ toast_fetch_datum_slice(varattrib *attr, int32 sliceoffset, int32 length)
varattrib *result;
int32 attrsize;
int32 residx;
int32 nextidx;
int numchunks;
int startchunk;
int endchunk;
int32 nextidx;
int numchunks;
int startchunk;
int endchunk;
int32 startoffset;
int32 endoffset;
int totalchunks;
int totalchunks;
Pointer chunk;
bool isnull;
int32 chunksize;
int32 chcpystrt;
int32 chcpyend;
int32 chcpystrt;
int32 chcpyend;
attrsize = attr->va_content.va_external.va_extsize;
totalchunks = ((attrsize - 1) / TOAST_MAX_CHUNK_SIZE) + 1;
if (sliceoffset >= attrsize)
if (sliceoffset >= attrsize)
{
sliceoffset = 0;
length = 0;
sliceoffset = 0;
length = 0;
}
if (((sliceoffset + length) > attrsize) || length < 0)
{
length = attrsize - sliceoffset;
}
length = attrsize - sliceoffset;
result = (varattrib *) palloc(length + VARHDRSZ);
VARATT_SIZEP(result) = length + VARHDRSZ;
if (VARATT_IS_COMPRESSED(attr))
VARATT_SIZEP(result) |= VARATT_FLAG_COMPRESSED;
if (length == 0) return (result); /* Can save a lot of work at this point! */
if (length == 0)
return (result); /* Can save a lot of work at this point! */
startchunk = sliceoffset / TOAST_MAX_CHUNK_SIZE;
endchunk = (sliceoffset + length - 1) / TOAST_MAX_CHUNK_SIZE;
numchunks = (endchunk - startchunk ) + 1;
numchunks = (endchunk - startchunk) + 1;
startoffset = sliceoffset % TOAST_MAX_CHUNK_SIZE;
endoffset = (sliceoffset + length - 1) % TOAST_MAX_CHUNK_SIZE;
@@ -1204,33 +1198,34 @@ toast_fetch_datum_slice(varattrib *attr, int32 sliceoffset, int32 length)
(bits16) 0,
(AttrNumber) 1,
(RegProcedure) F_OIDEQ,
ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
/*
* Now dependent on number of chunks:
*/
if (numchunks == 1)
if (numchunks == 1)
{
ScanKeyEntryInitialize(&toastkey[1],
ScanKeyEntryInitialize(&toastkey[1],
(bits16) 0,
(AttrNumber) 2,
(RegProcedure) F_INT4EQ,
Int32GetDatum(startchunk));
nscankeys = 2;
nscankeys = 2;
}
else
{
ScanKeyEntryInitialize(&toastkey[1],
ScanKeyEntryInitialize(&toastkey[1],
(bits16) 0,
(AttrNumber) 2,
(RegProcedure) F_INT4GE,
Int32GetDatum(startchunk));
ScanKeyEntryInitialize(&toastkey[2],
ScanKeyEntryInitialize(&toastkey[2],
(bits16) 0,
(AttrNumber) 2,
(RegProcedure) F_INT4LE,
Int32GetDatum(endchunk));
nscankeys = 3;
nscankeys = 3;
}
/*
@@ -1279,21 +1274,23 @@ toast_fetch_datum_slice(varattrib *attr, int32 sliceoffset, int32 length)
*/
chcpystrt = 0;
chcpyend = chunksize - 1;
if (residx == startchunk) chcpystrt = startoffset;
if (residx == endchunk) chcpyend = endoffset;
memcpy(((char *) VARATT_DATA(result)) +
(residx * TOAST_MAX_CHUNK_SIZE - sliceoffset) +chcpystrt,
if (residx == startchunk)
chcpystrt = startoffset;
if (residx == endchunk)
chcpyend = endoffset;
memcpy(((char *) VARATT_DATA(result)) +
(residx * TOAST_MAX_CHUNK_SIZE - sliceoffset) + chcpystrt,
VARATT_DATA(chunk) + chcpystrt,
(chcpyend - chcpystrt) + 1);
nextidx++;
}
/*
* Final checks that we successfully fetched the datum
*/
if ( nextidx != (endchunk + 1))
if (nextidx != (endchunk + 1))
elog(ERROR, "missing chunk number %d for toast value %u",
nextidx,
attr->va_content.va_external.va_valueid);
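
The slice logic being re-wrapped in this file reduces to a little chunk arithmetic: the requested byte range is mapped onto TOAST chunk numbers, a two- or three-key index scan fetches exactly those chunks, and only the partial first and last chunks are copied with an offset. A worked example of that arithmetic follows; it is not from the commit, TOAST_MAX_CHUNK_SIZE is really derived from BLCKSZ, and the 2000-byte value and the sample offsets are purely for demonstration.

    /* Worked example of the toast_fetch_datum_slice() chunk arithmetic. */
    #include <stdio.h>

    #define TOAST_MAX_CHUNK_SIZE 2000   /* assumed value for the demo */

    int
    main(void)
    {
        int sliceoffset = 4500;     /* byte offset of the requested slice */
        int length = 3000;          /* requested slice length in bytes */

        int startchunk = sliceoffset / TOAST_MAX_CHUNK_SIZE;
        int endchunk = (sliceoffset + length - 1) / TOAST_MAX_CHUNK_SIZE;
        int numchunks = (endchunk - startchunk) + 1;
        int startoffset = sliceoffset % TOAST_MAX_CHUNK_SIZE;
        int endoffset = (sliceoffset + length - 1) % TOAST_MAX_CHUNK_SIZE;

        /* chunks 2..3 (2 total), copy from offset 500 of the first chunk
         * through offset 1499 of the last */
        printf("chunks %d..%d (%d total), copy from offset %d of the first "
               "chunk through offset %d of the last\n",
               startchunk, endchunk, numchunks, startoffset, endoffset);
        return 0;
    }

With these inputs the scan needs the three-key form (chunk_id >= 2 and chunk_id <= 3); a slice that falls inside a single chunk would use the two-key equality form shown in the hunk above.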

src/backend/access/index/genam.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.35 2002/06/20 20:29:25 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.36 2002/09/04 20:31:09 momjian Exp $
*
* NOTES
* many of the old access method routines have been turned into
@@ -77,7 +77,7 @@ RelationGetIndexScan(Relation indexRelation,
scan->heapRelation = NULL; /* may be set later */
scan->indexRelation = indexRelation;
scan->xs_snapshot = SnapshotNow; /* may be set later */
scan->xs_snapshot = SnapshotNow; /* may be set later */
scan->numberOfKeys = nkeys;
/*
@@ -90,8 +90,8 @@ RelationGetIndexScan(Relation indexRelation,
scan->keyData = NULL;
scan->kill_prior_tuple = false;
scan->ignore_killed_tuples = true; /* default setting */
scan->keys_are_unique = false; /* may be set by amrescan */
scan->ignore_killed_tuples = true; /* default setting */
scan->keys_are_unique = false; /* may be set by amrescan */
scan->got_tuple = false;
scan->opaque = NULL;
@@ -201,6 +201,7 @@ systable_beginscan(Relation heapRelation,
/* We assume it's a system index, so index_openr is OK */
sysscan->irel = irel = index_openr(indexRelname);
/*
* Change attribute numbers to be index column numbers.
*
@@ -210,7 +211,7 @@ systable_beginscan(Relation heapRelation,
for (i = 0; i < nkeys; i++)
{
Assert(key[i].sk_attno == irel->rd_index->indkey[i]);
key[i].sk_attno = i+1;
key[i].sk_attno = i + 1;
}
sysscan->iscan = index_beginscan(heapRelation, irel, snapshot,
nkeys, key);

src/backend/access/index/indexam.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.61 2002/06/20 20:29:25 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.62 2002/09/04 20:31:09 momjian Exp $
*
* INTERFACE ROUTINES
* index_open - open an index relation by relation OID
@@ -272,8 +272,8 @@ index_beginscan(Relation heapRelation,
PointerGetDatum(key)));
/*
* Save additional parameters into the scandesc. Everything else
* was set up by RelationGetIndexScan.
* Save additional parameters into the scandesc. Everything else was
* set up by RelationGetIndexScan.
*/
scan->heapRelation = heapRelation;
scan->xs_snapshot = snapshot;
@@ -293,7 +293,7 @@ index_beginscan(Relation heapRelation,
* index_rescan - (re)start a scan of an index
*
* The caller may specify a new set of scankeys (but the number of keys
* cannot change). Note that this is also called when first starting
* cannot change). Note that this is also called when first starting
* an indexscan; see RelationGetIndexScan.
* ----------------
*/
@@ -305,8 +305,8 @@ index_rescan(IndexScanDesc scan, ScanKey key)
SCAN_CHECKS;
GET_SCAN_PROCEDURE(rescan, amrescan);
scan->kill_prior_tuple = false; /* for safety */
scan->keys_are_unique = false; /* may be set by amrescan */
scan->kill_prior_tuple = false; /* for safety */
scan->keys_are_unique = false; /* may be set by amrescan */
scan->got_tuple = false;
OidFunctionCall2(procedure,
@@ -375,7 +375,7 @@ index_restrpos(IndexScanDesc scan)
SCAN_CHECKS;
GET_SCAN_PROCEDURE(restrpos, amrestrpos);
scan->kill_prior_tuple = false; /* for safety */
scan->kill_prior_tuple = false; /* for safety */
scan->got_tuple = false;
OidFunctionCall1(procedure, PointerGetDatum(scan));
@@ -385,7 +385,7 @@ index_restrpos(IndexScanDesc scan)
* index_getnext - get the next heap tuple from a scan
*
* The result is the next heap tuple satisfying the scan keys and the
* snapshot, or NULL if no more matching tuples exist. On success,
* snapshot, or NULL if no more matching tuples exist. On success,
* the buffer containing the heap tuple is pinned (the pin will be dropped
* at the next index_getnext or index_endscan). The index TID corresponding
* to the heap tuple can be obtained if needed from scan->currentItemData.
@@ -409,8 +409,8 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
scan->kill_prior_tuple = false;
/*
* Can skip entering the index AM if we already got a tuple
* and it must be unique.
* Can skip entering the index AM if we already got a tuple and it
* must be unique.
*/
if (scan->keys_are_unique && scan->got_tuple)
return NULL;
@@ -454,9 +454,9 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
* index AM to not return it on future indexscans.
*
* We told heap_fetch to keep a pin on the buffer, so we can
* re-access the tuple here. But we must re-lock the buffer first.
* Also, it's just barely possible for an update of hint bits to
* occur here.
* re-access the tuple here. But we must re-lock the buffer
* first. Also, it's just barely possible for an update of hint
* bits to occur here.
*/
LockBuffer(scan->xs_cbuf, BUFFER_LOCK_SHARE);
sv_infomask = heapTuple->t_data->t_infomask;
@@ -497,7 +497,7 @@ bool
index_getnext_indexitem(IndexScanDesc scan,
ScanDirection direction)
{
bool found;
bool found;
SCAN_CHECKS;
@@ -642,10 +642,11 @@ index_getprocinfo(Relation irel,
procId = loc[procindex];
/*
* Complain if function was not found during IndexSupportInitialize.
* This should not happen unless the system tables contain bogus
* entries for the index opclass. (If an AM wants to allow a
* support function to be optional, it can use index_getprocid.)
* Complain if function was not found during
* IndexSupportInitialize. This should not happen unless the
* system tables contain bogus entries for the index opclass. (If
* an AM wants to allow a support function to be optional, it can
* use index_getprocid.)
*/
if (!RegProcedureIsValid(procId))
elog(ERROR, "Missing support function %d for attribute %d of index %s",

src/backend/access/nbtree/nbtinsert.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.95 2002/08/06 02:36:33 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.96 2002/09/04 20:31:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -119,14 +119,14 @@ top:
*
* NOTE: obviously, _bt_check_unique can only detect keys that are
* already in the index; so it cannot defend against concurrent
* insertions of the same key. We protect against that by means
* of holding a write lock on the target page. Any other would-be
* insertions of the same key. We protect against that by means of
* holding a write lock on the target page. Any other would-be
* inserter of the same key must acquire a write lock on the same
* target page, so only one would-be inserter can be making the check
* at one time. Furthermore, once we are past the check we hold
* write locks continuously until we have performed our insertion,
* so no later inserter can fail to see our insertion. (This
* requires some care in _bt_insertonpg.)
* at one time. Furthermore, once we are past the check we hold write
* locks continuously until we have performed our insertion, so no
* later inserter can fail to see our insertion. (This requires some
* care in _bt_insertonpg.)
*
* If we must wait for another xact, we release the lock while waiting,
* and then must start over completely.
@@ -205,15 +205,16 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
if (offset <= maxoff)
{
/*
* _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's how we
* handling NULLs - and so we must not use _bt_compare in real
* comparison, but only for ordering/finding items on pages. -
* vadim 03/24/97
* _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's
* how we handling NULLs - and so we must not use _bt_compare
* in real comparison, but only for ordering/finding items on
* pages. - vadim 03/24/97
*/
if (!_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
break; /* we're past all the equal tuples */
curitemid = PageGetItemId(page, offset);
/*
* We can skip the heap fetch if the item is marked killed.
*/
@@ -226,10 +227,11 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
{
/* it is a duplicate */
TransactionId xwait =
(TransactionIdIsValid(SnapshotDirty->xmin)) ?
SnapshotDirty->xmin : SnapshotDirty->xmax;
(TransactionIdIsValid(SnapshotDirty->xmin)) ?
SnapshotDirty->xmin : SnapshotDirty->xmax;
ReleaseBuffer(hbuffer);
/*
* If this tuple is being updated by other transaction
* then we have to wait for its commit/abort.
@@ -252,8 +254,8 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
{
/*
* Hmm, if we can't see the tuple, maybe it can be
* marked killed. This logic should match index_getnext
* and btgettuple.
* marked killed. This logic should match
* index_getnext and btgettuple.
*/
uint16 sv_infomask;
@@ -421,7 +423,7 @@ _bt_insertonpg(Relation rel,
{
/* step right one page */
BlockNumber rblkno = lpageop->btpo_next;
Buffer rbuf;
Buffer rbuf;
/*
* must write-lock next page before releasing write lock on

src/backend/access/nbtree/nbtree.c

@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.91 2002/06/20 20:29:25 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.92 2002/09/04 20:31:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -315,24 +315,28 @@ btgettuple(PG_FUNCTION_ARGS)
* buffer, too.
*/
_bt_restscan(scan);
/*
* Check to see if we should kill the previously-fetched tuple.
*/
if (scan->kill_prior_tuple)
{
/*
* Yes, so mark it by setting the LP_DELETE bit in the item flags.
* Yes, so mark it by setting the LP_DELETE bit in the item
* flags.
*/
offnum = ItemPointerGetOffsetNumber(&(scan->currentItemData));
page = BufferGetPage(so->btso_curbuf);
PageGetItemId(page, offnum)->lp_flags |= LP_DELETE;
/*
* Since this can be redone later if needed, it's treated the
* same as a commit-hint-bit status update for heap tuples:
* we mark the buffer dirty but don't make a WAL log entry.
* same as a commit-hint-bit status update for heap tuples: we
* mark the buffer dirty but don't make a WAL log entry.
*/
SetBufferCommitInfoNeedsSave(so->btso_curbuf);
}
/*
* Now continue the scan.
*/
@@ -645,15 +649,15 @@ btbulkdelete(PG_FUNCTION_ARGS)
/*
* If this is first deletion on this page, trade in read
* lock for a really-exclusive write lock. Then, step
* back one and re-examine the item, because other backends
* might have inserted item(s) while we weren't holding
* the lock!
* back one and re-examine the item, because other
* backends might have inserted item(s) while we weren't
* holding the lock!
*
* We assume that only concurrent insertions, not deletions,
* can occur while we're not holding the page lock (the caller
* should hold a suitable relation lock to ensure this).
* Therefore, the item we want to delete is either in the
* same slot as before, or some slot to its right.
* can occur while we're not holding the page lock (the
* caller should hold a suitable relation lock to ensure
* this). Therefore, the item we want to delete is either
* in the same slot as before, or some slot to its right.
* Rechecking the same slot is necessary and sufficient to
* get back in sync after any insertions.
*/
@@ -675,19 +679,19 @@ btbulkdelete(PG_FUNCTION_ARGS)
}
/*
* In either case, we now need to back up the scan one item,
* so that the next cycle will re-examine the same offnum on
* this page.
* In either case, we now need to back up the scan one
* item, so that the next cycle will re-examine the same
* offnum on this page.
*
* For now, just hack the current-item index. Will need to
* be smarter when deletion includes removal of empty
* index pages.
*
* We must decrement ip_posid in all cases but one: if the
* page was formerly rightmost but was split while we didn't
* hold the lock, and ip_posid is pointing to item 1, then
* ip_posid now points at the high key not a valid data item.
* In this case we do want to step forward.
* page was formerly rightmost but was split while we
* didn't hold the lock, and ip_posid is pointing to item
* 1, then ip_posid now points at the high key not a valid
* data item. In this case we do want to step forward.
*/
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
if (current->ip_posid >= P_FIRSTDATAKEY(opaque))

src/backend/access/nbtree/nbtutils.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.50 2002/06/20 20:29:25 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.51 2002/09/04 20:31:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -22,7 +22,7 @@
#include "executor/execdebug.h"
static int _bt_getstrategynumber(RegProcedure sk_procedure, StrategyMap map);
static int _bt_getstrategynumber(RegProcedure sk_procedure, StrategyMap map);
/*
@@ -178,7 +178,7 @@ _bt_formitem(IndexTuple itup)
* example.
*
* Furthermore, we detect the case where the index is unique and we have
* equality quals for all columns. In this case there can be at most one
* equality quals for all columns. In this case there can be at most one
* (visible) matching tuple. index_getnext uses this to avoid uselessly
* continuing the scan after finding one match.
*
@@ -439,8 +439,8 @@ _bt_orderkeys(IndexScanDesc scan)
so->numberOfKeys = new_numberOfKeys;
/*
* If unique index and we have equality keys for all columns,
* set keys_are_unique flag for higher levels.
* If unique index and we have equality keys for all columns, set
* keys_are_unique flag for higher levels.
*/
if (allEqualSoFar && relation->rd_index->indisunique &&
relation->rd_rel->relnatts == new_numberOfKeys)

src/backend/access/rtree/rtget.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.27 2002/06/20 20:29:25 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.28 2002/09/04 20:31:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,7 +31,7 @@ rtgettuple(PG_FUNCTION_ARGS)
{
IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
bool res;
bool res;
/* if we have it cached in the scan desc, just return the value */
if (rtscancache(s, dir))

src/backend/access/rtree/rtree.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.74 2002/06/25 17:26:11 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.75 2002/09/04 20:31:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -223,6 +223,7 @@ rtinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
#ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4);
bool checkUnique = PG_GETARG_BOOL(5);

src/backend/access/transam/transam.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.53 2002/06/20 20:29:25 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.54 2002/09/04 20:31:13 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@@ -27,7 +27,7 @@
* Flag indicating that we are bootstrapping.
*
* Transaction ID generation is disabled during bootstrap; we just use
* BootstrapTransactionId. Also, the transaction ID status-check routines
* BootstrapTransactionId. Also, the transaction ID status-check routines
* are short-circuited; they claim that BootstrapTransactionId has already
* committed, allowing tuples already inserted to be seen immediately.
* ----------------

src/backend/access/transam/varsup.c

@@ -6,7 +6,7 @@
* Copyright (c) 2000, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.50 2002/06/11 13:40:50 wieck Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.51 2002/09/04 20:31:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -68,12 +68,12 @@ GetNewTransactionId(void)
* might see a partially-set xid here. But holding both locks at once
* would be a nasty concurrency hit (and in fact could cause a
* deadlock against GetSnapshotData). So for now, assume atomicity.
* Note that readers of PGPROC xid field should be careful to fetch the
* value only once, rather than assume they can read it multiple times
* and get the same answer each time.
* Note that readers of PGPROC xid field should be careful to fetch
* the value only once, rather than assume they can read it multiple
* times and get the same answer each time.
*
* A solution to the atomic-store problem would be to give each PGPROC its
* own spinlock used only for fetching/storing that PGPROC's xid.
* A solution to the atomic-store problem would be to give each PGPROC
* its own spinlock used only for fetching/storing that PGPROC's xid.
* (SInvalLock would then mean primarily that PROCs couldn't be added/
* removed while holding the lock.)
*/

src/backend/access/transam/xact.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.131 2002/08/30 22:18:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.132 2002/09/04 20:31:13 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
@@ -265,7 +265,6 @@ SetTransactionFlushEnabled(bool state)
{
TransactionFlushState = (state == true);
}
#endif
@@ -421,7 +420,7 @@ CommandCounterIncrement(void)
TransactionState s = CurrentTransactionState;
s->commandId += 1;
if (s->commandId == FirstCommandId) /* check for overflow */
if (s->commandId == FirstCommandId) /* check for overflow */
elog(ERROR, "You may only have 2^32-1 commands per transaction");
/* Propagate new command ID into query snapshots, if set */
@@ -517,8 +516,8 @@ void
RecordTransactionCommit(void)
{
/*
* If we made neither any XLOG entries nor any temp-rel updates,
* we can omit recording the transaction commit at all.
* If we made neither any XLOG entries nor any temp-rel updates, we
* can omit recording the transaction commit at all.
*/
if (MyXactMadeXLogEntry || MyXactMadeTempRelUpdate)
{
@@ -531,10 +530,10 @@ RecordTransactionCommit(void)
START_CRIT_SECTION();
/*
* We only need to log the commit in xlog if the transaction made any
* transaction-controlled XLOG entries. (Otherwise, its XID appears
* nowhere in permanent storage, so no one else will ever care if it
* committed.)
* We only need to log the commit in xlog if the transaction made
* any transaction-controlled XLOG entries. (Otherwise, its XID
* appears nowhere in permanent storage, so no one else will ever
* care if it committed.)
*/
if (MyLastRecPtr.xrecoff != 0)
{
@@ -560,20 +559,20 @@ RecordTransactionCommit(void)
}
/*
* We must flush our XLOG entries to disk if we made any XLOG entries,
* whether in or out of transaction control. For example, if we
* reported a nextval() result to the client, this ensures that any
* XLOG record generated by nextval will hit the disk before we report
* the transaction committed.
* We must flush our XLOG entries to disk if we made any XLOG
* entries, whether in or out of transaction control. For
* example, if we reported a nextval() result to the client, this
* ensures that any XLOG record generated by nextval will hit the
* disk before we report the transaction committed.
*/
if (MyXactMadeXLogEntry)
{
/*
* Sleep before flush! So we can flush more than one commit
* records per single fsync. (The idea is some other backend may
* do the XLogFlush while we're sleeping. This needs work still,
* because on most Unixen, the minimum select() delay is 10msec or
* more, which is way too long.)
* records per single fsync. (The idea is some other backend
* may do the XLogFlush while we're sleeping. This needs work
* still, because on most Unixen, the minimum select() delay
* is 10msec or more, which is way too long.)
*
* We do not sleep if enableFsync is not turned on, nor if there
* are fewer than CommitSiblings other backends with active
@@ -593,13 +592,14 @@ RecordTransactionCommit(void)
}
/*
* We must mark the transaction committed in clog if its XID appears
* either in permanent rels or in local temporary rels. We test
* this by seeing if we made transaction-controlled entries *OR*
* local-rel tuple updates. Note that if we made only the latter,
* we have not emitted an XLOG record for our commit, and so in the
* event of a crash the clog update might be lost. This is okay
* because no one else will ever care whether we committed.
* We must mark the transaction committed in clog if its XID
* appears either in permanent rels or in local temporary rels.
* We test this by seeing if we made transaction-controlled
* entries *OR* local-rel tuple updates. Note that if we made
* only the latter, we have not emitted an XLOG record for our
* commit, and so in the event of a crash the clog update might be
* lost. This is okay because no one else will ever care whether
* we committed.
*/
if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate)
TransactionIdCommit(xid);
@@ -628,6 +628,7 @@ AtCommit_Cache(void)
* Clean up the relation cache.
*/
AtEOXact_RelationCache(true);
/*
* Make catalog changes visible to all backends.
*/
@@ -698,8 +699,8 @@ RecordTransactionAbort(void)
{
/*
* If we made neither any transaction-controlled XLOG entries nor any
* temp-rel updates, we can omit recording the transaction abort at all.
* No one will ever care that it aborted.
* temp-rel updates, we can omit recording the transaction abort at
* all. No one will ever care that it aborted.
*/
if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate)
{
@@ -716,11 +717,12 @@ RecordTransactionAbort(void)
START_CRIT_SECTION();
/*
* We only need to log the abort in XLOG if the transaction made any
* transaction-controlled XLOG entries. (Otherwise, its XID appears
* nowhere in permanent storage, so no one else will ever care if it
* committed.) We do not flush XLOG to disk in any case, since the
* default assumption after a crash would be that we aborted, anyway.
* We only need to log the abort in XLOG if the transaction made
* any transaction-controlled XLOG entries. (Otherwise, its XID
* appears nowhere in permanent storage, so no one else will ever
* care if it committed.) We do not flush XLOG to disk in any
* case, since the default assumption after a crash would be that
* we aborted, anyway.
*/
if (MyLastRecPtr.xrecoff != 0)
{
@@ -1165,8 +1167,8 @@ StartTransactionCommand(bool preventChain)
TransactionState s = CurrentTransactionState;
/*
* Remember if caller wants to prevent autocommit-off chaining.
* This is only allowed if not already in a transaction block.
* Remember if caller wants to prevent autocommit-off chaining. This
* is only allowed if not already in a transaction block.
*/
suppressChain = preventChain;
if (preventChain && s->blockState != TBLOCK_DEFAULT)
@@ -1260,16 +1262,18 @@ CommitTransactionCommand(bool forceCommit)
{
/*
* If we aren't in a transaction block, and we are doing
* autocommit, just do our usual transaction commit. But
* if we aren't doing autocommit, start a transaction block
* automatically by switching to INPROGRESS state. (We handle
* this choice here, and not earlier, so that an explicit BEGIN
* issued in autocommit-off mode won't issue strange warnings.)
* autocommit, just do our usual transaction commit. But if
* we aren't doing autocommit, start a transaction block
* automatically by switching to INPROGRESS state. (We handle
* this choice here, and not earlier, so that an explicit
* BEGIN issued in autocommit-off mode won't issue strange
* warnings.)
*
* Autocommit mode is forced by either a true forceCommit parameter
* to me, or a true preventChain parameter to the preceding
* StartTransactionCommand call. This is needed so that commands
* like VACUUM can ensure that the right things happen.
* Autocommit mode is forced by either a true forceCommit
* parameter to me, or a true preventChain parameter to the
* preceding StartTransactionCommand call. This is needed so
* that commands like VACUUM can ensure that the right things
* happen.
*/
case TBLOCK_DEFAULT:
if (autocommit || forceCommit || suppressChain)
@@ -1442,9 +1446,9 @@ BeginTransactionBlock(void)
s->blockState = TBLOCK_BEGIN;
/*
* do begin processing. NOTE: if you put anything here, check that
* it behaves properly in both autocommit-on and autocommit-off modes.
* In the latter case we will already have done some work in the new
* do begin processing. NOTE: if you put anything here, check that it
* behaves properly in both autocommit-on and autocommit-off modes. In
* the latter case we will already have done some work in the new
* transaction.
*/
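
The comment blocks re-wrapped in RecordTransactionCommit() and RecordTransactionAbort() above encode a small decision table: skip the routine entirely if nothing durable happened, write a commit record only for transaction-controlled XLOG activity, flush XLOG whenever any XLOG record was made, and update clog whenever either permanent or temp-rel work occurred. A condensed restatement of the commit-side decisions follows; the record_commit() name and its boolean parameters are stand-ins for MyXactMadeXLogEntry, MyLastRecPtr.xrecoff != 0, and MyXactMadeTempRelUpdate, and this is not the committed source.

    /* Illustrative pseudocode, not the committed source. */
    #include <stdbool.h>
    #include <stdio.h>

    static void
    record_commit(bool made_any_xlog_entry,     /* stand-in for MyXactMadeXLogEntry       */
                  bool made_xact_xlog_entry,    /* stand-in for MyLastRecPtr.xrecoff != 0 */
                  bool made_temp_rel_update)    /* stand-in for MyXactMadeTempRelUpdate   */
    {
        /* neither XLOG entries nor temp-rel updates: omit the commit record entirely */
        if (!made_any_xlog_entry && !made_temp_rel_update)
            return;

        /* commit record only if the xact made transaction-controlled XLOG entries */
        if (made_xact_xlog_entry)
            printf("write XLOG commit record\n");

        /* flush XLOG if any XLOG entries were made (e.g. a reported nextval() result) */
        if (made_any_xlog_entry)
            printf("flush XLOG to disk before reporting commit\n");

        /* clog update if the XID appears in permanent rels or local temp rels */
        if (made_xact_xlog_entry || made_temp_rel_update)
            printf("mark XID committed in clog\n");
    }

    int
    main(void)
    {
        /* a transaction that only touched temporary relations */
        record_commit(false, false, true);
        return 0;
    }

In that temp-only case nothing is written or flushed to XLOG, but the commit is still recorded in clog, matching the comments' reasoning that a lost clog update after a crash is harmless because no one else will ever care whether that transaction committed.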

src/backend/access/transam/xlog.c

@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.105 2002/09/02 02:47:01 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.106 2002/09/04 20:31:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -131,13 +131,13 @@ bool InRecovery = false;
*
* Note that XLOG records inserted outside transaction control are not
* reflected into MyLastRecPtr. They do, however, cause MyXactMadeXLogEntry
* to be set true. The latter can be used to test whether the current xact
* to be set true. The latter can be used to test whether the current xact
* made any loggable changes (including out-of-xact changes, such as
* sequence updates).
*
* When we insert/update/delete a tuple in a temporary relation, we do not
* make any XLOG record, since we don't care about recovering the state of
* the temp rel after a crash. However, we will still need to remember
* the temp rel after a crash. However, we will still need to remember
* whether our transaction committed or aborted in that case. So, we must
* set MyXactMadeTempRelUpdate true to indicate that the XID will be of
* interest later.
@@ -151,7 +151,7 @@ bool MyXactMadeTempRelUpdate = false;
/*
* ProcLastRecPtr points to the start of the last XLOG record inserted by the
* current backend. It is updated for all inserts, transaction-controlled
* or not. ProcLastRecEnd is similar but points to end+1 of last record.
* or not. ProcLastRecEnd is similar but points to end+1 of last record.
*/
static XLogRecPtr ProcLastRecPtr = {0, 0};
@@ -162,7 +162,7 @@ XLogRecPtr ProcLastRecEnd = {0, 0};
* (which is almost but not quite the same as a pointer to the most recent
* CHECKPOINT record). We update this from the shared-memory copy,
* XLogCtl->Insert.RedoRecPtr, whenever we can safely do so (ie, when we
* hold the Insert lock). See XLogInsert for details. We are also allowed
* hold the Insert lock). See XLogInsert for details. We are also allowed
* to update from XLogCtl->Insert.RedoRecPtr if we hold the info_lck;
* see GetRedoRecPtr.
*/
@@ -766,7 +766,7 @@ begin:;
/*
* We do not acquire SInvalLock here because of possible deadlock.
* Anyone who wants to inspect other procs' logRec must acquire
* WALInsertLock, instead. A better solution would be a per-PROC
* WALInsertLock, instead. A better solution would be a per-PROC
* spinlock, but no time for that before 7.2 --- tgl 12/19/01.
*/
MyProc->logRec = RecPtr;
@@ -1283,26 +1283,27 @@ XLogFlush(XLogRecPtr record)
/*
* If we still haven't flushed to the request point then we have a
* problem; most likely, the requested flush point is past end of XLOG.
* This has been seen to occur when a disk page has a corrupted LSN.
* problem; most likely, the requested flush point is past end of
* XLOG. This has been seen to occur when a disk page has a corrupted
* LSN.
*
* Formerly we treated this as a PANIC condition, but that hurts the
* system's robustness rather than helping it: we do not want to take
* down the whole system due to corruption on one data page. In
* particular, if the bad page is encountered again during recovery then
* we would be unable to restart the database at all! (This scenario
* has actually happened in the field several times with 7.1 releases.
* Note that we cannot get here while InRedo is true, but if the bad
* page is brought in and marked dirty during recovery then
* particular, if the bad page is encountered again during recovery
* then we would be unable to restart the database at all! (This
* scenario has actually happened in the field several times with 7.1
* releases. Note that we cannot get here while InRedo is true, but if
* the bad page is brought in and marked dirty during recovery then
* CreateCheckpoint will try to flush it at the end of recovery.)
*
* The current approach is to ERROR under normal conditions, but only
* WARNING during recovery, so that the system can be brought up even if
* there's a corrupt LSN. Note that for calls from xact.c, the ERROR
* will be promoted to PANIC since xact.c calls this routine inside a
* critical section. However, calls from bufmgr.c are not within
* critical sections and so we will not force a restart for a bad LSN
* on a data page.
* WARNING during recovery, so that the system can be brought up even
* if there's a corrupt LSN. Note that for calls from xact.c, the
* ERROR will be promoted to PANIC since xact.c calls this routine
* inside a critical section. However, calls from bufmgr.c are not
* within critical sections and so we will not force a restart for a
* bad LSN on a data page.
*/
if (XLByteLT(LogwrtResult.Flush, record))
elog(InRecovery ? WARNING : ERROR,
@@ -1565,7 +1566,7 @@ PreallocXlogFiles(XLogRecPtr endptr)
XLByteToPrevSeg(endptr, _logId, _logSeg);
if ((endptr.xrecoff - 1) % XLogSegSize >=
(uint32) (0.75 * XLogSegSize))
(uint32) (0.75 * XLogSegSize))
{
NextLogSeg(_logId, _logSeg);
use_existent = true;
@@ -1618,8 +1619,8 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
/*
* Before deleting the file, see if it can be recycled as
* a future log segment. We allow recycling segments up
* to XLOGfileslop segments beyond the current
* XLOG location.
* to XLOGfileslop segments beyond the current XLOG
* location.
*/
if (InstallXLogFileSegment(endlogId, endlogSeg, path,
true, XLOGfileslop,
@@ -2196,7 +2197,7 @@ ReadControlFile(void)
if (ControlFile->catalog_version_no != CATALOG_VERSION_NO)
elog(PANIC,
"The database cluster was initialized with CATALOG_VERSION_NO %d,\n"
"\tbut the backend was compiled with CATALOG_VERSION_NO %d.\n"
"\tbut the backend was compiled with CATALOG_VERSION_NO %d.\n"
"\tIt looks like you need to initdb.",
ControlFile->catalog_version_no, CATALOG_VERSION_NO);
if (ControlFile->blcksz != BLCKSZ)
@@ -2221,7 +2222,7 @@ ReadControlFile(void)
if (ControlFile->funcMaxArgs != FUNC_MAX_ARGS)
elog(PANIC,
"The database cluster was initialized with FUNC_MAX_ARGS %d,\n"
"The database cluster was initialized with FUNC_MAX_ARGS %d,\n"
"\tbut the backend was compiled with FUNC_MAX_ARGS %d.\n"
"\tIt looks like you need to recompile or initdb.",
ControlFile->funcMaxArgs, FUNC_MAX_ARGS);
@@ -2235,21 +2236,21 @@ ReadControlFile(void)
#else
if (ControlFile->enableIntTimes != FALSE)
elog(PANIC,
"The database cluster was initialized with HAVE_INT64_TIMESTAMP\n"
"\tbut the backend was compiled without HAVE_INT64_TIMESTAMP.\n"
"The database cluster was initialized with HAVE_INT64_TIMESTAMP\n"
"\tbut the backend was compiled without HAVE_INT64_TIMESTAMP.\n"
"\tIt looks like you need to recompile or initdb.");
#endif
if (ControlFile->localeBuflen != LOCALE_NAME_BUFLEN)
elog(PANIC,
"The database cluster was initialized with LOCALE_NAME_BUFLEN %d,\n"
"\tbut the backend was compiled with LOCALE_NAME_BUFLEN %d.\n"
"\tbut the backend was compiled with LOCALE_NAME_BUFLEN %d.\n"
"\tIt looks like you need to initdb.",
ControlFile->localeBuflen, LOCALE_NAME_BUFLEN);
if (setlocale(LC_COLLATE, ControlFile->lc_collate) == NULL)
elog(PANIC,
"The database cluster was initialized with LC_COLLATE '%s',\n"
"The database cluster was initialized with LC_COLLATE '%s',\n"
"\twhich is not recognized by setlocale().\n"
"\tIt looks like you need to initdb.",
ControlFile->lc_collate);
@@ -3019,19 +3020,19 @@ CreateCheckPoint(bool shutdown)
}
/*
* Get UNDO record ptr - this is oldest of PGPROC->logRec values. We do
* this while holding insert lock to ensure that we won't miss any
* Get UNDO record ptr - this is oldest of PGPROC->logRec values. We
* do this while holding insert lock to ensure that we won't miss any
* about-to-commit transactions (UNDO must include all xacts that have
* commits after REDO point).
*
* XXX temporarily ifdef'd out to avoid three-way deadlock condition:
* GetUndoRecPtr needs to grab SInvalLock to ensure that it is looking
* at a stable set of proc records, but grabbing SInvalLock while holding
* WALInsertLock is no good. GetNewTransactionId may cause a WAL record
* to be written while holding XidGenLock, and GetSnapshotData needs to
* get XidGenLock while holding SInvalLock, so there's a risk of deadlock.
* Need to find a better solution. See pgsql-hackers discussion of
* 17-Dec-01.
* at a stable set of proc records, but grabbing SInvalLock while
* holding WALInsertLock is no good. GetNewTransactionId may cause a
* WAL record to be written while holding XidGenLock, and
* GetSnapshotData needs to get XidGenLock while holding SInvalLock,
* so there's a risk of deadlock. Need to find a better solution. See
* pgsql-hackers discussion of 17-Dec-01.
*/
#ifdef NOT_USED
checkPoint.undo = GetUndoRecPtr();
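
The deadlock the comment worries about is the classic inconsistent-lock-ordering cycle. The usual cure, acquiring locks in one agreed global order, is illustrated below with plain pthreads; this is a general sketch, not the backend's LWLock code or its actual resolution of the problem.

/* General illustration of avoiding deadlock by imposing a single global
 * lock order (here: A before B before C), the standard remedy for the
 * kind of three-way cycle described in the comment above.  Plain
 * pthreads, nothing PostgreSQL-specific. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lockA = PTHREAD_MUTEX_INITIALIZER;   /* think "WAL insert" */
static pthread_mutex_t lockB = PTHREAD_MUTEX_INITIALIZER;   /* think "xid gen"    */
static pthread_mutex_t lockC = PTHREAD_MUTEX_INITIALIZER;   /* think "sinval"     */

/* Every code path takes the locks it needs in the fixed order A, B, C,
 * releasing in the reverse order; no cycle can then form. */
static void *worker(void *arg)
{
    const char *name = arg;

    pthread_mutex_lock(&lockA);
    pthread_mutex_lock(&lockB);
    pthread_mutex_lock(&lockC);
    printf("%s: holds A, B, C in the agreed order\n", name);
    pthread_mutex_unlock(&lockC);
    pthread_mutex_unlock(&lockB);
    pthread_mutex_unlock(&lockA);
    return NULL;
}

int main(void)
{
    pthread_t t1, t2;

    pthread_create(&t1, NULL, worker, "worker 1");
    pthread_create(&t2, NULL, worker, "worker 2");
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    return 0;
}
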
@@ -3298,9 +3299,7 @@ assign_xlog_sync_method(const char *method, bool doit, bool interactive)
}
#endif
else
{
return NULL;
}
if (!doit)
return method;


@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.140 2002/09/02 01:05:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.141 2002/09/04 20:31:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -191,7 +191,7 @@ usage(void)
{
fprintf(stderr,
gettext("Usage:\n"
" postgres -boot [-d level] [-D datadir] [-F] [-o file] [-x num] dbname\n"
" postgres -boot [-d level] [-D datadir] [-F] [-o file] [-x num] dbname\n"
" -d 1-5 debug mode\n"
" -D datadir data directory\n"
" -F turn off fsync\n"
@@ -235,9 +235,7 @@ BootstrapMain(int argc, char *argv[])
* If we are running under the postmaster, this is done already.
*/
if (!IsUnderPostmaster)
{
MemoryContextInit();
}
/*
* process command arguments
@@ -260,18 +258,19 @@ BootstrapMain(int argc, char *argv[])
potential_DataDir = optarg;
break;
case 'd':
{
/* Turn on debugging for the bootstrap process. */
char *debugstr = palloc(strlen("debug") + strlen(optarg) + 1);
sprintf(debugstr, "debug%s", optarg);
SetConfigOption("server_min_messages", debugstr,
PGC_POSTMASTER, PGC_S_ARGV);
SetConfigOption("client_min_messages", debugstr,
PGC_POSTMASTER, PGC_S_ARGV);
pfree(debugstr);
{
/* Turn on debugging for the bootstrap process. */
char *debugstr = palloc(strlen("debug") + strlen(optarg) + 1);
sprintf(debugstr, "debug%s", optarg);
SetConfigOption("server_min_messages", debugstr,
PGC_POSTMASTER, PGC_S_ARGV);
SetConfigOption("client_min_messages", debugstr,
PGC_POSTMASTER, PGC_S_ARGV);
pfree(debugstr);
break;
}
break;
}
break;
case 'F':
SetConfigOption("fsync", "false", PGC_POSTMASTER, PGC_S_ARGV);
break;
@@ -391,7 +390,8 @@ BootstrapMain(int argc, char *argv[])
InitDummyProcess(); /* needed to get LWLocks */
CreateDummyCaches();
CreateCheckPoint(false);
SetSavedRedoRecPtr(); /* pass redo ptr back to postmaster */
SetSavedRedoRecPtr(); /* pass redo ptr back to
* postmaster */
proc_exit(0); /* done */
case BS_XLOG_STARTUP:
@@ -587,7 +587,7 @@ DefineAttr(char *name, char *type, int attnum)
namestrcpy(&attrtypes[attnum]->attname, name);
elog(DEBUG3, "column %s %s", NameStr(attrtypes[attnum]->attname), type);
attrtypes[attnum]->attnum = 1 + attnum; /* fillatt */
attrtypes[attnum]->attnum = 1 + attnum; /* fillatt */
typeoid = gettype(type);
@@ -640,14 +640,15 @@ DefineAttr(char *name, char *type, int attnum)
}
attrtypes[attnum]->attcacheoff = -1;
attrtypes[attnum]->atttypmod = -1;
/*
* Mark as "not null" if type is fixed-width and prior columns are too.
* This corresponds to case where column can be accessed directly via
* C struct declaration.
* Mark as "not null" if type is fixed-width and prior columns are
* too. This corresponds to case where column can be accessed directly
* via C struct declaration.
*/
if (attlen > 0)
{
int i;
int i;
for (i = 0; i < attnum; i++)
{
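
The loop truncated by this hunk checks the columns defined so far; per the comment, a column is forced not-null only when it is fixed-width and every earlier column is as well, so that a C struct overlay still gives it a fixed offset. A hypothetical standalone rendering of that rule (the attlen and attnotnull names are borrowed for readability, everything else is invented):

/* Hypothetical sketch of "mark fixed-width columns NOT NULL while all the
 * columns before them are fixed-width too", i.e. while a C struct overlay
 * would still give every column a fixed offset. */
#include <stdio.h>
#include <stdbool.h>

typedef struct
{
    const char *name;
    int         attlen;      /* > 0 means fixed width, -1 means variable */
    bool        attnotnull;
} Column;

int main(void)
{
    Column cols[] = {
        { "id",    4, false },
        { "flags", 2, false },
        { "note", -1, false },   /* variable width: breaks the fixed prefix */
        { "score", 4, false },
    };
    int ncols = 4;

    for (int attnum = 0; attnum < ncols; attnum++)
    {
        if (cols[attnum].attlen > 0)
        {
            bool prefix_fixed = true;

            /* same idea as the truncated loop: inspect all prior columns */
            for (int i = 0; i < attnum; i++)
                if (cols[i].attlen <= 0)
                    prefix_fixed = false;

            cols[attnum].attnotnull = prefix_fixed;
        }
        printf("%-6s attlen=%2d notnull=%d\n",
               cols[attnum].name, cols[attnum].attlen, cols[attnum].attnotnull);
    }
    return 0;
}
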
@@ -829,7 +830,7 @@ cleanup()
* and not an OID at all, until the first reference to a type not known in
* Procid[]. At that point it will read and cache pg_type in the Typ array,
* and subsequently return a real OID (and set the global pointer Ap to
* point at the found row in Typ). So caller must check whether Typ is
* point at the found row in Typ). So caller must check whether Typ is
* still NULL to determine what the return value is!
* ----------------
*/
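
The two-phase lookup this comment describes, answering from a small built-in table until the first unknown name forces the full catalog to be read and cached, is a lazy-cache pattern. A standalone sketch under invented names, not the bootstrap code itself:

/* Sketch of the lazy-cache lookup pattern: answer from a tiny static table
 * until the first miss, then load and use the full table from then on.
 * All names and data here are invented for illustration. */
#include <stdio.h>
#include <string.h>

struct entry { const char *name; int id; };

static const struct entry builtin[] = { { "int4", 1 }, { "text", 2 } };
static struct entry *full_table = NULL;     /* like the Typ cache: NULL until loaded */
static struct entry  loaded[] = { { "int4", 1 }, { "text", 2 }, { "point", 3 } };

static void load_full_table(void)
{
    full_table = loaded;        /* stands in for reading the real catalog */
}

static int lookup(const char *name)
{
    if (full_table == NULL)
    {
        for (size_t i = 0; i < sizeof(builtin) / sizeof(builtin[0]); i++)
            if (strcmp(builtin[i].name, name) == 0)
                return (int) i;          /* NB: an index, not a real id! */
        load_full_table();               /* first unknown name: switch modes */
    }
    for (int i = 0; full_table && i < 3; i++)    /* 3 entries in the loaded table */
        if (strcmp(full_table[i].name, name) == 0)
            return full_table[i].id;     /* now a real id */
    return -1;
}

int main(void)
{
    /* Callers must know whether the cache is loaded to interpret the result,
     * just as the comment above warns about checking Typ for NULL. */
    printf("int4  -> %d (cache %sloaded)\n", lookup("int4"), full_table ? "" : "not ");
    printf("point -> %d (cache %sloaded)\n", lookup("point"), full_table ? "" : "not ");
    return 0;
}
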


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.76 2002/09/02 01:05:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.77 2002/09/04 20:31:13 momjian Exp $
*
* NOTES
* See acl.h.
@@ -91,22 +91,25 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
foreach(j, grantees)
{
PrivGrantee *grantee = (PrivGrantee *) lfirst(j);
AclItem aclitem;
AclItem aclitem;
uint32 idtype;
if (grantee->username)
{
aclitem.ai_id = get_usesysid(grantee->username);
aclitem. ai_id = get_usesysid(grantee->username);
idtype = ACL_IDTYPE_UID;
}
else if (grantee->groupname)
{
aclitem.ai_id = get_grosysid(grantee->groupname);
aclitem. ai_id = get_grosysid(grantee->groupname);
idtype = ACL_IDTYPE_GID;
}
else
{
aclitem.ai_id = ACL_ID_WORLD;
aclitem. ai_id = ACL_ID_WORLD;
idtype = ACL_IDTYPE_WORLD;
}
@@ -376,7 +379,7 @@ ExecuteGrantStmt_Function(GrantStmt *stmt)
char replaces[Natts_pg_proc];
oid = LookupFuncNameTypeNames(func->funcname, func->funcargs,
stmt->is_grant ? "GRANT" : "REVOKE");
stmt->is_grant ? "GRANT" : "REVOKE");
relation = heap_openr(ProcedureRelationName, RowExclusiveLock);
tuple = SearchSysCache(PROCOID,
@@ -569,8 +572,8 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt)
aclcheck_error(ACLCHECK_NOT_OWNER, nspname);
/*
* If there's no ACL, create a default using the pg_namespace.nspowner
* field.
* If there's no ACL, create a default using the
* pg_namespace.nspowner field.
*/
aclDatum = SysCacheGetAttr(NAMESPACENAME, tuple,
Anum_pg_namespace_nspacl,
@@ -1163,8 +1166,8 @@ pg_namespace_aclcheck(Oid nsp_oid, Oid userid, AclMode mode)
Acl *acl;
/*
* If we have been assigned this namespace as a temp namespace,
* assume we have all grantable privileges on it.
* If we have been assigned this namespace as a temp namespace, assume
* we have all grantable privileges on it.
*/
if (isTempNamespace(nsp_oid))
return ACLCHECK_OK;


@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.47 2002/06/20 20:29:26 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.48 2002/09/04 20:31:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -91,8 +91,8 @@ GetDatabasePath(Oid tblNode)
bool
IsSystemRelation(Relation relation)
{
return IsSystemNamespace(RelationGetNamespace(relation)) ||
IsToastNamespace(RelationGetNamespace(relation));
return IsSystemNamespace(RelationGetNamespace(relation)) ||
IsToastNamespace(RelationGetNamespace(relation));
}
/*
@@ -104,10 +104,10 @@ IsSystemRelation(Relation relation)
bool
IsSystemClass(Form_pg_class reltuple)
{
Oid relnamespace = reltuple->relnamespace;
Oid relnamespace = reltuple->relnamespace;
return IsSystemNamespace(relnamespace) ||
IsToastNamespace(relnamespace);
return IsSystemNamespace(relnamespace) ||
IsToastNamespace(relnamespace);
}
/*
@@ -129,9 +129,9 @@ IsToastRelation(Relation relation)
bool
IsToastClass(Form_pg_class reltuple)
{
Oid relnamespace = reltuple->relnamespace;
Oid relnamespace = reltuple->relnamespace;
return IsToastNamespace(relnamespace);
return IsToastNamespace(relnamespace);
}
/*


@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/dependency.c,v 1.8 2002/08/02 18:15:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/dependency.c,v 1.9 2002/09/04 20:31:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -69,46 +69,46 @@ typedef enum ObjectClasses
/* expansible list of ObjectAddresses */
typedef struct ObjectAddresses
{
ObjectAddress *refs; /* => palloc'd array */
int numrefs; /* current number of references */
int maxrefs; /* current size of palloc'd array */
struct ObjectAddresses *link; /* list link for use in recursion */
ObjectAddress *refs; /* => palloc'd array */
int numrefs; /* current number of references */
int maxrefs; /* current size of palloc'd array */
struct ObjectAddresses *link; /* list link for use in recursion */
} ObjectAddresses;
/* for find_expr_references_walker */
typedef struct
{
ObjectAddresses addrs; /* addresses being accumulated */
List *rtables; /* list of rangetables to resolve Vars */
ObjectAddresses addrs; /* addresses being accumulated */
List *rtables; /* list of rangetables to resolve Vars */
} find_expr_references_context;
/*
* Because not all system catalogs have predetermined OIDs, we build a table
* mapping between ObjectClasses and OIDs. This is done at most once per
* mapping between ObjectClasses and OIDs. This is done at most once per
* backend run, to minimize lookup overhead.
*/
static bool object_classes_initialized = false;
static bool object_classes_initialized = false;
static Oid object_classes[MAX_OCLASS];
static bool recursiveDeletion(const ObjectAddress *object,
DropBehavior behavior,
const ObjectAddress *callingObject,
ObjectAddresses *pending,
Relation depRel);
DropBehavior behavior,
const ObjectAddress *callingObject,
ObjectAddresses *pending,
Relation depRel);
static void doDeletion(const ObjectAddress *object);
static bool find_expr_references_walker(Node *node,
find_expr_references_context *context);
find_expr_references_context *context);
static void eliminate_duplicate_dependencies(ObjectAddresses *addrs);
static int object_address_comparator(const void *a, const void *b);
static void init_object_addresses(ObjectAddresses *addrs);
static void add_object_address(ObjectClasses oclass, Oid objectId, int32 subId,
ObjectAddresses *addrs);
ObjectAddresses *addrs);
static void add_exact_object_address(const ObjectAddress *object,
ObjectAddresses *addrs);
ObjectAddresses *addrs);
static void del_object_address(const ObjectAddress *object,
ObjectAddresses *addrs);
ObjectAddresses *addrs);
static void del_object_address_by_index(int index, ObjectAddresses *addrs);
static void term_object_addresses(ObjectAddresses *addrs);
static void init_object_classes(void);
@@ -131,12 +131,12 @@ void
performDeletion(const ObjectAddress *object,
DropBehavior behavior)
{
char *objDescription;
Relation depRel;
char *objDescription;
Relation depRel;
/*
* Get object description for possible use in failure message.
* Must do this before deleting it ...
* Get object description for possible use in failure message. Must do
* this before deleting it ...
*/
objDescription = getObjectDescription(object);
@@ -165,7 +165,7 @@ performDeletion(const ObjectAddress *object,
* callingObject is NULL at the outer level, else identifies the object that
* we recursed from (the reference object that someone else needs to delete).
* pending is a linked list of objects that outer recursion levels want to
* delete. We remove the target object from any outer-level list it may
* delete. We remove the target object from any outer-level list it may
* appear in.
* depRel is the already-open pg_depend relation.
*
@@ -178,7 +178,7 @@ performDeletion(const ObjectAddress *object,
* This is even more complex than one could wish, because it is possible for
* the same pair of objects to be related by both NORMAL and AUTO (or IMPLICIT)
* dependencies. (Since one or both paths might be indirect, it's very hard
* to prevent this; we must cope instead.) If there is an AUTO/IMPLICIT
* to prevent this; we must cope instead.) If there is an AUTO/IMPLICIT
* deletion path then we should perform the deletion, and not fail because
* of the NORMAL dependency. So, when we hit a NORMAL dependency we don't
* immediately decide we've failed; instead we stick the NORMAL dependent
@@ -191,7 +191,7 @@ performDeletion(const ObjectAddress *object,
*
* Note: in the case where the AUTO path is traversed first, we will never
* see the NORMAL dependency path because of the pg_depend removals done in
* recursive executions of step 1. The pending list is necessary essentially
* recursive executions of step 1. The pending list is necessary essentially
* just to make the behavior independent of the order in which pg_depend
* entries are visited.
*/
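
The pending-list bookkeeping these comments describe, parking objects that looked undeletable along one path and striking them off if another path deletes them anyway, can be modelled in a few lines. The object ids and visit order below are invented; only the pattern is the point.

/* Standalone model of the "pending" bookkeeping described above: objects
 * reached through a restricting (NORMAL) edge are parked in a pending list;
 * if some other, permissive path deletes them first they are removed from
 * the list, and only entries still pending at the end count as failures. */
#include <stdio.h>

#define MAX 8

static int pending[MAX];
static int npending = 0;

static void add_pending(int obj)
{
    pending[npending++] = obj;
}

static void drop_pending(int obj)        /* object got deleted some other way */
{
    for (int i = npending - 1; i >= 0; i--)
        if (pending[i] == obj)
            pending[i] = pending[--npending];
}

int main(void)
{
    /* Path 1 (NORMAL dependency): object 42 looks undeletable, park it. */
    add_pending(42);
    /* Path 2 (AUTO dependency): object 42 is deleted after all, unpark it. */
    drop_pending(42);
    /* Object 7 is reached only through a NORMAL edge and never deleted. */
    add_pending(7);

    if (npending == 0)
        printf("drop succeeds regardless of visit order\n");
    else
        printf("drop fails: %d object(s) still depend on the target\n", npending);
    return 0;
}
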
@@ -202,16 +202,16 @@ recursiveDeletion(const ObjectAddress *object,
ObjectAddresses *pending,
Relation depRel)
{
bool ok = true;
char *objDescription;
ObjectAddresses mypending;
ScanKeyData key[3];
int nkeys;
SysScanDesc scan;
HeapTuple tup;
ObjectAddress otherObject;
ObjectAddress owningObject;
bool amOwned = false;
bool ok = true;
char *objDescription;
ObjectAddresses mypending;
ScanKeyData key[3];
int nkeys;
SysScanDesc scan;
HeapTuple tup;
ObjectAddress otherObject;
ObjectAddress owningObject;
bool amOwned = false;
/*
* Get object description for possible use in messages. Must do this
@@ -231,8 +231,8 @@ recursiveDeletion(const ObjectAddress *object,
* ensures that we avoid infinite recursion in the case of cycles.
* Also, some dependency types require extra processing here.
*
* When dropping a whole object (subId = 0), remove all pg_depend
* records for its sub-objects too.
* When dropping a whole object (subId = 0), remove all pg_depend records
* for its sub-objects too.
*/
ScanKeyEntryInitialize(&key[0], 0x0,
Anum_pg_depend_classid, F_OIDEQ,
@@ -255,7 +255,7 @@ recursiveDeletion(const ObjectAddress *object,
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(tup);
Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(tup);
otherObject.classId = foundDep->refclassid;
otherObject.objectId = foundDep->refobjid;
@@ -268,9 +268,10 @@ recursiveDeletion(const ObjectAddress *object,
/* no problem */
break;
case DEPENDENCY_INTERNAL:
/*
* This object is part of the internal implementation
* of another object. We have three cases:
* This object is part of the internal implementation of
* another object. We have three cases:
*
* 1. At the outermost recursion level, disallow the DROP.
* (We just elog here, rather than considering this drop
@@ -279,30 +280,32 @@ recursiveDeletion(const ObjectAddress *object,
*/
if (callingObject == NULL)
{
char *otherObjDesc = getObjectDescription(&otherObject);
char *otherObjDesc = getObjectDescription(&otherObject);
elog(ERROR, "Cannot drop %s because %s requires it"
"\n\tYou may drop %s instead",
objDescription, otherObjDesc, otherObjDesc);
}
/*
* 2. When recursing from the other end of this dependency,
* it's okay to continue with the deletion. This holds when
* recursing from a whole object that includes the nominal
* other end as a component, too.
* 2. When recursing from the other end of this
* dependency, it's okay to continue with the deletion.
* This holds when recursing from a whole object that
* includes the nominal other end as a component, too.
*/
if (callingObject->classId == otherObject.classId &&
callingObject->objectId == otherObject.objectId &&
(callingObject->objectSubId == otherObject.objectSubId ||
callingObject->objectSubId == 0))
(callingObject->objectSubId == otherObject.objectSubId ||
callingObject->objectSubId == 0))
break;
/*
* 3. When recursing from anyplace else, transform this
* deletion request into a delete of the other object.
* (This will be an error condition iff RESTRICT mode.)
* In this case we finish deleting my dependencies except
* for the INTERNAL link, which will be needed to cause
* the owning object to recurse back to me.
* (This will be an error condition iff RESTRICT mode.) In
* this case we finish deleting my dependencies except for
* the INTERNAL link, which will be needed to cause the
* owning object to recurse back to me.
*/
if (amOwned) /* shouldn't happen */
elog(ERROR, "recursiveDeletion: multiple INTERNAL dependencies for %s",
@@ -312,6 +315,7 @@ recursiveDeletion(const ObjectAddress *object,
/* "continue" bypasses the simple_heap_delete call below */
continue;
case DEPENDENCY_PIN:
/*
* Should not happen; PIN dependencies should have zeroes
* in the depender fields...
@@ -331,10 +335,10 @@ recursiveDeletion(const ObjectAddress *object,
systable_endscan(scan);
/*
* CommandCounterIncrement here to ensure that preceding changes
* are all visible; in particular, that the above deletions of pg_depend
* entries are visible. That prevents infinite recursion in case of
* a dependency loop (which is perfectly legal).
* CommandCounterIncrement here to ensure that preceding changes are
* all visible; in particular, that the above deletions of pg_depend
* entries are visible. That prevents infinite recursion in case of a
* dependency loop (which is perfectly legal).
*/
CommandCounterIncrement();
@@ -368,21 +372,21 @@ recursiveDeletion(const ObjectAddress *object,
/*
* Step 2: scan pg_depend records that link to this object, showing
* the things that depend on it. Recursively delete those things.
* (We don't delete the pg_depend records here, as the recursive call
* will do that.) Note it's important to delete the dependent objects
* the things that depend on it. Recursively delete those things. (We
* don't delete the pg_depend records here, as the recursive call will
* do that.) Note it's important to delete the dependent objects
* before the referenced one, since the deletion routines might do
* things like try to update the pg_class record when deleting a
* check constraint.
* things like try to update the pg_class record when deleting a check
* constraint.
*
* Again, when dropping a whole object (subId = 0), find pg_depend
* records for its sub-objects too.
*
* NOTE: because we are using SnapshotNow, if a recursive call deletes
* any pg_depend tuples that our scan hasn't yet visited, we will not see
* them as good when we do visit them. This is essential for correct
* behavior if there are multiple dependency paths between two objects
* --- else we might try to delete an already-deleted object.
* any pg_depend tuples that our scan hasn't yet visited, we will not
* see them as good when we do visit them. This is essential for
* correct behavior if there are multiple dependency paths between two
* objects --- else we might try to delete an already-deleted object.
*/
ScanKeyEntryInitialize(&key[0], 0x0,
Anum_pg_depend_refclassid, F_OIDEQ,
@@ -405,7 +409,7 @@ recursiveDeletion(const ObjectAddress *object,
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(tup);
Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(tup);
otherObject.classId = foundDep->classid;
otherObject.objectId = foundDep->objid;
@@ -418,9 +422,9 @@ recursiveDeletion(const ObjectAddress *object,
{
/*
* We've found a restricted object (or at least one
* that's not deletable along this path). Log for later
* processing. (Note it's okay if the same object gets
* into mypending multiple times.)
* that's not deletable along this path). Log for
* later processing. (Note it's okay if the same
* object gets into mypending multiple times.)
*/
add_exact_object_address(&otherObject, &mypending);
}
@@ -437,6 +441,7 @@ recursiveDeletion(const ObjectAddress *object,
break;
case DEPENDENCY_AUTO:
case DEPENDENCY_INTERNAL:
/*
* We propagate the DROP without complaint even in the
* RESTRICT case. (However, normal dependencies on the
@@ -451,6 +456,7 @@ recursiveDeletion(const ObjectAddress *object,
ok = false;
break;
case DEPENDENCY_PIN:
/*
* For a PIN dependency we just elog immediately; there
* won't be any others to report.
@@ -469,19 +475,19 @@ recursiveDeletion(const ObjectAddress *object,
/*
* If we found no restricted objects, or got rid of them all via other
* paths, we're in good shape. Otherwise continue step 2 by processing
* the remaining restricted objects.
* paths, we're in good shape. Otherwise continue step 2 by
* processing the remaining restricted objects.
*/
if (mypending.numrefs > 0)
{
/*
* Successively extract and delete each remaining object.
* Note that the right things will happen if some of these objects
* Successively extract and delete each remaining object. Note
* that the right things will happen if some of these objects
* depend on others: we'll report/delete each one exactly once.
*/
while (mypending.numrefs > 0)
{
ObjectAddress otherObject = mypending.refs[0];
ObjectAddress otherObject = mypending.refs[0];
del_object_address_by_index(0, &mypending);
@@ -508,19 +514,21 @@ recursiveDeletion(const ObjectAddress *object,
doDeletion(object);
/*
* Delete any comments associated with this object. (This is a convenient
* place to do it instead of having every object type know to do it.)
* Delete any comments associated with this object. (This is a
* convenient place to do it instead of having every object type know
* to do it.)
*/
DeleteComments(object->objectId, object->classId, object->objectSubId);
/*
* If this object is mentioned in any caller's pending list, remove it.
* If this object is mentioned in any caller's pending list, remove
* it.
*/
del_object_address(object, pending);
/*
* CommandCounterIncrement here to ensure that preceding changes
* are all visible.
* CommandCounterIncrement here to ensure that preceding changes are
* all visible.
*/
CommandCounterIncrement();
@@ -543,37 +551,37 @@ doDeletion(const ObjectAddress *object)
switch (getObjectClass(object))
{
case OCLASS_CLASS:
{
HeapTuple relTup;
char relKind;
/*
* Need the relkind to figure out how to drop.
*/
relTup = SearchSysCache(RELOID,
ObjectIdGetDatum(object->objectId),
0, 0, 0);
if (!HeapTupleIsValid(relTup))
elog(ERROR, "doDeletion: Relation %u does not exist",
object->objectId);
relKind = ((Form_pg_class) GETSTRUCT(relTup))->relkind;
ReleaseSysCache(relTup);
if (relKind == RELKIND_INDEX)
{
Assert(object->objectSubId == 0);
index_drop(object->objectId);
}
else
{
if (object->objectSubId != 0)
RemoveAttributeById(object->objectId,
object->objectSubId);
HeapTuple relTup;
char relKind;
/*
* Need the relkind to figure out how to drop.
*/
relTup = SearchSysCache(RELOID,
ObjectIdGetDatum(object->objectId),
0, 0, 0);
if (!HeapTupleIsValid(relTup))
elog(ERROR, "doDeletion: Relation %u does not exist",
object->objectId);
relKind = ((Form_pg_class) GETSTRUCT(relTup))->relkind;
ReleaseSysCache(relTup);
if (relKind == RELKIND_INDEX)
{
Assert(object->objectSubId == 0);
index_drop(object->objectId);
}
else
heap_drop_with_catalog(object->objectId);
{
if (object->objectSubId != 0)
RemoveAttributeById(object->objectId,
object->objectSubId);
else
heap_drop_with_catalog(object->objectId);
}
break;
}
break;
}
case OCLASS_PROC:
RemoveFunctionById(object->objectId);
@@ -644,7 +652,7 @@ doDeletion(const ObjectAddress *object)
* It can be NIL if no such variables are expected.
*
* XXX is it important to create dependencies on the datatypes mentioned in
* the expression? In most cases this would be redundant (eg, a ref to an
* the expression? In most cases this would be redundant (eg, a ref to an
* operator indirectly references its input and output datatypes), but I'm
* not quite convinced there are no cases where we need it.
*/
@@ -653,7 +661,7 @@ recordDependencyOnExpr(const ObjectAddress *depender,
Node *expr, List *rtable,
DependencyType behavior)
{
find_expr_references_context context;
find_expr_references_context context;
init_object_addresses(&context.addrs);
@@ -755,8 +763,8 @@ find_expr_references_walker(Node *node,
bool result;
/*
* Add whole-relation refs for each plain relation mentioned in the
* subquery's rtable. (Note: query_tree_walker takes care of
* Add whole-relation refs for each plain relation mentioned in
* the subquery's rtable. (Note: query_tree_walker takes care of
* recursing into RTE_FUNCTION and RTE_SUBQUERY RTEs, so no need
* to do that here.)
*/
@@ -787,7 +795,7 @@ find_expr_references_walker(Node *node,
static void
eliminate_duplicate_dependencies(ObjectAddresses *addrs)
{
ObjectAddress *priorobj;
ObjectAddress *priorobj;
int oldref,
newrefs;
@@ -803,13 +811,14 @@ eliminate_duplicate_dependencies(ObjectAddresses *addrs)
newrefs = 1;
for (oldref = 1; oldref < addrs->numrefs; oldref++)
{
ObjectAddress *thisobj = addrs->refs + oldref;
ObjectAddress *thisobj = addrs->refs + oldref;
if (priorobj->classId == thisobj->classId &&
priorobj->objectId == thisobj->objectId)
{
if (priorobj->objectSubId == thisobj->objectSubId)
continue; /* identical, so drop thisobj */
/*
* If we have a whole-object reference and a reference to a
* part of the same object, we don't need the whole-object
@@ -852,9 +861,10 @@ object_address_comparator(const void *a, const void *b)
return -1;
if (obja->objectId > objb->objectId)
return 1;
/*
* We sort the subId as an unsigned int so that 0 will come first.
* See logic in eliminate_duplicate_dependencies.
* We sort the subId as an unsigned int so that 0 will come first. See
* logic in eliminate_duplicate_dependencies.
*/
if ((unsigned int) obja->objectSubId < (unsigned int) objb->objectSubId)
return -1;
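
The unsigned cast matters because objectSubId is a signed 32-bit field: reinterpreted as unsigned, 0 is the smallest key, so whole-object entries sort ahead of everything else even if a negative subId ever showed up. A small self-contained demonstration with arbitrary values:

/* Demonstrates why comparing subIds as unsigned puts 0 (whole object)
 * first: negative values wrap around to very large unsigned numbers. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static int cmp_as_unsigned(const void *a, const void *b)
{
    uint32_t ua = (uint32_t) *(const int32_t *) a;
    uint32_t ub = (uint32_t) *(const int32_t *) b;

    if (ua < ub)
        return -1;
    if (ua > ub)
        return 1;
    return 0;
}

int main(void)
{
    int32_t subids[] = { 3, -2, 0, 1 };

    qsort(subids, 4, sizeof(int32_t), cmp_as_unsigned);

    /* prints: 0 1 3 -2  -- the whole-object entry (0) leads the group */
    for (int i = 0; i < 4; i++)
        printf("%d ", subids[i]);
    printf("\n");
    return 0;
}
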
@@ -894,7 +904,7 @@ static void
add_object_address(ObjectClasses oclass, Oid objectId, int32 subId,
ObjectAddresses *addrs)
{
ObjectAddress *item;
ObjectAddress *item;
/* enlarge array if needed */
if (addrs->numrefs >= addrs->maxrefs)
@@ -920,7 +930,7 @@ static void
add_exact_object_address(const ObjectAddress *object,
ObjectAddresses *addrs)
{
ObjectAddress *item;
ObjectAddress *item;
/* enlarge array if needed */
if (addrs->numrefs >= addrs->maxrefs)
@@ -937,7 +947,7 @@ add_exact_object_address(const ObjectAddress *object,
/*
* If an ObjectAddresses array contains any matches for the given object,
* remove it/them. Also, do the same in any linked ObjectAddresses arrays.
* remove it/them. Also, do the same in any linked ObjectAddresses arrays.
*/
static void
del_object_address(const ObjectAddress *object,
@@ -948,9 +958,9 @@ del_object_address(const ObjectAddress *object,
int i;
/* Scan backwards to simplify deletion logic. */
for (i = addrs->numrefs-1; i >= 0; i--)
for (i = addrs->numrefs - 1; i >= 0; i--)
{
ObjectAddress *thisobj = addrs->refs + i;
ObjectAddress *thisobj = addrs->refs + i;
if (object->classId == thisobj->classId &&
object->objectId == thisobj->objectId)
@@ -1134,150 +1144,150 @@ getObjectDescription(const ObjectAddress *object)
break;
case OCLASS_CAST:
{
Relation castDesc;
ScanKeyData skey[1];
SysScanDesc rcscan;
HeapTuple tup;
Form_pg_cast castForm;
{
Relation castDesc;
ScanKeyData skey[1];
SysScanDesc rcscan;
HeapTuple tup;
Form_pg_cast castForm;
castDesc = heap_openr(CastRelationName, AccessShareLock);
castDesc = heap_openr(CastRelationName, AccessShareLock);
ScanKeyEntryInitialize(&skey[0], 0x0,
ObjectIdAttributeNumber, F_OIDEQ,
ObjectIdGetDatum(object->objectId));
ScanKeyEntryInitialize(&skey[0], 0x0,
ObjectIdAttributeNumber, F_OIDEQ,
ObjectIdGetDatum(object->objectId));
rcscan = systable_beginscan(castDesc, CastOidIndex, true,
SnapshotNow, 1, skey);
rcscan = systable_beginscan(castDesc, CastOidIndex, true,
SnapshotNow, 1, skey);
tup = systable_getnext(rcscan);
tup = systable_getnext(rcscan);
if (!HeapTupleIsValid(tup))
elog(ERROR, "getObjectDescription: Cast %u does not exist",
object->objectId);
if (!HeapTupleIsValid(tup))
elog(ERROR, "getObjectDescription: Cast %u does not exist",
object->objectId);
castForm = (Form_pg_cast) GETSTRUCT(tup);
castForm = (Form_pg_cast) GETSTRUCT(tup);
appendStringInfo(&buffer, "cast from %s to %s",
format_type_be(castForm->castsource),
format_type_be(castForm->casttarget));
appendStringInfo(&buffer, "cast from %s to %s",
format_type_be(castForm->castsource),
format_type_be(castForm->casttarget));
systable_endscan(rcscan);
heap_close(castDesc, AccessShareLock);
break;
}
systable_endscan(rcscan);
heap_close(castDesc, AccessShareLock);
break;
}
case OCLASS_CONSTRAINT:
{
Relation conDesc;
ScanKeyData skey[1];
SysScanDesc rcscan;
HeapTuple tup;
Form_pg_constraint con;
conDesc = heap_openr(ConstraintRelationName, AccessShareLock);
ScanKeyEntryInitialize(&skey[0], 0x0,
ObjectIdAttributeNumber, F_OIDEQ,
ObjectIdGetDatum(object->objectId));
rcscan = systable_beginscan(conDesc, ConstraintOidIndex, true,
SnapshotNow, 1, skey);
tup = systable_getnext(rcscan);
if (!HeapTupleIsValid(tup))
elog(ERROR, "getObjectDescription: Constraint %u does not exist",
object->objectId);
con = (Form_pg_constraint) GETSTRUCT(tup);
if (OidIsValid(con->conrelid))
{
appendStringInfo(&buffer, "constraint %s on ",
NameStr(con->conname));
getRelationDescription(&buffer, con->conrelid);
}
else
{
appendStringInfo(&buffer, "constraint %s",
NameStr(con->conname));
}
Relation conDesc;
ScanKeyData skey[1];
SysScanDesc rcscan;
HeapTuple tup;
Form_pg_constraint con;
systable_endscan(rcscan);
heap_close(conDesc, AccessShareLock);
break;
}
conDesc = heap_openr(ConstraintRelationName, AccessShareLock);
ScanKeyEntryInitialize(&skey[0], 0x0,
ObjectIdAttributeNumber, F_OIDEQ,
ObjectIdGetDatum(object->objectId));
rcscan = systable_beginscan(conDesc, ConstraintOidIndex, true,
SnapshotNow, 1, skey);
tup = systable_getnext(rcscan);
if (!HeapTupleIsValid(tup))
elog(ERROR, "getObjectDescription: Constraint %u does not exist",
object->objectId);
con = (Form_pg_constraint) GETSTRUCT(tup);
if (OidIsValid(con->conrelid))
{
appendStringInfo(&buffer, "constraint %s on ",
NameStr(con->conname));
getRelationDescription(&buffer, con->conrelid);
}
else
{
appendStringInfo(&buffer, "constraint %s",
NameStr(con->conname));
}
systable_endscan(rcscan);
heap_close(conDesc, AccessShareLock);
break;
}
case OCLASS_CONVERSION:
{
HeapTuple conTup;
{
HeapTuple conTup;
conTup = SearchSysCache(CONOID,
ObjectIdGetDatum(object->objectId),
0, 0, 0);
if (!HeapTupleIsValid(conTup))
elog(ERROR, "getObjectDescription: Conversion %u does not exist",
object->objectId);
appendStringInfo(&buffer, "conversion %s",
NameStr(((Form_pg_conversion) GETSTRUCT(conTup))->conname));
ReleaseSysCache(conTup);
break;
}
conTup = SearchSysCache(CONOID,
ObjectIdGetDatum(object->objectId),
0, 0, 0);
if (!HeapTupleIsValid(conTup))
elog(ERROR, "getObjectDescription: Conversion %u does not exist",
object->objectId);
appendStringInfo(&buffer, "conversion %s",
NameStr(((Form_pg_conversion) GETSTRUCT(conTup))->conname));
ReleaseSysCache(conTup);
break;
}
case OCLASS_DEFAULT:
{
Relation attrdefDesc;
ScanKeyData skey[1];
SysScanDesc adscan;
HeapTuple tup;
Form_pg_attrdef attrdef;
ObjectAddress colobject;
{
Relation attrdefDesc;
ScanKeyData skey[1];
SysScanDesc adscan;
HeapTuple tup;
Form_pg_attrdef attrdef;
ObjectAddress colobject;
attrdefDesc = heap_openr(AttrDefaultRelationName, AccessShareLock);
attrdefDesc = heap_openr(AttrDefaultRelationName, AccessShareLock);
ScanKeyEntryInitialize(&skey[0], 0x0,
ObjectIdAttributeNumber, F_OIDEQ,
ObjectIdGetDatum(object->objectId));
ScanKeyEntryInitialize(&skey[0], 0x0,
ObjectIdAttributeNumber, F_OIDEQ,
ObjectIdGetDatum(object->objectId));
adscan = systable_beginscan(attrdefDesc, AttrDefaultOidIndex, true,
SnapshotNow, 1, skey);
adscan = systable_beginscan(attrdefDesc, AttrDefaultOidIndex, true,
SnapshotNow, 1, skey);
tup = systable_getnext(adscan);
tup = systable_getnext(adscan);
if (!HeapTupleIsValid(tup))
elog(ERROR, "getObjectDescription: Default %u does not exist",
object->objectId);
if (!HeapTupleIsValid(tup))
elog(ERROR, "getObjectDescription: Default %u does not exist",
object->objectId);
attrdef = (Form_pg_attrdef) GETSTRUCT(tup);
attrdef = (Form_pg_attrdef) GETSTRUCT(tup);
colobject.classId = RelOid_pg_class;
colobject.objectId = attrdef->adrelid;
colobject.objectSubId = attrdef->adnum;
colobject.classId = RelOid_pg_class;
colobject.objectId = attrdef->adrelid;
colobject.objectSubId = attrdef->adnum;
appendStringInfo(&buffer, "default for %s",
getObjectDescription(&colobject));
appendStringInfo(&buffer, "default for %s",
getObjectDescription(&colobject));
systable_endscan(adscan);
heap_close(attrdefDesc, AccessShareLock);
break;
}
systable_endscan(adscan);
heap_close(attrdefDesc, AccessShareLock);
break;
}
case OCLASS_LANGUAGE:
{
HeapTuple langTup;
{
HeapTuple langTup;
langTup = SearchSysCache(LANGOID,
ObjectIdGetDatum(object->objectId),
0, 0, 0);
if (!HeapTupleIsValid(langTup))
elog(ERROR, "getObjectDescription: Language %u does not exist",
object->objectId);
appendStringInfo(&buffer, "language %s",
NameStr(((Form_pg_language) GETSTRUCT(langTup))->lanname));
ReleaseSysCache(langTup);
break;
}
langTup = SearchSysCache(LANGOID,
ObjectIdGetDatum(object->objectId),
0, 0, 0);
if (!HeapTupleIsValid(langTup))
elog(ERROR, "getObjectDescription: Language %u does not exist",
object->objectId);
appendStringInfo(&buffer, "language %s",
NameStr(((Form_pg_language) GETSTRUCT(langTup))->lanname));
ReleaseSysCache(langTup);
break;
}
case OCLASS_OPERATOR:
appendStringInfo(&buffer, "operator %s",
@@ -1285,126 +1295,126 @@ getObjectDescription(const ObjectAddress *object)
break;
case OCLASS_OPCLASS:
{
HeapTuple opcTup;
Form_pg_opclass opcForm;
HeapTuple amTup;
Form_pg_am amForm;
char *nspname;
{
HeapTuple opcTup;
Form_pg_opclass opcForm;
HeapTuple amTup;
Form_pg_am amForm;
char *nspname;
opcTup = SearchSysCache(CLAOID,
ObjectIdGetDatum(object->objectId),
0, 0, 0);
if (!HeapTupleIsValid(opcTup))
elog(ERROR, "cache lookup of opclass %u failed",
object->objectId);
opcForm = (Form_pg_opclass) GETSTRUCT(opcTup);
opcTup = SearchSysCache(CLAOID,
ObjectIdGetDatum(object->objectId),
0, 0, 0);
if (!HeapTupleIsValid(opcTup))
elog(ERROR, "cache lookup of opclass %u failed",
object->objectId);
opcForm = (Form_pg_opclass) GETSTRUCT(opcTup);
/* Qualify the name if not visible in search path */
if (OpclassIsVisible(object->objectId))
nspname = NULL;
else
nspname = get_namespace_name(opcForm->opcnamespace);
/* Qualify the name if not visible in search path */
if (OpclassIsVisible(object->objectId))
nspname = NULL;
else
nspname = get_namespace_name(opcForm->opcnamespace);
appendStringInfo(&buffer, "operator class %s",
quote_qualified_identifier(nspname,
NameStr(opcForm->opcname)));
appendStringInfo(&buffer, "operator class %s",
quote_qualified_identifier(nspname,
NameStr(opcForm->opcname)));
amTup = SearchSysCache(AMOID,
ObjectIdGetDatum(opcForm->opcamid),
0, 0, 0);
if (!HeapTupleIsValid(amTup))
elog(ERROR, "syscache lookup for AM %u failed",
opcForm->opcamid);
amForm = (Form_pg_am) GETSTRUCT(amTup);
amTup = SearchSysCache(AMOID,
ObjectIdGetDatum(opcForm->opcamid),
0, 0, 0);
if (!HeapTupleIsValid(amTup))
elog(ERROR, "syscache lookup for AM %u failed",
opcForm->opcamid);
amForm = (Form_pg_am) GETSTRUCT(amTup);
appendStringInfo(&buffer, " for %s",
NameStr(amForm->amname));
appendStringInfo(&buffer, " for %s",
NameStr(amForm->amname));
ReleaseSysCache(amTup);
ReleaseSysCache(opcTup);
break;
}
ReleaseSysCache(amTup);
ReleaseSysCache(opcTup);
break;
}
case OCLASS_REWRITE:
{
Relation ruleDesc;
ScanKeyData skey[1];
SysScanDesc rcscan;
HeapTuple tup;
Form_pg_rewrite rule;
{
Relation ruleDesc;
ScanKeyData skey[1];
SysScanDesc rcscan;
HeapTuple tup;
Form_pg_rewrite rule;
ruleDesc = heap_openr(RewriteRelationName, AccessShareLock);
ruleDesc = heap_openr(RewriteRelationName, AccessShareLock);
ScanKeyEntryInitialize(&skey[0], 0x0,
ObjectIdAttributeNumber, F_OIDEQ,
ObjectIdGetDatum(object->objectId));
ScanKeyEntryInitialize(&skey[0], 0x0,
ObjectIdAttributeNumber, F_OIDEQ,
ObjectIdGetDatum(object->objectId));
rcscan = systable_beginscan(ruleDesc, RewriteOidIndex, true,
SnapshotNow, 1, skey);
rcscan = systable_beginscan(ruleDesc, RewriteOidIndex, true,
SnapshotNow, 1, skey);
tup = systable_getnext(rcscan);
tup = systable_getnext(rcscan);
if (!HeapTupleIsValid(tup))
elog(ERROR, "getObjectDescription: Rule %u does not exist",
object->objectId);
if (!HeapTupleIsValid(tup))
elog(ERROR, "getObjectDescription: Rule %u does not exist",
object->objectId);
rule = (Form_pg_rewrite) GETSTRUCT(tup);
rule = (Form_pg_rewrite) GETSTRUCT(tup);
appendStringInfo(&buffer, "rule %s on ",
NameStr(rule->rulename));
getRelationDescription(&buffer, rule->ev_class);
appendStringInfo(&buffer, "rule %s on ",
NameStr(rule->rulename));
getRelationDescription(&buffer, rule->ev_class);
systable_endscan(rcscan);
heap_close(ruleDesc, AccessShareLock);
break;
}
systable_endscan(rcscan);
heap_close(ruleDesc, AccessShareLock);
break;
}
case OCLASS_TRIGGER:
{
Relation trigDesc;
ScanKeyData skey[1];
SysScanDesc tgscan;
HeapTuple tup;
Form_pg_trigger trig;
{
Relation trigDesc;
ScanKeyData skey[1];
SysScanDesc tgscan;
HeapTuple tup;
Form_pg_trigger trig;
trigDesc = heap_openr(TriggerRelationName, AccessShareLock);
trigDesc = heap_openr(TriggerRelationName, AccessShareLock);
ScanKeyEntryInitialize(&skey[0], 0x0,
ObjectIdAttributeNumber, F_OIDEQ,
ObjectIdGetDatum(object->objectId));
ScanKeyEntryInitialize(&skey[0], 0x0,
ObjectIdAttributeNumber, F_OIDEQ,
ObjectIdGetDatum(object->objectId));
tgscan = systable_beginscan(trigDesc, TriggerOidIndex, true,
SnapshotNow, 1, skey);
tgscan = systable_beginscan(trigDesc, TriggerOidIndex, true,
SnapshotNow, 1, skey);
tup = systable_getnext(tgscan);
tup = systable_getnext(tgscan);
if (!HeapTupleIsValid(tup))
elog(ERROR, "getObjectDescription: Trigger %u does not exist",
object->objectId);
if (!HeapTupleIsValid(tup))
elog(ERROR, "getObjectDescription: Trigger %u does not exist",
object->objectId);
trig = (Form_pg_trigger) GETSTRUCT(tup);
trig = (Form_pg_trigger) GETSTRUCT(tup);
appendStringInfo(&buffer, "trigger %s on ",
NameStr(trig->tgname));
getRelationDescription(&buffer, trig->tgrelid);
appendStringInfo(&buffer, "trigger %s on ",
NameStr(trig->tgname));
getRelationDescription(&buffer, trig->tgrelid);
systable_endscan(tgscan);
heap_close(trigDesc, AccessShareLock);
break;
}
systable_endscan(tgscan);
heap_close(trigDesc, AccessShareLock);
break;
}
case OCLASS_SCHEMA:
{
char *nspname;
{
char *nspname;
nspname = get_namespace_name(object->objectId);
if (!nspname)
elog(ERROR, "getObjectDescription: Schema %u does not exist",
object->objectId);
appendStringInfo(&buffer, "schema %s", nspname);
break;
}
nspname = get_namespace_name(object->objectId);
if (!nspname)
elog(ERROR, "getObjectDescription: Schema %u does not exist",
object->objectId);
appendStringInfo(&buffer, "schema %s", nspname);
break;
}
default:
appendStringInfo(&buffer, "unknown object %u %u %d",
@@ -1424,7 +1434,7 @@ static void
getRelationDescription(StringInfo buffer, Oid relid)
{
HeapTuple relTup;
Form_pg_class relForm;
Form_pg_class relForm;
char *nspname;
char *relname;


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.224 2002/09/02 01:05:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.225 2002/09/04 20:31:13 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -67,10 +67,10 @@ static void AddNewRelationTuple(Relation pg_class_desc,
Oid new_rel_oid, Oid new_type_oid,
char relkind);
static void AddNewRelationType(const char *typeName,
Oid typeNamespace,
Oid new_rel_oid,
char new_rel_kind,
Oid new_type_oid);
Oid typeNamespace,
Oid new_rel_oid,
char new_rel_kind,
Oid new_type_oid);
static void RelationRemoveInheritance(Relation relation);
static void StoreAttrDefault(Relation rel, AttrNumber attnum, char *adbin);
static void StoreRelCheck(Relation rel, char *ccname, char *ccbin);
@@ -214,14 +214,14 @@ heap_create(const char *relname,
Oid relid;
Oid dbid = shared_relation ? InvalidOid : MyDatabaseId;
bool nailme = false;
RelFileNode rnode;
RelFileNode rnode;
Relation rel;
/*
* sanity checks
*/
if (!allow_system_table_mods &&
(IsSystemNamespace(relnamespace) || IsToastNamespace(relnamespace)) &&
(IsSystemNamespace(relnamespace) || IsToastNamespace(relnamespace)) &&
IsNormalProcessingMode())
elog(ERROR, "cannot create %s.%s: "
"system catalog modifications are currently disallowed",
@@ -256,21 +256,13 @@ heap_create(const char *relname,
relid = RelOid_pg_class;
}
else if (strcmp(ShadowRelationName, relname) == 0)
{
relid = RelOid_pg_shadow;
}
else if (strcmp(GroupRelationName, relname) == 0)
{
relid = RelOid_pg_group;
}
else if (strcmp(DatabaseRelationName, relname) == 0)
{
relid = RelOid_pg_database;
}
else
{
relid = newoid();
}
}
else
relid = newoid();
@@ -293,7 +285,8 @@ heap_create(const char *relname,
nailme);
/*
* have the storage manager create the relation's disk file, if wanted.
* have the storage manager create the relation's disk file, if
* wanted.
*/
if (storage_create)
heap_storage_create(rel);
@@ -396,7 +389,7 @@ CheckAttributeNames(TupleDesc tupdesc, char relkind)
*/
for (i = 0; i < natts; i++)
{
Oid att_type = tupdesc->attrs[i]->atttypid;
Oid att_type = tupdesc->attrs[i]->atttypid;
if (att_type == UNKNOWNOID)
elog(WARNING, "Attribute \"%s\" has an unknown type"
@@ -427,8 +420,8 @@ AddNewAttributeTuples(Oid new_rel_oid,
Relation rel;
CatalogIndexState indstate;
int natts = tupdesc->natts;
ObjectAddress myself,
referenced;
ObjectAddress myself,
referenced;
/*
* open pg_attribute and its indexes.
@@ -451,7 +444,7 @@ AddNewAttributeTuples(Oid new_rel_oid,
(*dpp)->attcacheoff = -1;
tup = heap_addheader(Natts_pg_attribute,
false,
false,
ATTRIBUTE_TUPLE_SIZE,
(void *) *dpp);
@@ -463,7 +456,7 @@ AddNewAttributeTuples(Oid new_rel_oid,
myself.classId = RelOid_pg_class;
myself.objectId = new_rel_oid;
myself.objectSubId = i+1;
myself.objectSubId = i + 1;
referenced.classId = RelOid_pg_type;
referenced.objectId = (*dpp)->atttypid;
referenced.objectSubId = 0;
@@ -475,7 +468,8 @@ AddNewAttributeTuples(Oid new_rel_oid,
/*
* Next we add the system attributes. Skip OID if rel has no OIDs.
* Skip all for a view or type relation. We don't bother with making
* datatype dependencies here, since presumably all these types are pinned.
* datatype dependencies here, since presumably all these types are
* pinned.
*/
if (relkind != RELKIND_VIEW && relkind != RELKIND_COMPOSITE_TYPE)
{
@@ -488,7 +482,7 @@ AddNewAttributeTuples(Oid new_rel_oid,
Form_pg_attribute attStruct;
tup = heap_addheader(Natts_pg_attribute,
false,
false,
ATTRIBUTE_TUPLE_SIZE,
(void *) *dpp);
@@ -497,9 +491,9 @@ AddNewAttributeTuples(Oid new_rel_oid,
attStruct->attrelid = new_rel_oid;
/*
* Unneeded since they should be OK in the constant data
* anyway
*/
* Unneeded since they should be OK in the constant data
* anyway
*/
/* attStruct->attstattarget = 0; */
/* attStruct->attcacheoff = -1; */
@@ -590,7 +584,7 @@ AddNewRelationTuple(Relation pg_class_desc,
* ----------------
*/
tup = heap_addheader(Natts_pg_class_fixed,
true,
true,
CLASS_TUPLE_SIZE,
(void *) new_rel_reltup);
@@ -628,13 +622,13 @@ AddNewRelationType(const char *typeName,
*
* OLD and probably obsolete comments:
*
* The sizes are set to oid size because it makes implementing sets
* MUCH easier, and no one (we hope) uses these fields to figure out
* how much space to allocate for the type. An oid is the type used
* for a set definition. When a user requests a set, what they
* actually get is the oid of a tuple in the pg_proc catalog, so the
* size of the "set" is the size of an oid. Similarly, byval being
* true makes sets much easier, and it isn't used by anything else.
* The sizes are set to oid size because it makes implementing sets MUCH
* easier, and no one (we hope) uses these fields to figure out how
* much space to allocate for the type. An oid is the type used for a
* set definition. When a user requests a set, what they actually get
* is the oid of a tuple in the pg_proc catalog, so the size of the
* "set" is the size of an oid. Similarly, byval being true makes sets
* much easier, and it isn't used by anything else.
*/
TypeCreate(typeName, /* type name */
typeNamespace, /* type namespace */
@@ -689,10 +683,10 @@ heap_create_with_catalog(const char *relname,
if (get_relname_relid(relname, relnamespace))
elog(ERROR, "Relation '%s' already exists", relname);
/*
* Create the relcache entry (mostly dummy at this point) and the
* physical disk file. (If we fail further down, it's the smgr's
* physical disk file. (If we fail further down, it's the smgr's
* responsibility to remove the disk file again.)
*
* NB: create a physical file only if it's not a view or type relation.
@@ -702,7 +696,7 @@ heap_create_with_catalog(const char *relname,
tupdesc,
shared_relation,
(relkind != RELKIND_VIEW &&
relkind != RELKIND_COMPOSITE_TYPE),
relkind != RELKIND_COMPOSITE_TYPE),
allow_system_table_mods);
/* Fetch the relation OID assigned by heap_create */
@@ -746,14 +740,14 @@ heap_create_with_catalog(const char *relname,
AddNewAttributeTuples(new_rel_oid, new_rel_desc->rd_att, relkind);
/*
* make a dependency link to force the relation to be deleted if
* its namespace is. Skip this in bootstrap mode, since we don't
* make dependencies while bootstrapping.
* make a dependency link to force the relation to be deleted if its
* namespace is. Skip this in bootstrap mode, since we don't make
* dependencies while bootstrapping.
*/
if (!IsBootstrapProcessingMode())
{
ObjectAddress myself,
referenced;
ObjectAddress myself,
referenced;
myself.classId = RelOid_pg_class;
myself.objectId = new_rel_oid;
@@ -768,8 +762,9 @@ heap_create_with_catalog(const char *relname,
* store constraints and defaults passed in the tupdesc, if any.
*
* NB: this may do a CommandCounterIncrement and rebuild the relcache
* entry, so the relation must be valid and self-consistent at this point.
* In particular, there are not yet constraints and defaults anywhere.
* entry, so the relation must be valid and self-consistent at this
* point. In particular, there are not yet constraints and defaults
* anywhere.
*/
StoreConstraints(new_rel_desc, tupdesc);
@@ -788,8 +783,8 @@ heap_create_with_catalog(const char *relname,
* RelationRemoveInheritance
*
* Formerly, this routine checked for child relations and aborted the
* deletion if any were found. Now we rely on the dependency mechanism
* to check for or delete child relations. By the time we get here,
* deletion if any were found. Now we rely on the dependency mechanism
* to check for or delete child relations. By the time we get here,
* there are no children and we need only remove any pg_inherits rows
* linking this relation to its parent(s).
*/
@@ -811,9 +806,7 @@ RelationRemoveInheritance(Relation relation)
SnapshotNow, 1, &key);
while (HeapTupleIsValid(tuple = systable_getnext(scan)))
{
simple_heap_delete(catalogRelation, &tuple->t_self);
}
systable_endscan(scan);
heap_close(catalogRelation, RowExclusiveLock);
@@ -863,7 +856,7 @@ void
DeleteAttributeTuples(Oid relid)
{
Relation attrel;
SysScanDesc scan;
SysScanDesc scan;
ScanKeyData key[1];
HeapTuple atttup;
@@ -880,9 +873,7 @@ DeleteAttributeTuples(Oid relid)
/* Delete all the matching tuples */
while ((atttup = systable_getnext(scan)) != NULL)
{
simple_heap_delete(attrel, &atttup->t_self);
}
/* Clean up after the scan */
systable_endscan(scan);
@@ -907,10 +898,10 @@ RemoveAttributeById(Oid relid, AttrNumber attnum)
/*
* Grab an exclusive lock on the target table, which we will NOT
* release until end of transaction. (In the simple case where
* we are directly dropping this column, AlterTableDropColumn already
* did this ... but when cascading from a drop of some other object,
* we may not have any lock.)
* release until end of transaction. (In the simple case where we are
* directly dropping this column, AlterTableDropColumn already did
* this ... but when cascading from a drop of some other object, we
* may not have any lock.)
*/
rel = relation_open(relid, AccessExclusiveLock);
@@ -920,7 +911,7 @@ RemoveAttributeById(Oid relid, AttrNumber attnum)
ObjectIdGetDatum(relid),
Int16GetDatum(attnum),
0, 0);
if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
elog(ERROR, "RemoveAttributeById: Failed to find attribute %d in relation %u",
attnum, relid);
attStruct = (Form_pg_attribute) GETSTRUCT(tuple);
@@ -985,7 +976,7 @@ RemoveAttrDefault(Oid relid, AttrNumber attnum,
/* There should be at most one matching tuple, but we loop anyway */
while (HeapTupleIsValid(tuple = systable_getnext(scan)))
{
ObjectAddress object;
ObjectAddress object;
object.classId = RelationGetRelid(attrdef_rel);
object.objectId = HeapTupleGetOid(tuple);
@@ -1007,7 +998,7 @@ RemoveAttrDefault(Oid relid, AttrNumber attnum,
/*
* RemoveAttrDefaultById
*
* Remove a pg_attrdef entry specified by OID. This is the guts of
* Remove a pg_attrdef entry specified by OID. This is the guts of
* attribute-default removal. Note it should be called via performDeletion,
* not directly.
*/
@@ -1058,7 +1049,7 @@ RemoveAttrDefaultById(Oid attrdefId)
ObjectIdGetDatum(myrelid),
Int16GetDatum(myattnum),
0, 0);
if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
elog(ERROR, "RemoveAttrDefaultById: cache lookup failed for rel %u attr %d",
myrelid, myattnum);
@@ -1175,7 +1166,7 @@ StoreAttrDefault(Relation rel, AttrNumber attnum, char *adbin)
HeapTuple atttup;
Form_pg_attribute attStruct;
Oid attrdefOid;
ObjectAddress colobject,
ObjectAddress colobject,
defobject;
/*
@@ -1307,8 +1298,8 @@ StoreRelCheck(Relation rel, char *ccname, char *ccbin)
attNos = (int16 *) palloc(keycount * sizeof(int16));
foreach(vl, varList)
{
Var *var = (Var *) lfirst(vl);
int j;
Var *var = (Var *) lfirst(vl);
int j;
for (j = 0; j < i; j++)
if (attNos[j] == var->varattno)
@@ -1324,24 +1315,24 @@ StoreRelCheck(Relation rel, char *ccname, char *ccbin)
/*
* Create the Check Constraint
*/
CreateConstraintEntry(ccname, /* Constraint Name */
RelationGetNamespace(rel), /* namespace */
CONSTRAINT_CHECK, /* Constraint Type */
false, /* Is Deferrable */
CreateConstraintEntry(ccname, /* Constraint Name */
RelationGetNamespace(rel), /* namespace */
CONSTRAINT_CHECK, /* Constraint Type */
false, /* Is Deferrable */
false, /* Is Deferred */
RelationGetRelid(rel), /* relation */
attNos, /* attrs in the constraint */
keycount, /* # attrs in the constraint */
InvalidOid, /* not a domain constraint */
InvalidOid, /* Foreign key fields */
RelationGetRelid(rel), /* relation */
attNos, /* attrs in the constraint */
keycount, /* # attrs in the constraint */
InvalidOid, /* not a domain constraint */
InvalidOid, /* Foreign key fields */
NULL,
0,
' ',
' ',
' ',
expr, /* Tree form check constraint */
expr, /* Tree form check constraint */
ccbin, /* Binary form check constraint */
ccsrc); /* Source form check constraint */
ccsrc); /* Source form check constraint */
pfree(ccsrc);
}
@@ -1366,8 +1357,8 @@ StoreConstraints(Relation rel, TupleDesc tupdesc)
/*
* Deparsing of constraint expressions will fail unless the
* just-created pg_attribute tuples for this relation are made
* visible. So, bump the command counter. CAUTION: this will
* cause a relcache entry rebuild.
* visible. So, bump the command counter. CAUTION: this will cause a
* relcache entry rebuild.
*/
CommandCounterIncrement();
@@ -1513,12 +1504,14 @@ AddRelationRawConstraints(Relation rel,
List *listptr2;
/*
* Generate a name that does not conflict with pre-existing
* constraints, nor with any auto-generated names so far.
* Generate a name that does not conflict with
* pre-existing constraints, nor with any auto-generated
* names so far.
*/
ccname = GenerateConstraintName(RelationGetRelid(rel),
RelationGetNamespace(rel),
&constr_name_ctr);
/*
* Check against other new constraints, in case the user
* has specified a name that looks like an auto-generated
@@ -1668,7 +1661,7 @@ cookDefault(ParseState *pstate,
int32 atttypmod,
char *attname)
{
Node *expr;
Node *expr;
Assert(raw_default != NULL);
@@ -1699,18 +1692,18 @@ cookDefault(ParseState *pstate,
/*
* Check that it will be possible to coerce the expression to the
* column's type. We store the expression without coercion,
* however, to avoid premature coercion in cases like
* column's type. We store the expression without coercion, however,
* to avoid premature coercion in cases like
*
* CREATE TABLE tbl (fld timestamp DEFAULT 'now'::text);
*
* NB: this should match the code in optimizer/prep/preptlist.c that
* will actually do the coercion, to ensure we don't accept an
* unusable default expression.
* NB: this should match the code in optimizer/prep/preptlist.c that will
* actually do the coercion, to ensure we don't accept an unusable
* default expression.
*/
if (OidIsValid(atttypid))
{
Oid type_id = exprType(expr);
Oid type_id = exprType(expr);
if (type_id != atttypid)
{
@@ -1718,7 +1711,7 @@ cookDefault(ParseState *pstate,
atttypid, atttypmod, false) == NULL)
elog(ERROR, "Column \"%s\" is of type %s"
" but default expression is of type %s"
"\n\tYou will need to rewrite or cast the expression",
"\n\tYou will need to rewrite or cast the expression",
attname,
format_type_be(atttypid),
format_type_be(type_id));
@@ -1735,7 +1728,7 @@ cookDefault(ParseState *pstate,
*/
fix_opids(expr);
return(expr);
return (expr);
}
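
The 'now'::text example is why coercibility is checked in cookDefault while the default is stored uncoerced: the conversion has to happen when the default is used, not when the table is created, or a value like 'now' would be frozen at CREATE TABLE time. A rough standalone analogue using the C time API, not the backend's coercion code:

/* Rough analogue of "validate the default now, but coerce it at use time":
 * storing the resolved value at definition time would freeze 'now', while
 * storing the raw text and resolving it per use keeps it current. */
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

static time_t resolve_default(const char *raw)
{
    /* the "coercion" step: turn the stored text into a timestamp value */
    if (strcmp(raw, "now") == 0)
        return time(NULL);
    return (time_t) 0;
}

int main(void)
{
    const char *stored_default = "now";   /* what gets stored, uncoerced */
    time_t frozen = resolve_default(stored_default);  /* eager: definition time */

    sleep(1);                             /* time passes between DDL and insert */

    time_t fresh = resolve_default(stored_default);   /* lazy: per insertion */

    printf("eager coercion would always insert %ld\n", (long) frozen);
    printf("deferred coercion inserts %ld at this insert\n", (long) fresh);
    return 0;
}
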
@@ -1753,7 +1746,7 @@ RemoveRelConstraints(Relation rel, const char *constrName,
{
int ndeleted = 0;
Relation conrel;
SysScanDesc conscan;
SysScanDesc conscan;
ScanKeyData key[1];
HeapTuple contup;
@@ -1773,11 +1766,11 @@ RemoveRelConstraints(Relation rel, const char *constrName,
*/
while ((contup = systable_getnext(conscan)) != NULL)
{
Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(contup);
Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(contup);
if (strcmp(NameStr(con->conname), constrName) == 0)
{
ObjectAddress conobj;
ObjectAddress conobj;
conobj.classId = RelationGetRelid(conrel);
conobj.objectId = HeapTupleGetOid(contup);
@@ -1815,9 +1808,7 @@ RemoveStatistics(Relation rel)
SnapshotNow, 1, &key);
while (HeapTupleIsValid(tuple = systable_getnext(scan)))
{
simple_heap_delete(pgstatistic, &tuple->t_self);
}
systable_endscan(scan);
heap_close(pgstatistic, RowExclusiveLock);
@@ -1836,7 +1827,7 @@ RelationTruncateIndexes(Oid heapId)
{
Relation indexRelation;
ScanKeyData entry;
SysScanDesc scan;
SysScanDesc scan;
HeapTuple indexTuple;
/* Scan pg_index to find indexes on specified heap */


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.195 2002/09/03 16:00:02 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.196 2002/09/04 20:31:14 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -320,13 +320,13 @@ UpdateRelationRelation(Relation indexRelation)
/* XXX Natts_pg_class_fixed is a hack - see pg_class.h */
tuple = heap_addheader(Natts_pg_class_fixed,
true,
true,
CLASS_TUPLE_SIZE,
(void *) indexRelation->rd_rel);
/*
* the new tuple must have the oid already chosen for the index.
* sure would be embarrassing to do this sort of thing in polite company.
* the new tuple must have the oid already chosen for the index. sure
* would be embarrassing to do this sort of thing in polite company.
*/
HeapTupleSetOid(tuple, RelationGetRelid(indexRelation));
simple_heap_insert(pg_class, tuple);
@@ -391,7 +391,7 @@ AppendAttributeTuples(Relation indexRelation, int numatts)
Assert(indexTupDesc->attrs[i]->attcacheoff == -1);
new_tuple = heap_addheader(Natts_pg_attribute,
false,
false,
ATTRIBUTE_TUPLE_SIZE,
(void *) indexTupDesc->attrs[i]);
@@ -478,7 +478,7 @@ UpdateIndexRelation(Oid indexoid,
* form a tuple to insert into pg_index
*/
tuple = heap_addheader(Natts_pg_index,
false,
false,
itupLen,
(void *) indexForm);
@@ -532,8 +532,8 @@ index_create(Oid heapRelationId,
heapRelation = heap_open(heapRelationId, ShareLock);
/*
* The index will be in the same namespace as its parent table,
* and is shared across databases if and only if the parent is.
* The index will be in the same namespace as its parent table, and is
* shared across databases if and only if the parent is.
*/
namespaceId = RelationGetNamespace(heapRelation);
shared_relation = heapRelation->rd_rel->relisshared;
@@ -554,7 +554,7 @@ index_create(Oid heapRelationId,
* We cannot allow indexing a shared relation after initdb (because
* there's no way to make the entry in other databases' pg_class).
* Unfortunately we can't distinguish initdb from a manually started
* standalone backend. However, we can at least prevent this mistake
* standalone backend. However, we can at least prevent this mistake
* under normal multi-user operation.
*/
if (shared_relation && IsUnderPostmaster)
@@ -577,6 +577,7 @@ index_create(Oid heapRelationId,
classObjectId);
indexTupDesc->tdhasoid = false;
/*
* create the index relation's relcache entry and physical disk file.
* (If we fail further down, it's the smgr's responsibility to remove
@@ -643,20 +644,20 @@ index_create(Oid heapRelationId,
* Register constraint and dependencies for the index.
*
* If the index is from a CONSTRAINT clause, construct a pg_constraint
* entry. The index is then linked to the constraint, which in turn is
* linked to the table. If it's not a CONSTRAINT, make the dependency
* directly on the table.
* entry. The index is then linked to the constraint, which in turn
* is linked to the table. If it's not a CONSTRAINT, make the
* dependency directly on the table.
*
* We don't need a dependency on the namespace, because there'll be
* an indirect dependency via our parent table.
* We don't need a dependency on the namespace, because there'll be an
* indirect dependency via our parent table.
*
* During bootstrap we can't register any dependencies, and we don't
* try to make a constraint either.
* During bootstrap we can't register any dependencies, and we don't try
* to make a constraint either.
*/
if (!IsBootstrapProcessingMode())
{
ObjectAddress myself,
referenced;
ObjectAddress myself,
referenced;
myself.classId = RelOid_pg_class;
myself.objectId = indexoid;
@@ -674,25 +675,25 @@ index_create(Oid heapRelationId,
else
{
elog(ERROR, "index_create: constraint must be PRIMARY or UNIQUE");
constraintType = 0; /* keep compiler quiet */
constraintType = 0; /* keep compiler quiet */
}
conOid = CreateConstraintEntry(indexRelationName,
namespaceId,
constraintType,
false, /* isDeferrable */
false, /* isDeferred */
false, /* isDeferrable */
false, /* isDeferred */
heapRelationId,
indexInfo->ii_KeyAttrNumbers,
indexInfo->ii_NumKeyAttrs,
InvalidOid, /* no domain */
InvalidOid, /* no foreign key */
InvalidOid, /* no domain */
InvalidOid, /* no foreign key */
NULL,
0,
' ',
' ',
' ',
NULL, /* no check constraint */
NULL, /* no check constraint */
NULL,
NULL);
@@ -807,6 +808,7 @@ index_drop(Oid indexId)
* fix RELATION relation
*/
DeleteRelationTuple(indexId);
/*
* fix ATTRIBUTE relation
*/
@@ -839,11 +841,12 @@ index_drop(Oid indexId)
smgrunlink(DEFAULT_SMGR, userIndexRelation);
/*
* We are presently too lazy to attempt to compute the new correct value
* of relhasindex (the next VACUUM will fix it if necessary). So there is
* no need to update the pg_class tuple for the owning relation.
* But we must send out a shared-cache-inval notice on the owning relation
* to ensure other backends update their relcache lists of indexes.
* We are presently too lazy to attempt to compute the new correct
* value of relhasindex (the next VACUUM will fix it if necessary).
* So there is no need to update the pg_class tuple for the owning
* relation. But we must send out a shared-cache-inval notice on the
* owning relation to ensure other backends update their relcache
* lists of indexes.
*/
CacheInvalidateRelcache(heapId);
@@ -1640,7 +1643,7 @@ IndexBuildHeapScan(Relation heapRelation,
* transaction.)
*/
if (!TransactionIdIsCurrentTransactionId(
HeapTupleHeaderGetXmin(heapTuple->t_data)))
HeapTupleHeaderGetXmin(heapTuple->t_data)))
elog(ERROR, "IndexBuildHeapScan: concurrent insert in progress");
indexIt = true;
tupleIsAlive = true;
@@ -1655,7 +1658,7 @@ IndexBuildHeapScan(Relation heapRelation,
* transaction.)
*/
if (!TransactionIdIsCurrentTransactionId(
HeapTupleHeaderGetXmax(heapTuple->t_data)))
HeapTupleHeaderGetXmax(heapTuple->t_data)))
elog(ERROR, "IndexBuildHeapScan: concurrent delete in progress");
indexIt = true;
tupleIsAlive = false;
@@ -1798,12 +1801,12 @@ reindex_index(Oid indexId, bool force, bool inplace)
/*
* Open our index relation and get an exclusive lock on it.
*
* Note: doing this before opening the parent heap relation means
* there's a possibility for deadlock failure against another xact
* that is doing normal accesses to the heap and index. However,
* it's not real clear why you'd be needing to do REINDEX on a table
* that's in active use, so I'd rather have the protection of making
* sure the index is locked down.
* Note: doing this before opening the parent heap relation means there's
* a possibility for deadlock failure against another xact that is
* doing normal accesses to the heap and index. However, it's not
* real clear why you'd be needing to do REINDEX on a table that's in
* active use, so I'd rather have the protection of making sure the
* index is locked down.
*/
iRel = index_open(indexId);
if (iRel == NULL)

View File

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.101 2002/08/06 02:36:34 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.102 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,7 +44,7 @@ CatalogOpenIndexes(Relation heapRel)
resultRelInfo = makeNode(ResultRelInfo);
resultRelInfo->ri_RangeTableIndex = 1; /* dummy */
resultRelInfo->ri_RelationDesc = heapRel;
resultRelInfo->ri_TrigDesc = NULL; /* we don't fire triggers */
resultRelInfo->ri_TrigDesc = NULL; /* we don't fire triggers */
ExecOpenIndices(resultRelInfo);
@@ -132,14 +132,14 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple)
* CatalogUpdateIndexes - do all the indexing work for a new catalog tuple
*
* This is a convenience routine for the common case where we only need
* to insert or update a single tuple in a system catalog. Avoid using it for
* to insert or update a single tuple in a system catalog. Avoid using it for
* multiple tuples, since opening the indexes and building the index info
* structures is moderately expensive.
*/
void
CatalogUpdateIndexes(Relation heapRel, HeapTuple heapTuple)
{
CatalogIndexState indstate;
CatalogIndexState indstate;
indstate = CatalogOpenIndexes(heapRel);
CatalogIndexInsert(indstate, heapTuple);
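Not part of the commit: a minimal sketch of the calling pattern the CatalogUpdateIndexes comment above describes, contrasting the one-tuple convenience wrapper with the batched CatalogOpenIndexes/CatalogIndexInsert path. The function name, the header list, and CatalogCloseIndexes (assumed to be the cleanup counterpart of CatalogOpenIndexes) are illustrative assumptions, not taken from the patch.

#include "postgres.h"
#include "access/heapam.h"
#include "catalog/indexing.h"

/* Hypothetical caller: insert catalog tuples and keep their indexes current. */
static void
insert_catalog_tuples(Relation rel, HeapTuple onetup,
					  HeapTuple *manytups, int ntups)
{
	CatalogIndexState indstate;
	int			i;

	/* One tuple: the wrapper opens and closes the indexes around the call. */
	simple_heap_insert(rel, onetup);
	CatalogUpdateIndexes(rel, onetup);

	/* Several tuples: open the indexes once and reuse the index-info state. */
	indstate = CatalogOpenIndexes(rel);
	for (i = 0; i < ntups; i++)
	{
		simple_heap_insert(rel, manytups[i]);
		CatalogIndexInsert(indstate, manytups[i]);
	}
	CatalogCloseIndexes(indstate);	/* assumed counterpart of CatalogOpenIndexes */
}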

View File

@@ -13,7 +13,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/namespace.c,v 1.34 2002/09/02 01:05:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/namespace.c,v 1.35 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,10 +60,10 @@
* 2. If a TEMP table namespace has been initialized in this session, it
* is always searched just after any special namespace.
*
* 3. The system catalog namespace is always searched. If the system
* 3. The system catalog namespace is always searched. If the system
* namespace is present in the explicit path then it will be searched in
* the specified order; otherwise it will be searched after TEMP tables and
* *before* the explicit list. (It might seem that the system namespace
* *before* the explicit list. (It might seem that the system namespace
* should be implicitly last, but this behavior appears to be required by
* SQL99. Also, this provides a way to search the system namespace first
* without thereby making it the default creation target namespace.)
@@ -76,7 +76,7 @@
* In bootstrap mode, the search path is set equal to 'pg_catalog', so that
* the system namespace is the only one searched or inserted into.
* The initdb script is also careful to set search_path to 'pg_catalog' for
* its post-bootstrap standalone backend runs. Otherwise the default search
* its post-bootstrap standalone backend runs. Otherwise the default search
* path is determined by GUC. The factory default path contains the PUBLIC
* namespace (if it exists), preceded by the user's personal namespace
* (if one exists).
@@ -109,7 +109,7 @@ static bool namespaceSearchPathValid = true;
/*
* myTempNamespace is InvalidOid until and unless a TEMP namespace is set up
* in a particular backend session (this happens when a CREATE TEMP TABLE
* command is first executed). Thereafter it's the OID of the temp namespace.
* command is first executed). Thereafter it's the OID of the temp namespace.
* firstTempTransaction flags whether we've committed creation of the TEMP
* namespace or not.
*/
@@ -127,7 +127,7 @@ static Oid mySpecialNamespace = InvalidOid;
* This is the text equivalent of the search path --- it's the value
* of the GUC variable 'search_path'.
*/
char *namespace_search_path = NULL;
char *namespace_search_path = NULL;
/* Local functions */
@@ -138,11 +138,11 @@ static void RemoveTempRelationsCallback(void);
static void NamespaceCallback(Datum arg, Oid relid);
/* These don't really need to appear in any header file */
Datum pg_table_is_visible(PG_FUNCTION_ARGS);
Datum pg_type_is_visible(PG_FUNCTION_ARGS);
Datum pg_function_is_visible(PG_FUNCTION_ARGS);
Datum pg_operator_is_visible(PG_FUNCTION_ARGS);
Datum pg_opclass_is_visible(PG_FUNCTION_ARGS);
Datum pg_table_is_visible(PG_FUNCTION_ARGS);
Datum pg_type_is_visible(PG_FUNCTION_ARGS);
Datum pg_function_is_visible(PG_FUNCTION_ARGS);
Datum pg_operator_is_visible(PG_FUNCTION_ARGS);
Datum pg_opclass_is_visible(PG_FUNCTION_ARGS);
/*
@@ -230,7 +230,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation)
{
/* use exact schema given */
namespaceId = GetSysCacheOid(NAMESPACENAME,
CStringGetDatum(newRelation->schemaname),
CStringGetDatum(newRelation->schemaname),
0, 0, 0);
if (!OidIsValid(namespaceId))
elog(ERROR, "Namespace \"%s\" does not exist",
@@ -312,10 +312,10 @@ RelationIsVisible(Oid relid)
else
{
/*
* If it is in the path, it might still not be visible; it could be
* hidden by another relation of the same name earlier in the path.
* So we must do a slow check to see if this rel would be found by
* RelnameGetRelid.
* If it is in the path, it might still not be visible; it could
* be hidden by another relation of the same name earlier in the
* path. So we must do a slow check to see if this rel would be
* found by RelnameGetRelid.
*/
char *relname = NameStr(relform->relname);
@@ -394,10 +394,10 @@ TypeIsVisible(Oid typid)
else
{
/*
* If it is in the path, it might still not be visible; it could be
* hidden by another type of the same name earlier in the path.
* So we must do a slow check to see if this type would be found by
* TypenameGetTypid.
* If it is in the path, it might still not be visible; it could
* be hidden by another type of the same name earlier in the path.
* So we must do a slow check to see if this type would be found
* by TypenameGetTypid.
*/
char *typname = NameStr(typform->typname);
@@ -492,18 +492,18 @@ FuncnameGetCandidates(List *names, int nargs)
/*
* Okay, it's in the search path, but does it have the same
* arguments as something we already accepted? If so, keep
* arguments as something we already accepted? If so, keep
* only the one that appears earlier in the search path.
*
* If we have an ordered list from SearchSysCacheList (the
* normal case), then any conflicting proc must immediately
* adjoin this one in the list, so we only need to look at
* the newest result item. If we have an unordered list,
* we have to scan the whole result list.
* If we have an ordered list from SearchSysCacheList (the normal
* case), then any conflicting proc must immediately adjoin
* this one in the list, so we only need to look at the newest
* result item. If we have an unordered list, we have to scan
* the whole result list.
*/
if (resultList)
{
FuncCandidateList prevResult;
FuncCandidateList prevResult;
if (catlist->ordered)
{
@@ -521,8 +521,8 @@ FuncnameGetCandidates(List *names, int nargs)
prevResult = prevResult->next)
{
if (nargs == prevResult->nargs &&
memcmp(procform->proargtypes, prevResult->args,
nargs * sizeof(Oid)) == 0)
memcmp(procform->proargtypes, prevResult->args,
nargs * sizeof(Oid)) == 0)
break;
}
}
@@ -531,7 +531,7 @@ FuncnameGetCandidates(List *names, int nargs)
/* We have a match with a previous result */
Assert(pathpos != prevResult->pathpos);
if (pathpos > prevResult->pathpos)
continue; /* keep previous result */
continue; /* keep previous result */
/* replace previous result */
prevResult->pathpos = pathpos;
prevResult->oid = HeapTupleGetOid(proctup);
@@ -595,10 +595,10 @@ FunctionIsVisible(Oid funcid)
else
{
/*
* If it is in the path, it might still not be visible; it could be
* hidden by another proc of the same name and arguments earlier
* in the path. So we must do a slow check to see if this is the
* same proc that would be found by FuncnameGetCandidates.
* If it is in the path, it might still not be visible; it could
* be hidden by another proc of the same name and arguments
* earlier in the path. So we must do a slow check to see if this
* is the same proc that would be found by FuncnameGetCandidates.
*/
char *proname = NameStr(procform->proname);
int nargs = procform->pronargs;
@@ -641,7 +641,7 @@ FunctionIsVisible(Oid funcid)
* identical entries in later namespaces.
*
* The returned items always have two args[] entries --- one or the other
* will be InvalidOid for a prefix or postfix oprkind. nargs is 2, too.
* will be InvalidOid for a prefix or postfix oprkind. nargs is 2, too.
*/
FuncCandidateList
OpernameGetCandidates(List *names, char oprkind)
@@ -707,18 +707,18 @@ OpernameGetCandidates(List *names, char oprkind)
/*
* Okay, it's in the search path, but does it have the same
* arguments as something we already accepted? If so, keep
* arguments as something we already accepted? If so, keep
* only the one that appears earlier in the search path.
*
* If we have an ordered list from SearchSysCacheList (the
* normal case), then any conflicting oper must immediately
* adjoin this one in the list, so we only need to look at
* the newest result item. If we have an unordered list,
* we have to scan the whole result list.
* If we have an ordered list from SearchSysCacheList (the normal
* case), then any conflicting oper must immediately adjoin
* this one in the list, so we only need to look at the newest
* result item. If we have an unordered list, we have to scan
* the whole result list.
*/
if (resultList)
{
FuncCandidateList prevResult;
FuncCandidateList prevResult;
if (catlist->ordered)
{
@@ -744,7 +744,7 @@ OpernameGetCandidates(List *names, char oprkind)
/* We have a match with a previous result */
Assert(pathpos != prevResult->pathpos);
if (pathpos > prevResult->pathpos)
continue; /* keep previous result */
continue; /* keep previous result */
/* replace previous result */
prevResult->pathpos = pathpos;
prevResult->oid = HeapTupleGetOid(opertup);
@@ -807,10 +807,11 @@ OperatorIsVisible(Oid oprid)
else
{
/*
* If it is in the path, it might still not be visible; it could be
* hidden by another operator of the same name and arguments earlier
* in the path. So we must do a slow check to see if this is the
* same operator that would be found by OpernameGetCandidates.
* If it is in the path, it might still not be visible; it could
* be hidden by another operator of the same name and arguments
* earlier in the path. So we must do a slow check to see if this
* is the same operator that would be found by
* OpernameGetCandidates.
*/
char *oprname = NameStr(oprform->oprname);
FuncCandidateList clist;
@@ -882,18 +883,18 @@ OpclassGetCandidates(Oid amid)
/*
* Okay, it's in the search path, but does it have the same name
* as something we already accepted? If so, keep
* only the one that appears earlier in the search path.
* as something we already accepted? If so, keep only the one
* that appears earlier in the search path.
*
* If we have an ordered list from SearchSysCacheList (the
* normal case), then any conflicting opclass must immediately
* adjoin this one in the list, so we only need to look at
* the newest result item. If we have an unordered list,
* we have to scan the whole result list.
* If we have an ordered list from SearchSysCacheList (the normal
* case), then any conflicting opclass must immediately adjoin
* this one in the list, so we only need to look at the newest
* result item. If we have an unordered list, we have to scan the
* whole result list.
*/
if (resultList)
{
OpclassCandidateList prevResult;
OpclassCandidateList prevResult;
if (catlist->ordered)
{
@@ -919,7 +920,7 @@ OpclassGetCandidates(Oid amid)
/* We have a match with a previous result */
Assert(pathpos != prevResult->pathpos);
if (pathpos > prevResult->pathpos)
continue; /* keep previous result */
continue; /* keep previous result */
/* replace previous result */
prevResult->opcname_tmp = NameStr(opcform->opcname);
prevResult->pathpos = pathpos;
@@ -1019,10 +1020,10 @@ OpclassIsVisible(Oid opcid)
else
{
/*
* If it is in the path, it might still not be visible; it could be
* hidden by another opclass of the same name earlier in the path.
* So we must do a slow check to see if this opclass would be found by
* OpclassnameGetOpcid.
* If it is in the path, it might still not be visible; it could
* be hidden by another opclass of the same name earlier in the
* path. So we must do a slow check to see if this opclass would
* be found by OpclassnameGetOpcid.
*/
char *opcname = NameStr(opcform->opcname);
@@ -1063,6 +1064,7 @@ DeconstructQualifiedName(List *names,
catalogname = strVal(lfirst(names));
schemaname = strVal(lsecond(names));
objname = strVal(lfirst(lnext(lnext(names))));
/*
* We check the catalog name and then ignore it.
*/
@@ -1190,7 +1192,7 @@ char *
NameListToString(List *names)
{
StringInfoData string;
List *l;
List *l;
initStringInfo(&string);
@@ -1248,11 +1250,12 @@ PopSpecialNamespace(Oid namespaceId)
/*
* FindConversionByName - find a conversion by possibly qualified name
*/
Oid FindConversionByName(List *name)
Oid
FindConversionByName(List *name)
{
char *conversion_name;
Oid namespaceId;
Oid conoid;
char *conversion_name;
Oid namespaceId;
Oid conoid;
List *lptr;
/* Convert list of names to a name and namespace */
@@ -1285,7 +1288,8 @@ Oid FindConversionByName(List *name)
/*
 * FindDefaultConversionProc - find default encoding conversion proc
*/
Oid FindDefaultConversionProc(int4 for_encoding, int4 to_encoding)
Oid
FindDefaultConversionProc(int4 for_encoding, int4 to_encoding)
{
Oid proc;
List *lptr;
@@ -1341,13 +1345,13 @@ recomputeNamespacePath(void)
* Convert the list of names to a list of OIDs. If any names are not
* recognizable or we don't have read access, just leave them out of
* the list. (We can't raise an error, since the search_path setting
* has already been accepted.) Don't make duplicate entries, either.
* has already been accepted.) Don't make duplicate entries, either.
*/
oidlist = NIL;
foreach(l, namelist)
{
char *curname = (char *) lfirst(l);
Oid namespaceId;
char *curname = (char *) lfirst(l);
Oid namespaceId;
if (strcmp(curname, "$user") == 0)
{
@@ -1359,7 +1363,7 @@ recomputeNamespacePath(void)
0, 0, 0);
if (HeapTupleIsValid(tuple))
{
char *uname;
char *uname;
uname = NameStr(((Form_pg_shadow) GETSTRUCT(tuple))->usename);
namespaceId = GetSysCacheOid(NAMESPACENAME,
@@ -1396,9 +1400,9 @@ recomputeNamespacePath(void)
firstNS = (Oid) lfirsti(oidlist);
/*
* Add any implicitly-searched namespaces to the list. Note these
* go on the front, not the back; also notice that we do not check
* USAGE permissions for these.
* Add any implicitly-searched namespaces to the list. Note these go
* on the front, not the back; also notice that we do not check USAGE
* permissions for these.
*/
if (!intMember(PG_CATALOG_NAMESPACE, oidlist))
oidlist = lconsi(PG_CATALOG_NAMESPACE, oidlist);
@@ -1453,13 +1457,13 @@ InitTempTableNamespace(void)
Oid namespaceId;
/*
* First, do permission check to see if we are authorized to make
* temp tables. We use a nonstandard error message here since
* First, do permission check to see if we are authorized to make temp
* tables. We use a nonstandard error message here since
* "databasename: permission denied" might be a tad cryptic.
*
* Note we apply the check to the session user, not the currently
* active userid, since we are not going to change our minds about
* temp table availability during the session.
* Note we apply the check to the session user, not the currently active
* userid, since we are not going to change our minds about temp table
* availability during the session.
*/
if (pg_database_aclcheck(MyDatabaseId, GetSessionUserId(),
ACL_CREATE_TEMP) != ACLCHECK_OK)
@@ -1476,11 +1480,11 @@ InitTempTableNamespace(void)
/*
* First use of this temp namespace in this database; create it.
* The temp namespaces are always owned by the superuser. We
* leave their permissions at default --- i.e., no access except to
* superuser --- to ensure that unprivileged users can't peek
* leave their permissions at default --- i.e., no access except
* to superuser --- to ensure that unprivileged users can't peek
* at other backends' temp tables. This works because the places
* that access the temp namespace for my own backend skip permissions
* checks on it.
* that access the temp namespace for my own backend skip
* permissions checks on it.
*/
namespaceId = NamespaceCreate(namespaceName, BOOTSTRAP_USESYSID);
/* Advance command counter to make namespace visible */
@@ -1504,7 +1508,7 @@ InitTempTableNamespace(void)
firstTempTransaction = true;
namespaceSearchPathValid = false; /* need to rebuild list */
namespaceSearchPathValid = false; /* need to rebuild list */
}
/*
@@ -1516,7 +1520,7 @@ AtEOXact_Namespace(bool isCommit)
/*
* If we abort the transaction in which a temp namespace was selected,
* we'll have to do any creation or cleanout work over again. So,
* just forget the namespace entirely until next time. On the other
* just forget the namespace entirely until next time. On the other
* hand, if we commit then register an exit callback to clean out the
* temp tables at backend shutdown. (We only want to register the
* callback once per session, so this is a good place to do it.)
@@ -1528,17 +1532,18 @@ AtEOXact_Namespace(bool isCommit)
else
{
myTempNamespace = InvalidOid;
namespaceSearchPathValid = false; /* need to rebuild list */
namespaceSearchPathValid = false; /* need to rebuild list */
}
firstTempTransaction = false;
}
/*
* Clean up if someone failed to do PopSpecialNamespace
*/
if (OidIsValid(mySpecialNamespace))
{
mySpecialNamespace = InvalidOid;
namespaceSearchPathValid = false; /* need to rebuild list */
namespaceSearchPathValid = false; /* need to rebuild list */
}
}
@@ -1561,14 +1566,14 @@ RemoveTempRelations(Oid tempNamespaceId)
/*
* Scan pg_class to find all the relations in the target namespace.
* Ignore indexes, though, on the assumption that they'll go away
* when their tables are deleted.
* Ignore indexes, though, on the assumption that they'll go away when
* their tables are deleted.
*
* NOTE: if there are deletion constraints between temp relations,
* then our CASCADE delete call may cause as-yet-unvisited objects
* to go away. This is okay because we are using SnapshotNow; when
* the scan does reach those pg_class tuples, they'll be ignored as
* already deleted.
* NOTE: if there are deletion constraints between temp relations, then
* our CASCADE delete call may cause as-yet-unvisited objects to go
* away. This is okay because we are using SnapshotNow; when the scan
* does reach those pg_class tuples, they'll be ignored as already
* deleted.
*/
ScanKeyEntryInitialize(&key, 0x0,
Anum_pg_class_relnamespace,
@@ -1605,7 +1610,7 @@ RemoveTempRelations(Oid tempNamespaceId)
static void
RemoveTempRelationsCallback(void)
{
if (OidIsValid(myTempNamespace)) /* should always be true */
if (OidIsValid(myTempNamespace)) /* should always be true */
{
/* Need to ensure we have a usable transaction. */
AbortOutOfAnyTransaction();
@@ -1644,18 +1649,19 @@ assign_search_path(const char *newval, bool doit, bool interactive)
/*
* If we aren't inside a transaction, we cannot do database access so
* cannot verify the individual names. Must accept the list on faith.
* cannot verify the individual names. Must accept the list on faith.
*/
if (interactive && IsTransactionState())
{
/*
* Verify that all the names are either valid namespace names or
* "$user". We do not require $user to correspond to a valid
* namespace. We do not check for USAGE rights, either; should we?
* namespace. We do not check for USAGE rights, either; should
* we?
*/
foreach(l, namelist)
{
char *curname = (char *) lfirst(l);
char *curname = (char *) lfirst(l);
if (strcmp(curname, "$user") == 0)
continue;
@@ -1670,9 +1676,9 @@ assign_search_path(const char *newval, bool doit, bool interactive)
freeList(namelist);
/*
* We mark the path as needing recomputation, but don't do anything until
* it's needed. This avoids trying to do database access during GUC
* initialization.
* We mark the path as needing recomputation, but don't do anything
* until it's needed. This avoids trying to do database access during
* GUC initialization.
*/
if (doit)
namespaceSearchPathValid = false;
@@ -1692,7 +1698,8 @@ InitializeSearchPath(void)
{
/*
* In bootstrap mode, the search path must be 'pg_catalog' so that
* tables are created in the proper namespace; ignore the GUC setting.
* tables are created in the proper namespace; ignore the GUC
* setting.
*/
MemoryContext oldcxt;
@@ -1707,8 +1714,8 @@ InitializeSearchPath(void)
else
{
/*
* In normal mode, arrange for a callback on any syscache invalidation
* of pg_namespace rows.
* In normal mode, arrange for a callback on any syscache
* invalidation of pg_namespace rows.
*/
CacheRegisterSyscacheCallback(NAMESPACEOID,
NamespaceCallback,

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.54 2002/08/22 00:01:41 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.55 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -54,8 +54,8 @@ AggregateCreate(const char *aggName,
Oid procOid;
TupleDesc tupDesc;
int i;
ObjectAddress myself,
referenced;
ObjectAddress myself,
referenced;
/* sanity checks */
if (!aggName)
@@ -85,12 +85,12 @@ AggregateCreate(const char *aggName,
proc = (Form_pg_proc) GETSTRUCT(tup);
if (proc->prorettype != aggTransType)
elog(ERROR, "return type of transition function %s is not %s",
NameListToString(aggtransfnName), format_type_be(aggTransType));
NameListToString(aggtransfnName), format_type_be(aggTransType));
/*
* If the transfn is strict and the initval is NULL, make sure input
* type and transtype are the same (or at least binary-compatible),
* so that it's OK to use the first input value as the initial
* type and transtype are the same (or at least binary-compatible), so
* that it's OK to use the first input value as the initial
* transValue.
*/
if (proc->proisstrict && agginitval == NULL)
@@ -128,26 +128,29 @@ AggregateCreate(const char *aggName,
/*
* Everything looks okay. Try to create the pg_proc entry for the
* aggregate. (This could fail if there's already a conflicting entry.)
* aggregate. (This could fail if there's already a conflicting
* entry.)
*/
MemSet(fnArgs, 0, FUNC_MAX_ARGS * sizeof(Oid));
fnArgs[0] = aggBaseType;
procOid = ProcedureCreate(aggName,
aggNamespace,
false, /* no replacement */
false, /* doesn't return a set */
finaltype, /* returnType */
INTERNALlanguageId, /* languageObjectId */
false, /* no replacement */
false, /* doesn't return a set */
finaltype, /* returnType */
INTERNALlanguageId, /* languageObjectId */
0,
"aggregate_dummy", /* placeholder proc */
"-", /* probin */
true, /* isAgg */
false, /* security invoker (currently not definable for agg) */
false, /* isStrict (not needed for agg) */
PROVOLATILE_IMMUTABLE, /* volatility (not needed for agg) */
1, /* parameterCount */
fnArgs); /* parameterTypes */
"aggregate_dummy", /* placeholder proc */
"-", /* probin */
true, /* isAgg */
false, /* security invoker (currently not
* definable for agg) */
false, /* isStrict (not needed for agg) */
PROVOLATILE_IMMUTABLE, /* volatility (not
* needed for agg) */
1, /* parameterCount */
fnArgs); /* parameterTypes */
/*
* Okay to create the pg_aggregate entry.

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_constraint.c,v 1.5 2002/08/26 17:53:57 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_constraint.c,v 1.6 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -34,7 +34,7 @@
* Create a constraint table entry.
*
* Subsidiary records (such as triggers or indexes to implement the
* constraint) are *not* created here. But we do make dependency links
* constraint) are *not* created here. But we do make dependency links
* from the constraint to the things it depends on.
*/
Oid
@@ -136,7 +136,7 @@ CreateConstraintEntry(const char *constraintName,
*/
if (conBin)
values[Anum_pg_constraint_conbin - 1] = DirectFunctionCall1(textin,
CStringGetDatum(conBin));
CStringGetDatum(conBin));
else
nulls[Anum_pg_constraint_conbin - 1] = 'n';
@@ -145,7 +145,7 @@ CreateConstraintEntry(const char *constraintName,
*/
if (conSrc)
values[Anum_pg_constraint_consrc - 1] = DirectFunctionCall1(textin,
CStringGetDatum(conSrc));
CStringGetDatum(conSrc));
else
nulls[Anum_pg_constraint_consrc - 1] = 'n';
@@ -165,10 +165,10 @@ CreateConstraintEntry(const char *constraintName,
if (OidIsValid(relId))
{
/*
* Register auto dependency from constraint to owning relation,
* or to specific column(s) if any are mentioned.
* Register auto dependency from constraint to owning relation, or
* to specific column(s) if any are mentioned.
*/
ObjectAddress relobject;
ObjectAddress relobject;
relobject.classId = RelOid_pg_class;
relobject.objectId = relId;
@@ -195,7 +195,7 @@ CreateConstraintEntry(const char *constraintName,
* Register normal dependency from constraint to foreign relation,
* or to specific column(s) if any are mentioned.
*/
ObjectAddress relobject;
ObjectAddress relobject;
relobject.classId = RelOid_pg_class;
relobject.objectId = foreignRelId;
@@ -219,11 +219,11 @@ CreateConstraintEntry(const char *constraintName,
if (conExpr != NULL)
{
/*
* Register dependencies from constraint to objects mentioned
* in CHECK expression. We gin up a rather bogus rangetable
* list to handle any Vars in the constraint.
* Register dependencies from constraint to objects mentioned in
* CHECK expression. We gin up a rather bogus rangetable list to
* handle any Vars in the constraint.
*/
RangeTblEntry rte;
RangeTblEntry rte;
MemSet(&rte, 0, sizeof(rte));
rte.type = T_RangeTblEntry;
@@ -271,7 +271,7 @@ ConstraintNameIsUsed(Oid relId, Oid relNamespace, const char *cname)
while (HeapTupleIsValid(tup = systable_getnext(conscan)))
{
Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tup);
Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tup);
if (con->conrelid == relId)
{
@@ -338,7 +338,7 @@ GenerateConstraintName(Oid relId, Oid relNamespace, int *counter)
while (HeapTupleIsValid(tup = systable_getnext(conscan)))
{
Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tup);
Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tup);
if (con->conrelid == relId)
{
@@ -366,7 +366,7 @@ ConstraintNameIsGenerated(const char *cname)
{
if (cname[0] != '$')
return false;
if (strspn(cname+1, "0123456789") != strlen(cname+1))
if (strspn(cname + 1, "0123456789") != strlen(cname + 1))
return false;
return true;
}
@@ -377,11 +377,11 @@ ConstraintNameIsGenerated(const char *cname)
void
RemoveConstraintById(Oid conId)
{
Relation conDesc;
ScanKeyData skey[1];
SysScanDesc conscan;
HeapTuple tup;
Form_pg_constraint con;
Relation conDesc;
ScanKeyData skey[1];
SysScanDesc conscan;
HeapTuple tup;
Form_pg_constraint con;
conDesc = heap_openr(ConstraintRelationName, RowExclusiveLock);
@@ -399,8 +399,8 @@ RemoveConstraintById(Oid conId)
con = (Form_pg_constraint) GETSTRUCT(tup);
/*
* If the constraint is for a relation, open and exclusive-lock
* the relation it's for.
* If the constraint is for a relation, open and exclusive-lock the
* relation it's for.
*
* XXX not clear what we should lock, if anything, for other constraints.
*/
@@ -411,16 +411,16 @@ RemoveConstraintById(Oid conId)
rel = heap_open(con->conrelid, AccessExclusiveLock);
/*
* We need to update the relcheck count if it is a check constraint
* being dropped. This update will force backends to rebuild
* relcache entries when we commit.
* We need to update the relcheck count if it is a check
* constraint being dropped. This update will force backends to
* rebuild relcache entries when we commit.
*/
if (con->contype == CONSTRAINT_CHECK)
{
Relation pgrel;
HeapTuple relTup;
Form_pg_class classForm;
Relation pgrel;
HeapTuple relTup;
Form_pg_class classForm;
pgrel = heap_openr(RelationRelationName, RowExclusiveLock);
relTup = SearchSysCacheCopy(RELOID,
ObjectIdGetDatum(con->conrelid),

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_conversion.c,v 1.5 2002/08/06 05:40:45 ishii Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_conversion.c,v 1.6 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,12 +36,13 @@
 * Add a new tuple to pg_conversion.
* ---------------
*/
Oid ConversionCreate(const char *conname, Oid connamespace,
int32 conowner,
int4 conforencoding, int4 contoencoding,
Oid conproc, bool def)
Oid
ConversionCreate(const char *conname, Oid connamespace,
int32 conowner,
int4 conforencoding, int4 contoencoding,
Oid conproc, bool def)
{
int i;
int i;
Relation rel;
TupleDesc tupDesc;
HeapTuple tup;
@@ -49,8 +50,8 @@ Oid ConversionCreate(const char *conname, Oid connamespace,
Datum values[Natts_pg_conversion];
NameData cname;
Oid oid;
ObjectAddress myself,
referenced;
ObjectAddress myself,
referenced;
/* sanity checks */
if (!conname)
@@ -58,20 +59,22 @@ Oid ConversionCreate(const char *conname, Oid connamespace,
/* make sure there is no existing conversion of same name */
if (SearchSysCacheExists(CONNAMESP,
PointerGetDatum(conname),
ObjectIdGetDatum(connamespace),
0,0))
PointerGetDatum(conname),
ObjectIdGetDatum(connamespace),
0, 0))
elog(ERROR, "conversion name \"%s\" already exists", conname);
if (def)
{
/* make sure there is no existing default
<for encoding><to encoding> pair in this name space */
/*
* make sure there is no existing default <for encoding><to
* encoding> pair in this name space
*/
if (FindDefaultConversion(connamespace,
conforencoding,
contoencoding))
elog(ERROR, "default conversion for %s to %s already exists",
pg_encoding_to_char(conforencoding),pg_encoding_to_char(contoencoding));
pg_encoding_to_char(conforencoding), pg_encoding_to_char(contoencoding));
}
/* open pg_conversion */
@@ -129,8 +132,9 @@ Oid ConversionCreate(const char *conname, Oid connamespace,
* Drop a conversion and do dependency check.
* ---------------
*/
void ConversionDrop(const char *conname, Oid connamespace,
int32 conowner, DropBehavior behavior)
void
ConversionDrop(const char *conname, Oid connamespace,
int32 conowner, DropBehavior behavior)
{
Relation rel;
TupleDesc tupDesc;
@@ -138,8 +142,8 @@ void ConversionDrop(const char *conname, Oid connamespace,
HeapScanDesc scan;
ScanKeyData scanKeyData;
Form_pg_conversion body;
ObjectAddress object;
Oid myoid;
ObjectAddress object;
Oid myoid;
/* sanity checks */
if (!conname)
@@ -156,12 +160,12 @@ void ConversionDrop(const char *conname, Oid connamespace,
tupDesc = rel->rd_att;
scan = heap_beginscan(rel, SnapshotNow,
1, &scanKeyData);
1, &scanKeyData);
/* search for the target tuple */
while (HeapTupleIsValid(tuple = heap_getnext(scan, ForwardScanDirection)))
{
body = (Form_pg_conversion)GETSTRUCT(tuple);
body = (Form_pg_conversion) GETSTRUCT(tuple);
if (!strncmp(NameStr(body->conname), conname, NAMEDATALEN))
break;
}
@@ -172,12 +176,12 @@ void ConversionDrop(const char *conname, Oid connamespace,
return;
}
if (!superuser() && ((Form_pg_conversion)GETSTRUCT(tuple))->conowner != GetUserId())
if (!superuser() && ((Form_pg_conversion) GETSTRUCT(tuple))->conowner != GetUserId())
elog(ERROR, "permission denied");
myoid = HeapTupleGetOid(tuple);
heap_endscan(scan);
heap_close(rel, AccessShareLock);
heap_close(rel, AccessShareLock);
/*
* Do the deletion
@@ -215,7 +219,7 @@ RemoveConversionById(Oid conversionOid)
tupDesc = rel->rd_att;
scan = heap_beginscan(rel, SnapshotNow,
1, &scanKeyData);
1, &scanKeyData);
/* search for the target tuple */
if (HeapTupleIsValid(tuple = heap_getnext(scan, ForwardScanDirection)))
@@ -233,28 +237,29 @@ RemoveConversionById(Oid conversionOid)
* If found, returns the procedure's oid, otherwise InvalidOid.
* ---------------
*/
Oid FindDefaultConversion(Oid name_space, int4 for_encoding, int4 to_encoding)
Oid
FindDefaultConversion(Oid name_space, int4 for_encoding, int4 to_encoding)
{
CatCList *catlist;
CatCList *catlist;
HeapTuple tuple;
Form_pg_conversion body;
Oid proc = InvalidOid;
int i;
Oid proc = InvalidOid;
int i;
/* Check we have usage rights in target namespace */
if (pg_namespace_aclcheck(name_space, GetUserId(), ACL_USAGE) != ACLCHECK_OK)
return proc;
catlist = SearchSysCacheList(CONDEFAULT, 3,
ObjectIdGetDatum(name_space),
Int32GetDatum(for_encoding),
Int32GetDatum(to_encoding),
0);
ObjectIdGetDatum(name_space),
Int32GetDatum(for_encoding),
Int32GetDatum(to_encoding),
0);
for (i = 0; i < catlist->n_members; i++)
{
tuple = &catlist->members[i]->tuple;
body = (Form_pg_conversion)GETSTRUCT(tuple);
body = (Form_pg_conversion) GETSTRUCT(tuple);
if (body->condefault == TRUE)
{
proc = body->conproc;
@@ -272,22 +277,23 @@ Oid FindDefaultConversion(Oid name_space, int4 for_encoding, int4 to_encoding)
* Returns conversion oid.
* ---------------
*/
Oid FindConversion(const char *conname, Oid connamespace)
Oid
FindConversion(const char *conname, Oid connamespace)
{
HeapTuple tuple;
Oid procoid;
Oid conoid;
Oid procoid;
Oid conoid;
AclResult aclresult;
/* search pg_conversion by connamespace and conversion name */
tuple = SearchSysCache(CONNAMESP,
PointerGetDatum(conname),
ObjectIdGetDatum(connamespace),
0,0);
0, 0);
if (!HeapTupleIsValid(tuple))
return InvalidOid;
procoid = ((Form_pg_conversion)GETSTRUCT(tuple))->conproc;
procoid = ((Form_pg_conversion) GETSTRUCT(tuple))->conproc;
conoid = HeapTupleGetOid(tuple);
ReleaseSysCache(tuple);
@@ -318,7 +324,7 @@ pg_convert3(PG_FUNCTION_ARGS)
text *retval;
unsigned char *str;
unsigned char *result;
int len;
int len;
if (!OidIsValid(convoid))
elog(ERROR, "Conversion does not exist");
@@ -331,13 +337,13 @@ pg_convert3(PG_FUNCTION_ARGS)
tuple = SearchSysCache(CONOID,
ObjectIdGetDatum(convoid),
0,0,0);
0, 0, 0);
if (!HeapTupleIsValid(tuple))
elog(ERROR, "Conversion %u search from syscache failed", convoid);
result = palloc(len * 4 + 1);
body = (Form_pg_conversion)GETSTRUCT(tuple);
body = (Form_pg_conversion) GETSTRUCT(tuple);
OidFunctionCall5(body->conproc,
Int32GetDatum(body->conforencoding),
Int32GetDatum(body->contoencoding),
@@ -347,9 +353,11 @@ pg_convert3(PG_FUNCTION_ARGS)
ReleaseSysCache(tuple);
	/* build text data type structure. we cannot use textin() here,
since textin assumes that input string encoding is same as
database encoding. */
/*
	 * build text data type structure. we cannot use textin() here, since
* textin assumes that input string encoding is same as database
* encoding.
*/
len = strlen(result) + VARHDRSZ;
retval = palloc(len);
VARATT_SIZEP(retval) = len;

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_depend.c,v 1.5 2002/08/11 21:17:34 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_depend.c,v 1.6 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,7 +44,7 @@ recordDependencyOn(const ObjectAddress *depender,
/*
* Record multiple dependencies (of the same kind) for a single dependent
* object. This has a little less overhead than recording each separately.
* object. This has a little less overhead than recording each separately.
*/
void
recordMultipleDependencies(const ObjectAddress *depender,
@@ -79,9 +79,9 @@ recordMultipleDependencies(const ObjectAddress *depender,
for (i = 0; i < nreferenced; i++, referenced++)
{
/*
* If the referenced object is pinned by the system, there's no real
* need to record dependencies on it. This saves lots of space in
* pg_depend, so it's worth the time taken to check.
* If the referenced object is pinned by the system, there's no
* real need to record dependencies on it. This saves lots of
* space in pg_depend, so it's worth the time taken to check.
*/
if (!isObjectPinned(referenced, dependDesc))
{
@@ -89,15 +89,15 @@ recordMultipleDependencies(const ObjectAddress *depender,
* Record the Dependency. Note we don't bother to check for
* duplicate dependencies; there's no harm in them.
*/
values[Anum_pg_depend_classid - 1] = ObjectIdGetDatum(depender->classId);
values[Anum_pg_depend_objid - 1] = ObjectIdGetDatum(depender->objectId);
values[Anum_pg_depend_objsubid - 1] = Int32GetDatum(depender->objectSubId);
values[Anum_pg_depend_classid - 1] = ObjectIdGetDatum(depender->classId);
values[Anum_pg_depend_objid - 1] = ObjectIdGetDatum(depender->objectId);
values[Anum_pg_depend_objsubid - 1] = Int32GetDatum(depender->objectSubId);
values[Anum_pg_depend_refclassid - 1] = ObjectIdGetDatum(referenced->classId);
values[Anum_pg_depend_refobjid - 1] = ObjectIdGetDatum(referenced->objectId);
values[Anum_pg_depend_refobjsubid - 1] = Int32GetDatum(referenced->objectSubId);
values[Anum_pg_depend_refclassid - 1] = ObjectIdGetDatum(referenced->classId);
values[Anum_pg_depend_refobjid - 1] = ObjectIdGetDatum(referenced->objectId);
values[Anum_pg_depend_refobjsubid - 1] = Int32GetDatum(referenced->objectSubId);
values[Anum_pg_depend_deptype -1] = CharGetDatum((char) behavior);
values[Anum_pg_depend_deptype - 1] = CharGetDatum((char) behavior);
tup = heap_formtuple(dependDesc->rd_att, values, nulls);
@@ -130,11 +130,11 @@ recordMultipleDependencies(const ObjectAddress *depender,
long
deleteDependencyRecordsFor(Oid classId, Oid objectId)
{
long count = 0;
Relation depRel;
ScanKeyData key[2];
SysScanDesc scan;
HeapTuple tup;
long count = 0;
Relation depRel;
ScanKeyData key[2];
SysScanDesc scan;
HeapTuple tup;
depRel = heap_openr(DependRelationName, RowExclusiveLock);
@@ -174,7 +174,7 @@ static bool
isObjectPinned(const ObjectAddress *object, Relation rel)
{
bool ret = false;
SysScanDesc scan;
SysScanDesc scan;
HeapTuple tup;
ScanKeyData key[2];
@@ -192,13 +192,13 @@ isObjectPinned(const ObjectAddress *object, Relation rel)
/*
* Since we won't generate additional pg_depend entries for pinned
* objects, there can be at most one entry referencing a pinned
* object. Hence, it's sufficient to look at the first returned
* object. Hence, it's sufficient to look at the first returned
* tuple; we don't need to loop.
*/
tup = systable_getnext(scan);
if (HeapTupleIsValid(tup))
{
Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(tup);
Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(tup);
if (foundDep->deptype == DEPENDENCY_PIN)
ret = true;
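Not part of the commit: a minimal sketch, modeled on the index_create hunk earlier in this commit, of how a caller records a pg_depend entry for a new index so that it is dropped automatically with its parent table. The function name, the header choices, and the OID parameters are illustrative assumptions.

#include "postgres.h"
#include "catalog/dependency.h"
#include "catalog/pg_class.h"	/* assumed home of RelOid_pg_class */

/* Hypothetical fragment: make indexOid auto-depend on tableOid. */
static void
record_index_dependency(Oid indexOid, Oid tableOid)
{
	ObjectAddress myself,
				referenced;

	myself.classId = RelOid_pg_class;	/* both objects live in pg_class */
	myself.objectId = indexOid;
	myself.objectSubId = 0;

	referenced.classId = RelOid_pg_class;
	referenced.objectId = tableOid;
	referenced.objectSubId = 0;

	recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
}

An AUTO dependency lets the dependent object be dropped silently along with the referenced one, which matches the non-constraint case described in the index_create comment above; constraint-backed indexes are instead linked through their pg_constraint entry.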

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.76 2002/08/22 00:01:41 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.77 2002/09/04 20:31:14 momjian Exp $
*
* NOTES
* these routines moved here from commands/define.c and somewhat cleaned up.
@@ -34,28 +34,28 @@
static Oid OperatorGet(const char *operatorName,
Oid operatorNamespace,
Oid leftObjectId,
Oid rightObjectId,
bool *defined);
Oid operatorNamespace,
Oid leftObjectId,
Oid rightObjectId,
bool *defined);
static Oid OperatorLookup(List *operatorName,
Oid leftObjectId,
Oid rightObjectId,
bool *defined);
Oid leftObjectId,
Oid rightObjectId,
bool *defined);
static Oid OperatorShellMake(const char *operatorName,
Oid operatorNamespace,
Oid leftTypeId,
Oid rightTypeId);
Oid operatorNamespace,
Oid leftTypeId,
Oid rightTypeId);
static void OperatorUpd(Oid baseId, Oid commId, Oid negId);
static Oid get_other_operator(List *otherOp,
Oid otherLeftTypeId, Oid otherRightTypeId,
const char *operatorName, Oid operatorNamespace,
Oid leftTypeId, Oid rightTypeId,
bool isCommutator);
Oid otherLeftTypeId, Oid otherRightTypeId,
const char *operatorName, Oid operatorNamespace,
Oid leftTypeId, Oid rightTypeId,
bool isCommutator);
static void makeOperatorDependencies(HeapTuple tuple, Oid pg_operator_relid);
@@ -229,23 +229,23 @@ OperatorShellMake(const char *operatorName,
*/
i = 0;
namestrcpy(&oname, operatorName);
values[i++] = NameGetDatum(&oname); /* oprname */
values[i++] = NameGetDatum(&oname); /* oprname */
values[i++] = ObjectIdGetDatum(operatorNamespace); /* oprnamespace */
values[i++] = Int32GetDatum(GetUserId()); /* oprowner */
values[i++] = Int32GetDatum(GetUserId()); /* oprowner */
values[i++] = CharGetDatum(leftTypeId ? (rightTypeId ? 'b' : 'r') : 'l'); /* oprkind */
values[i++] = BoolGetDatum(false); /* oprcanhash */
values[i++] = ObjectIdGetDatum(leftTypeId); /* oprleft */
values[i++] = ObjectIdGetDatum(rightTypeId); /* oprright */
values[i++] = ObjectIdGetDatum(InvalidOid); /* oprresult */
values[i++] = ObjectIdGetDatum(InvalidOid); /* oprcom */
values[i++] = ObjectIdGetDatum(InvalidOid); /* oprnegate */
values[i++] = ObjectIdGetDatum(InvalidOid); /* oprlsortop */
values[i++] = ObjectIdGetDatum(InvalidOid); /* oprrsortop */
values[i++] = ObjectIdGetDatum(InvalidOid); /* oprltcmpop */
values[i++] = ObjectIdGetDatum(InvalidOid); /* oprgtcmpop */
values[i++] = ObjectIdGetDatum(InvalidOid); /* oprcode */
values[i++] = ObjectIdGetDatum(InvalidOid); /* oprrest */
values[i++] = ObjectIdGetDatum(InvalidOid); /* oprjoin */
values[i++] = BoolGetDatum(false); /* oprcanhash */
values[i++] = ObjectIdGetDatum(leftTypeId); /* oprleft */
values[i++] = ObjectIdGetDatum(rightTypeId); /* oprright */
values[i++] = ObjectIdGetDatum(InvalidOid); /* oprresult */
values[i++] = ObjectIdGetDatum(InvalidOid); /* oprcom */
values[i++] = ObjectIdGetDatum(InvalidOid); /* oprnegate */
values[i++] = ObjectIdGetDatum(InvalidOid); /* oprlsortop */
values[i++] = ObjectIdGetDatum(InvalidOid); /* oprrsortop */
values[i++] = ObjectIdGetDatum(InvalidOid); /* oprltcmpop */
values[i++] = ObjectIdGetDatum(InvalidOid); /* oprgtcmpop */
values[i++] = ObjectIdGetDatum(InvalidOid); /* oprcode */
values[i++] = ObjectIdGetDatum(InvalidOid); /* oprrest */
values[i++] = ObjectIdGetDatum(InvalidOid); /* oprjoin */
/*
* open pg_operator
@@ -506,14 +506,14 @@ OperatorCreate(const char *operatorName,
i = 0;
namestrcpy(&oname, operatorName);
values[i++] = NameGetDatum(&oname); /* oprname */
values[i++] = NameGetDatum(&oname); /* oprname */
values[i++] = ObjectIdGetDatum(operatorNamespace); /* oprnamespace */
values[i++] = Int32GetDatum(GetUserId()); /* oprowner */
values[i++] = Int32GetDatum(GetUserId()); /* oprowner */
values[i++] = CharGetDatum(leftTypeId ? (rightTypeId ? 'b' : 'r') : 'l'); /* oprkind */
values[i++] = BoolGetDatum(canHash); /* oprcanhash */
values[i++] = ObjectIdGetDatum(leftTypeId); /* oprleft */
values[i++] = ObjectIdGetDatum(rightTypeId); /* oprright */
values[i++] = ObjectIdGetDatum(operResultType); /* oprresult */
values[i++] = BoolGetDatum(canHash); /* oprcanhash */
values[i++] = ObjectIdGetDatum(leftTypeId); /* oprleft */
values[i++] = ObjectIdGetDatum(rightTypeId); /* oprright */
values[i++] = ObjectIdGetDatum(operResultType); /* oprresult */
/*
* Set up the other operators. If they do not currently exist, create
@@ -528,16 +528,17 @@ OperatorCreate(const char *operatorName,
operatorName, operatorNamespace,
leftTypeId, rightTypeId,
true);
/*
* self-linkage to this operator; will fix below. Note
* that only self-linkage for commutation makes sense.
* self-linkage to this operator; will fix below. Note that only
* self-linkage for commutation makes sense.
*/
if (!OidIsValid(commutatorId))
selfCommutator = true;
}
else
commutatorId = InvalidOid;
values[i++] = ObjectIdGetDatum(commutatorId); /* oprcom */
values[i++] = ObjectIdGetDatum(commutatorId); /* oprcom */
if (negatorName)
{
@@ -550,20 +551,20 @@ OperatorCreate(const char *operatorName,
}
else
negatorId = InvalidOid;
values[i++] = ObjectIdGetDatum(negatorId); /* oprnegate */
values[i++] = ObjectIdGetDatum(negatorId); /* oprnegate */
if (leftSortName)
{
/* left sort op takes left-side data type */
leftSortId = get_other_operator(leftSortName,
leftTypeId, leftTypeId,
operatorName, operatorNamespace,
leftTypeId, rightTypeId,
false);
leftTypeId, leftTypeId,
operatorName, operatorNamespace,
leftTypeId, rightTypeId,
false);
}
else
leftSortId = InvalidOid;
values[i++] = ObjectIdGetDatum(leftSortId); /* oprlsortop */
values[i++] = ObjectIdGetDatum(leftSortId); /* oprlsortop */
if (rightSortName)
{
@@ -576,7 +577,7 @@ OperatorCreate(const char *operatorName,
}
else
rightSortId = InvalidOid;
values[i++] = ObjectIdGetDatum(rightSortId); /* oprrsortop */
values[i++] = ObjectIdGetDatum(rightSortId); /* oprrsortop */
if (ltCompareName)
{
@@ -589,7 +590,7 @@ OperatorCreate(const char *operatorName,
}
else
ltCompareId = InvalidOid;
values[i++] = ObjectIdGetDatum(ltCompareId); /* oprltcmpop */
values[i++] = ObjectIdGetDatum(ltCompareId); /* oprltcmpop */
if (gtCompareName)
{
@@ -602,11 +603,11 @@ OperatorCreate(const char *operatorName,
}
else
gtCompareId = InvalidOid;
values[i++] = ObjectIdGetDatum(gtCompareId); /* oprgtcmpop */
values[i++] = ObjectIdGetDatum(gtCompareId); /* oprgtcmpop */
values[i++] = ObjectIdGetDatum(procOid); /* oprcode */
values[i++] = ObjectIdGetDatum(restOid); /* oprrest */
values[i++] = ObjectIdGetDatum(joinOid); /* oprjoin */
values[i++] = ObjectIdGetDatum(procOid); /* oprcode */
values[i++] = ObjectIdGetDatum(restOid); /* oprrest */
values[i++] = ObjectIdGetDatum(joinOid); /* oprjoin */
pg_operator_desc = heap_openr(OperatorRelationName, RowExclusiveLock);
@@ -703,8 +704,8 @@ get_other_operator(List *otherOp, Oid otherLeftTypeId, Oid otherRightTypeId,
otherRightTypeId == rightTypeId)
{
/*
* self-linkage to this operator; caller will fix later. Note
* that only self-linkage for commutation makes sense.
* self-linkage to this operator; caller will fix later. Note that
* only self-linkage for commutation makes sense.
*/
if (!isCommutator)
elog(ERROR, "operator cannot be its own negator or sort operator");
@@ -868,9 +869,9 @@ OperatorUpd(Oid baseId, Oid commId, Oid negId)
static void
makeOperatorDependencies(HeapTuple tuple, Oid pg_operator_relid)
{
Form_pg_operator oper = (Form_pg_operator) GETSTRUCT(tuple);
ObjectAddress myself,
referenced;
Form_pg_operator oper = (Form_pg_operator) GETSTRUCT(tuple);
ObjectAddress myself,
referenced;
myself.classId = pg_operator_relid;
myself.objectId = HeapTupleGetOid(tuple);
@@ -918,11 +919,11 @@ makeOperatorDependencies(HeapTuple tuple, Oid pg_operator_relid)
/*
* NOTE: we do not consider the operator to depend on the associated
* operators oprcom, oprnegate, oprlsortop, oprrsortop, oprltcmpop,
* oprgtcmpop. We would not want to delete this operator if those
* go away, but only reset the link fields; which is not a function
* that the dependency code can presently handle. (Something could
* perhaps be done with objectSubId though.) For now, it's okay to
* let those links dangle if a referenced operator is removed.
* oprgtcmpop. We would not want to delete this operator if those go
* away, but only reset the link fields; which is not a function that
* the dependency code can presently handle. (Something could perhaps
* be done with objectSubId though.) For now, it's okay to let those
* links dangle if a referenced operator is removed.
*/
/* Dependency on implementation function */

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.92 2002/09/02 01:05:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.93 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -34,9 +34,9 @@
static void checkretval(Oid rettype, char fn_typtype, List *queryTreeList);
Datum fmgr_internal_validator(PG_FUNCTION_ARGS);
Datum fmgr_c_validator(PG_FUNCTION_ARGS);
Datum fmgr_sql_validator(PG_FUNCTION_ARGS);
Datum fmgr_internal_validator(PG_FUNCTION_ARGS);
Datum fmgr_c_validator(PG_FUNCTION_ARGS);
Datum fmgr_sql_validator(PG_FUNCTION_ARGS);
/* ----------------------------------------------------------------
@@ -73,8 +73,8 @@ ProcedureCreate(const char *procedureName,
TupleDesc tupDesc;
Oid retval;
bool is_update;
ObjectAddress myself,
referenced;
ObjectAddress myself,
referenced;
/*
* sanity checks
@@ -153,16 +153,16 @@ ProcedureCreate(const char *procedureName,
i = 0;
namestrcpy(&procname, procedureName);
values[i++] = NameGetDatum(&procname); /* proname */
values[i++] = ObjectIdGetDatum(procNamespace); /* pronamespace */
values[i++] = ObjectIdGetDatum(procNamespace); /* pronamespace */
values[i++] = Int32GetDatum(GetUserId()); /* proowner */
values[i++] = ObjectIdGetDatum(languageObjectId); /* prolang */
values[i++] = BoolGetDatum(isAgg); /* proisagg */
values[i++] = BoolGetDatum(security_definer); /* prosecdef */
values[i++] = ObjectIdGetDatum(languageObjectId); /* prolang */
values[i++] = BoolGetDatum(isAgg); /* proisagg */
values[i++] = BoolGetDatum(security_definer); /* prosecdef */
values[i++] = BoolGetDatum(isStrict); /* proisstrict */
values[i++] = BoolGetDatum(returnsSet); /* proretset */
values[i++] = CharGetDatum(volatility); /* provolatile */
values[i++] = UInt16GetDatum(parameterCount); /* pronargs */
values[i++] = ObjectIdGetDatum(returnType); /* prorettype */
values[i++] = UInt16GetDatum(parameterCount); /* pronargs */
values[i++] = ObjectIdGetDatum(returnType); /* prorettype */
values[i++] = PointerGetDatum(typev); /* proargtypes */
values[i++] = DirectFunctionCall1(textin, /* prosrc */
CStringGetDatum(prosrc));
@@ -213,8 +213,8 @@ ProcedureCreate(const char *procedureName,
}
/* do not change existing ownership or permissions, either */
replaces[Anum_pg_proc_proowner-1] = ' ';
replaces[Anum_pg_proc_proacl-1] = ' ';
replaces[Anum_pg_proc_proowner - 1] = ' ';
replaces[Anum_pg_proc_proacl - 1] = ' ';
/* Okay, do it... */
tup = heap_modifytuple(oldtup, rel, values, nulls, replaces);
@@ -228,7 +228,7 @@ ProcedureCreate(const char *procedureName,
/* Creating a new procedure */
/* start out with empty permissions */
nulls[Anum_pg_proc_proacl-1] = 'n';
nulls[Anum_pg_proc_proacl - 1] = 'n';
tup = heap_formtuple(tupDesc, values, nulls);
simple_heap_insert(rel, tup);
@@ -332,7 +332,8 @@ checkretval(Oid rettype, char fn_typtype, List *queryTreeList)
tlist = parse->targetList;
/*
* The last query must be a SELECT if and only if return type isn't VOID.
* The last query must be a SELECT if and only if return type isn't
* VOID.
*/
if (rettype == VOIDOID)
{
@@ -360,8 +361,8 @@ checkretval(Oid rettype, char fn_typtype, List *queryTreeList)
/*
* For base-type returns, the target list should have exactly one
* entry, and its type should agree with what the user declared. (As
* of Postgres 7.2, we accept binary-compatible types too.)
* entry, and its type should agree with what the user declared.
* (As of Postgres 7.2, we accept binary-compatible types too.)
*/
if (tlistlen != 1)
elog(ERROR, "function declared to return %s returns multiple columns in final SELECT",
@@ -378,11 +379,11 @@ checkretval(Oid rettype, char fn_typtype, List *queryTreeList)
Assert(typerelid != InvalidOid);
/*
* If the target list is of length 1, and the type of the varnode in
* the target list matches the declared return type, this is okay.
* This can happen, for example, where the body of the function is
* 'SELECT func2()', where func2 has the same return type as the
* function that's calling it.
* If the target list is of length 1, and the type of the varnode
* in the target list matches the declared return type, this is
* okay. This can happen, for example, where the body of the
* function is 'SELECT func2()', where func2 has the same return
* type as the function that's calling it.
*/
if (tlistlen == 1)
{
@@ -392,15 +393,15 @@ checkretval(Oid rettype, char fn_typtype, List *queryTreeList)
}
/*
* Otherwise verify that the targetlist matches the return tuple type.
* This part of the typechecking is a hack. We look up the relation
* that is the declared return type, and scan the non-deleted
* attributes to ensure that they match the datatypes of the
* non-resjunk columns.
* Otherwise verify that the targetlist matches the return tuple
* type. This part of the typechecking is a hack. We look up the
* relation that is the declared return type, and scan the
* non-deleted attributes to ensure that they match the datatypes
* of the non-resjunk columns.
*/
reln = relation_open(typerelid, AccessShareLock);
relnatts = reln->rd_rel->relnatts;
rellogcols = 0; /* we'll count nondeleted cols as we go */
rellogcols = 0; /* we'll count nondeleted cols as we go */
colindex = 0;
foreach(tlistitem, tlist)
@@ -413,7 +414,8 @@ checkretval(Oid rettype, char fn_typtype, List *queryTreeList)
if (tle->resdom->resjunk)
continue;
do {
do
{
colindex++;
if (colindex > relnatts)
elog(ERROR, "function declared to return %s does not SELECT the right number of columns (%d)",
@@ -453,8 +455,8 @@ checkretval(Oid rettype, char fn_typtype, List *queryTreeList)
Assert(typerelid == InvalidOid);
/*
* For RECORD return type, defer this check until we get the
* first tuple.
* For RECORD return type, defer this check until we get the first
* tuple.
*/
}
else
@@ -532,7 +534,7 @@ fmgr_c_validator(PG_FUNCTION_ARGS)
if (isnull)
elog(ERROR, "null probin");
probin = DatumGetCString(DirectFunctionCall1(textout, tmp));
(void) load_external_function(probin, prosrc, true, &libraryhandle);
(void) fetch_finfo_record(libraryhandle, prosrc);


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.81 2002/09/02 01:05:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.82 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -73,7 +73,7 @@ TypeShellMake(const char *typeName, Oid typeNamespace)
i = 0;
namestrcpy(&name, typeName);
values[i++] = NameGetDatum(&name); /* typname */
values[i++] = ObjectIdGetDatum(typeNamespace); /* typnamespace */
values[i++] = ObjectIdGetDatum(typeNamespace); /* typnamespace */
values[i++] = ObjectIdGetDatum(InvalidOid); /* typowner */
values[i++] = Int16GetDatum(0); /* typlen */
values[i++] = BoolGetDatum(false); /* typbyval */
@@ -84,12 +84,12 @@ TypeShellMake(const char *typeName, Oid typeNamespace)
values[i++] = ObjectIdGetDatum(InvalidOid); /* typelem */
values[i++] = ObjectIdGetDatum(InvalidOid); /* typinput */
values[i++] = ObjectIdGetDatum(InvalidOid); /* typoutput */
values[i++] = CharGetDatum('i'); /* typalign */
values[i++] = CharGetDatum('p'); /* typstorage */
values[i++] = BoolGetDatum(false); /* typnotnull */
values[i++] = ObjectIdGetDatum(InvalidOid); /* typbasetype */
values[i++] = Int32GetDatum(-1); /* typtypmod */
values[i++] = Int32GetDatum(0); /* typndims */
values[i++] = CharGetDatum('i'); /* typalign */
values[i++] = CharGetDatum('p'); /* typstorage */
values[i++] = BoolGetDatum(false); /* typnotnull */
values[i++] = ObjectIdGetDatum(InvalidOid); /* typbasetype */
values[i++] = Int32GetDatum(-1); /* typtypmod */
values[i++] = Int32GetDatum(0); /* typndims */
nulls[i++] = 'n'; /* typdefaultbin */
nulls[i++] = 'n'; /* typdefault */
@@ -129,8 +129,8 @@ Oid
TypeCreate(const char *typeName,
Oid typeNamespace,
Oid assignedTypeOid,
Oid relationOid, /* only for 'c'atalog typeType */
char relationKind, /* ditto */
Oid relationOid, /* only for 'c'atalog typeType */
char relationKind, /* ditto */
int16 internalSize,
char typeType,
char typDelim,
@@ -138,13 +138,13 @@ TypeCreate(const char *typeName,
Oid outputProcedure,
Oid elementType,
Oid baseType,
const char *defaultTypeValue, /* human readable rep */
const char *defaultTypeValue, /* human readable rep */
const char *defaultTypeBin, /* cooked rep */
bool passedByValue,
char alignment,
char storage,
int32 typeMod,
int32 typNDims, /* Array dimensions for baseType */
int32 typNDims, /* Array dimensions for baseType */
bool typeNotNull)
{
Relation pg_type_desc;
@@ -158,8 +158,8 @@ TypeCreate(const char *typeName,
int i;
/*
* We assume that the caller validated the arguments individually,
* but did not check for bad combinations.
* We assume that the caller validated the arguments individually, but
* did not check for bad combinations.
*
* Validate size specifications: either positive (fixed-length) or -1
* (varlena) or -2 (cstring). Pass-by-value types must have a fixed
@@ -195,27 +195,27 @@ TypeCreate(const char *typeName,
i = 0;
namestrcpy(&name, typeName);
values[i++] = NameGetDatum(&name); /* typname */
values[i++] = ObjectIdGetDatum(typeNamespace); /* typnamespace */
values[i++] = ObjectIdGetDatum(typeNamespace); /* typnamespace */
values[i++] = Int32GetDatum(GetUserId()); /* typowner */
values[i++] = Int16GetDatum(internalSize); /* typlen */
values[i++] = BoolGetDatum(passedByValue); /* typbyval */
values[i++] = CharGetDatum(typeType); /* typtype */
values[i++] = BoolGetDatum(true); /* typisdefined */
values[i++] = BoolGetDatum(true); /* typisdefined */
values[i++] = CharGetDatum(typDelim); /* typdelim */
values[i++] = ObjectIdGetDatum(typeType == 'c' ? relationOid : InvalidOid); /* typrelid */
values[i++] = ObjectIdGetDatum(elementType); /* typelem */
values[i++] = ObjectIdGetDatum(inputProcedure); /* typinput */
values[i++] = ObjectIdGetDatum(outputProcedure); /* typoutput */
values[i++] = ObjectIdGetDatum(elementType); /* typelem */
values[i++] = ObjectIdGetDatum(inputProcedure); /* typinput */
values[i++] = ObjectIdGetDatum(outputProcedure); /* typoutput */
values[i++] = CharGetDatum(alignment); /* typalign */
values[i++] = CharGetDatum(storage); /* typstorage */
values[i++] = BoolGetDatum(typeNotNull); /* typnotnull */
values[i++] = ObjectIdGetDatum(baseType); /* typbasetype */
values[i++] = Int32GetDatum(typeMod); /* typtypmod */
values[i++] = Int32GetDatum(typNDims); /* typndims */
values[i++] = BoolGetDatum(typeNotNull); /* typnotnull */
values[i++] = ObjectIdGetDatum(baseType); /* typbasetype */
values[i++] = Int32GetDatum(typeMod); /* typtypmod */
values[i++] = Int32GetDatum(typNDims); /* typndims */
/*
* initialize the default binary value for this type. Check for
* nulls of course.
* initialize the default binary value for this type. Check for nulls
* of course.
*/
if (defaultTypeBin)
values[i] = DirectFunctionCall1(textin,
@@ -229,7 +229,7 @@ TypeCreate(const char *typeName,
*/
if (defaultTypeValue)
values[i] = DirectFunctionCall1(textin,
CStringGetDatum(defaultTypeValue));
CStringGetDatum(defaultTypeValue));
else
nulls[i] = 'n';
i++; /* typdefault */
@@ -291,8 +291,8 @@ TypeCreate(const char *typeName,
*/
if (!IsBootstrapProcessingMode())
{
ObjectAddress myself,
referenced;
ObjectAddress myself,
referenced;
myself.classId = RelOid_pg_type;
myself.objectId = typeObjectId;
@@ -321,13 +321,13 @@ TypeCreate(const char *typeName,
/*
* If the type is a rowtype for a relation, mark it as internally
* dependent on the relation, *unless* it is a stand-alone composite
* type relation. For the latter case, we have to reverse the
* dependency.
* dependent on the relation, *unless* it is a stand-alone
* composite type relation. For the latter case, we have to
* reverse the dependency.
*
* In the former case, this allows the type to be auto-dropped
* when the relation is, and not otherwise. And in the latter,
* of course we get the opposite effect.
* In the former case, this allows the type to be auto-dropped when
* the relation is, and not otherwise. And in the latter, of
* course we get the opposite effect.
*/
if (OidIsValid(relationOid))
{
@@ -342,11 +342,11 @@ TypeCreate(const char *typeName,
}
/*
* If the type is an array type, mark it auto-dependent on the base
* type. (This is a compromise between the typical case where the
* array type is automatically generated and the case where it is
* manually created: we'd prefer INTERNAL for the former case and
* NORMAL for the latter.)
* If the type is an array type, mark it auto-dependent on the
* base type. (This is a compromise between the typical case
* where the array type is automatically generated and the case
* where it is manually created: we'd prefer INTERNAL for the
* former case and NORMAL for the latter.)
*/
if (OidIsValid(elementType))
{
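The TypeCreate hunks above reflow a comment about size specifications: a positive typlen means a fixed-length type, -1 means varlena, -2 means cstring, and pass-by-value types must be fixed length. Below is a minimal standalone C sketch of that rule as a reading aid; typlen_spec_ok is a hypothetical name, the Datum typedef is only a stand-in for the backend's, and the "fits in a Datum" bound for by-value types is an assumption drawn from the truncated comment rather than something visible in this diff.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

typedef intptr_t Datum;		/* stand-in for the backend's Datum */

/*
 * Hypothetical check mirroring the rules the comment describes:
 * positive = fixed length, -1 = varlena, -2 = cstring; pass-by-value
 * types must be fixed length and (presumably) fit in a Datum.
 */
static bool
typlen_spec_ok(int internalSize, bool passedByValue)
{
	if (passedByValue)
		return internalSize > 0 && internalSize <= (int) sizeof(Datum);
	return internalSize > 0 || internalSize == -1 || internalSize == -2;
}

int
main(void)
{
	printf("%d %d %d\n",
		   typlen_spec_ok(4, true),		/* int4-like: ok */
		   typlen_spec_ok(-1, false),	/* varlena: ok */
		   typlen_spec_ok(-1, true));	/* by-value varlena: not ok */
	return 0;
}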


@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/aggregatecmds.c,v 1.4 2002/08/22 00:01:41 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/aggregatecmds.c,v 1.5 2002/09/04 20:31:14 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -110,8 +110,8 @@ DefineAggregate(List *names, List *parameters)
* We have historically allowed the command to look like basetype = 'ANY'
* so we must do a case-insensitive comparison for the name ANY. Ugh.
*
* basetype can be a pseudo-type, but transtype can't, since we need
* to be able to store values of the transtype.
* basetype can be a pseudo-type, but transtype can't, since we need to
* be able to store values of the transtype.
*/
if (strcasecmp(TypeNameToString(baseType), "ANY") == 0)
baseTypeId = ANYOID;
@@ -127,10 +127,10 @@ DefineAggregate(List *names, List *parameters)
* Most of the argument-checking is done inside of AggregateCreate
*/
AggregateCreate(aggName, /* aggregate name */
aggNamespace, /* namespace */
aggNamespace, /* namespace */
transfuncName, /* step function name */
finalfuncName, /* final function name */
baseTypeId, /* type of data being aggregated */
baseTypeId, /* type of data being aggregated */
transTypeId, /* transition data type */
initval); /* initial condition */
}
@@ -154,8 +154,8 @@ RemoveAggregate(RemoveAggrStmt *stmt)
* if a basetype is passed in, then attempt to find an aggregate for
* that specific type.
*
* else attempt to find an aggregate with a basetype of ANYOID.
* This means that the aggregate is to apply to all basetypes (eg, COUNT).
* else attempt to find an aggregate with a basetype of ANYOID. This
* means that the aggregate is to apply to all basetypes (eg, COUNT).
*/
if (aggType)
basetypeID = typenameTypeId(aggType);
@@ -170,7 +170,7 @@ RemoveAggregate(RemoveAggrStmt *stmt)
tup = SearchSysCache(PROCOID,
ObjectIdGetDatum(procOid),
0, 0, 0);
if (!HeapTupleIsValid(tup)) /* should not happen */
if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "RemoveAggregate: couldn't find pg_proc tuple for %s",
NameListToString(aggName));


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.45 2002/08/26 18:45:57 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.46 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -110,10 +110,10 @@ typedef struct
/* Default statistics target (GUC parameter) */
int default_statistics_target = 10;
int default_statistics_target = 10;
static int elevel = -1;
static int elevel = -1;
static MemoryContext anl_context = NULL;
@@ -163,9 +163,9 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
elevel = DEBUG1;
/*
* Use the current context for storing analysis info. vacuum.c ensures
* that this context will be cleared when I return, thus releasing the
* memory allocated here.
* Use the current context for storing analysis info. vacuum.c
* ensures that this context will be cleared when I return, thus
* releasing the memory allocated here.
*/
anl_context = CurrentMemoryContext;
@@ -219,7 +219,7 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
* We can ANALYZE any table except pg_statistic. See update_attstats
*/
if (IsSystemNamespace(RelationGetNamespace(onerel)) &&
strcmp(RelationGetRelationName(onerel), StatisticRelationName) == 0)
strcmp(RelationGetRelationName(onerel), StatisticRelationName) == 0)
{
relation_close(onerel, AccessShareLock);
return;
@@ -1042,11 +1042,15 @@ compute_minimal_stats(VacAttrStats *stats,
*/
int f1 = nonnull_cnt - summultiple;
int d = f1 + nmultiple;
double numer, denom, stadistinct;
double numer,
denom,
stadistinct;
numer = (double) numrows *(double) d;
numer = (double) numrows * (double) d;
denom = (double) (numrows - f1) +
(double) f1 * (double) numrows / totalrows;
(double) f1 *(double) numrows / totalrows;
stadistinct = numer / denom;
/* Clamp to sane range in case of roundoff error */
if (stadistinct < (double) d)
@@ -1361,11 +1365,15 @@ compute_scalar_stats(VacAttrStats *stats,
*/
int f1 = ndistinct - nmultiple + toowide_cnt;
int d = f1 + nmultiple;
double numer, denom, stadistinct;
double numer,
denom,
stadistinct;
numer = (double) numrows *(double) d;
numer = (double) numrows * (double) d;
denom = (double) (numrows - f1) +
(double) f1 * (double) numrows / totalrows;
(double) f1 *(double) numrows / totalrows;
stadistinct = numer / denom;
/* Clamp to sane range in case of roundoff error */
if (stadistinct < (double) d)
@@ -1666,7 +1674,7 @@ compare_mcvs(const void *a, const void *b)
* relation for ANALYZE (ie, ShareUpdateExclusiveLock instead
* of AccessShareLock); but that cure seems worse than the disease,
* especially now that ANALYZE doesn't start a new transaction
* for each relation. The lock could be held for a long time...
* for each relation. The lock could be held for a long time...
*/
static void
update_attstats(Oid relid, int natts, VacAttrStats **vacattrstats)
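Both compute_minimal_stats and compute_scalar_stats above reindent the same duplicate-based estimate of the number of distinct values: numer / denom, with numer = numrows * d and denom = (numrows - f1) + f1 * numrows / totalrows, where f1 is the count of sample values seen exactly once and d the count of distinct sample values. Here is a standalone rendering of that arithmetic; estimate_ndistinct is an illustrative name, and the clamp to [d, totalrows] is an assumption based on the "Clamp to sane range" comment, since the hunks are cut off before the second bound.

#include <stdio.h>

/*
 * Sketch of the sample-based distinct-value estimate seen in analyze.c:
 * stadistinct = (numrows * d) / (numrows - f1 + f1 * numrows / totalrows)
 */
static double
estimate_ndistinct(int numrows, double totalrows, int d, int f1)
{
	double		numer = (double) numrows * (double) d;
	double		denom = (double) (numrows - f1) +
	(double) f1 * (double) numrows / totalrows;
	double		stadistinct = numer / denom;

	/* clamp to a sane range in case of roundoff error (assumed bounds) */
	if (stadistinct < (double) d)
		stadistinct = (double) d;
	if (stadistinct > totalrows)
		stadistinct = totalrows;
	return stadistinct;
}

int
main(void)
{
	/* e.g. 3000 sampled rows of 1e6 total, 1200 distinct, 900 seen once */
	printf("%.1f\n", estimate_ndistinct(3000, 1000000.0, 1200, 900));
	return 0;
}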


@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.89 2002/09/03 01:04:41 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.90 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,7 +60,7 @@ static void swap_relfilenodes(Oid r1, Oid r2);
*
* This clusters the table by creating a new, clustered table and
* swapping the relfilenodes of the new table and the old table, so
* the OID of the original table is preserved. Thus we do not lose
* the OID of the original table is preserved. Thus we do not lose
* GRANT, inheritance nor references to this table (this was a bug
* in releases thru 7.3).
*
@@ -111,11 +111,11 @@ cluster(RangeVar *oldrelation, char *oldindexname)
RelationGetRelationName(OldHeap));
/*
* Disallow clustering system relations. This will definitely NOT work
* for shared relations (we have no way to update pg_class rows in other
* databases), nor for nailed-in-cache relations (the relfilenode values
* for those are hardwired, see relcache.c). It might work for other
* system relations, but I ain't gonna risk it.
* Disallow clustering system relations. This will definitely NOT
* work for shared relations (we have no way to update pg_class rows
* in other databases), nor for nailed-in-cache relations (the
* relfilenode values for those are hardwired, see relcache.c). It
* might work for other system relations, but I ain't gonna risk it.
*/
if (IsSystemRelation(OldHeap))
elog(ERROR, "CLUSTER: cannot cluster system relation \"%s\"",
@@ -130,16 +130,20 @@ cluster(RangeVar *oldrelation, char *oldindexname)
/*
* Create the new heap, using a temporary name in the same namespace
* as the existing table. NOTE: there is some risk of collision with user
* relnames. Working around this seems more trouble than it's worth; in
* particular, we can't create the new heap in a different namespace from
* the old, or we will have problems with the TEMP status of temp tables.
* as the existing table. NOTE: there is some risk of collision with
* user relnames. Working around this seems more trouble than it's
* worth; in particular, we can't create the new heap in a different
* namespace from the old, or we will have problems with the TEMP
* status of temp tables.
*/
snprintf(NewHeapName, NAMEDATALEN, "pg_temp_%u", OIDOldHeap);
OIDNewHeap = make_new_heap(OIDOldHeap, NewHeapName);
/* We don't need CommandCounterIncrement() because make_new_heap did it. */
/*
* We don't need CommandCounterIncrement() because make_new_heap did
* it.
*/
/*
* Copy the heap data into the new table in the desired order.
@@ -244,14 +248,14 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
while ((tuple = index_getnext(scan, ForwardScanDirection)) != NULL)
{
/*
* We must copy the tuple because heap_insert() will overwrite
* the commit-status fields of the tuple it's handed, and the
* retrieved tuple will actually be in a disk buffer! Thus,
* the source relation would get trashed, which is bad news if
* we abort later on. (This was a bug in releases thru 7.0)
* We must copy the tuple because heap_insert() will overwrite the
* commit-status fields of the tuple it's handed, and the
* retrieved tuple will actually be in a disk buffer! Thus, the
* source relation would get trashed, which is bad news if we
* abort later on. (This was a bug in releases thru 7.0)
*
* Note that the copied tuple will have the original OID, if any,
* so this does preserve OIDs.
* Note that the copied tuple will have the original OID, if any, so
* this does preserve OIDs.
*/
HeapTuple copiedTuple = heap_copytuple(tuple);
@@ -276,15 +280,15 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
static List *
get_indexattr_list(Relation OldHeap, Oid OldIndex)
{
List *indexes = NIL;
List *indexes = NIL;
List *indlist;
/* Ask the relcache to produce a list of the indexes of the old rel */
foreach(indlist, RelationGetIndexList(OldHeap))
{
Oid indexOID = (Oid) lfirsti(indlist);
HeapTuple indexTuple;
HeapTuple classTuple;
Oid indexOID = (Oid) lfirsti(indlist);
HeapTuple indexTuple;
HeapTuple classTuple;
Form_pg_index indexForm;
Form_pg_class classForm;
IndexAttrs *attrs;
@@ -320,7 +324,8 @@ get_indexattr_list(Relation OldHeap, Oid OldIndex)
ReleaseSysCache(classTuple);
ReleaseSysCache(indexTuple);
/* Cons the gathered data into the list. We do not care about
/*
* Cons the gathered data into the list. We do not care about
* ordering, and this is more efficient than append.
*/
indexes = lcons(attrs, indexes);
@@ -330,13 +335,13 @@ get_indexattr_list(Relation OldHeap, Oid OldIndex)
}
/*
* Create new indexes and swap the filenodes with old indexes. Then drop
* Create new indexes and swap the filenodes with old indexes. Then drop
* the new index (carrying the old index filenode along).
*/
static void
recreate_indexattr(Oid OIDOldHeap, List *indexes)
{
List *elem;
List *elem;
foreach(elem, indexes)
{
@@ -352,13 +357,13 @@ recreate_indexattr(Oid OIDOldHeap, List *indexes)
snprintf(newIndexName, NAMEDATALEN, "pg_temp_%u", attrs->indexOID);
/*
* The new index will have primary and constraint status set to false,
* but since we will only use its filenode it doesn't matter:
* after the filenode swap the index will keep the constraint
* status of the old index.
* The new index will have primary and constraint status set to
* false, but since we will only use its filenode it doesn't
* matter: after the filenode swap the index will keep the
* constraint status of the old index.
*/
newIndexOID = index_create(OIDOldHeap, newIndexName,
attrs->indexInfo, attrs->accessMethodOID,
attrs->indexInfo, attrs->accessMethodOID,
attrs->classOID, false,
false, allowSystemTableMods);
CommandCounterIncrement();
@@ -369,8 +374,8 @@ recreate_indexattr(Oid OIDOldHeap, List *indexes)
CommandCounterIncrement();
/*
* Make sure that indisclustered is correct: it should be set
* only for the index we just clustered on.
* Make sure that indisclustered is correct: it should be set only
* for the index we just clustered on.
*/
pg_index = heap_openr(IndexRelationName, RowExclusiveLock);
tuple = SearchSysCacheCopy(INDEXRELID,
@@ -392,13 +397,13 @@ recreate_indexattr(Oid OIDOldHeap, List *indexes)
object.classId = RelOid_pg_class;
object.objectId = newIndexOID;
object.objectSubId = 0;
/*
* The relation is local to our transaction and we know
* nothing depends on it, so DROP_RESTRICT should be OK.
* The relation is local to our transaction and we know nothing
* depends on it, so DROP_RESTRICT should be OK.
*/
performDeletion(&object, DROP_RESTRICT);
/* performDeletion does CommandCounterIncrement() at its end */
}
}
@@ -473,7 +478,7 @@ swap_relfilenodes(Oid r1, Oid r2)
/* Update the tuples in pg_class */
simple_heap_update(relRelation, &reltup1->t_self, reltup1);
simple_heap_update(relRelation, &reltup2->t_self, reltup2);
/* Keep system catalogs current */
indstate = CatalogOpenIndexes(relRelation);
CatalogIndexInsert(indstate, reltup1);
@@ -481,16 +486,17 @@ swap_relfilenodes(Oid r1, Oid r2)
CatalogCloseIndexes(indstate);
/*
* If we have toast tables associated with the relations being swapped,
* change their dependency links to re-associate them with their new
* owning relations. Otherwise the wrong one will get dropped ...
* If we have toast tables associated with the relations being
* swapped, change their dependency links to re-associate them with
* their new owning relations. Otherwise the wrong one will get
* dropped ...
*
* NOTE: for now, we can assume the new table will have a TOAST table
* if and only if the old one does. This logic might need work if we
* get smarter about dropped columns.
* NOTE: for now, we can assume the new table will have a TOAST table if
* and only if the old one does. This logic might need work if we get
* smarter about dropped columns.
*
* NOTE: at present, a TOAST table's only dependency is the one on
* its owning table. If more are ever created, we'd need to use something
* NOTE: at present, a TOAST table's only dependency is the one on its
* owning table. If more are ever created, we'd need to use something
* more selective than deleteDependencyRecordsFor() to get rid of only
* the link we want.
*/
@@ -532,12 +538,12 @@ swap_relfilenodes(Oid r1, Oid r2)
}
/*
* Blow away the old relcache entries now. We need this kluge because
* relcache.c indexes relcache entries by rd_node as well as OID.
* It will get confused if it is asked to (re)build an entry with a new
* Blow away the old relcache entries now. We need this kluge because
* relcache.c indexes relcache entries by rd_node as well as OID. It
* will get confused if it is asked to (re)build an entry with a new
* rd_node value when there is still another entry laying about with
* that same rd_node value. (Fortunately, since one of the entries
* is local in our transaction, it's sufficient to clear out our own
* that same rd_node value. (Fortunately, since one of the entries is
* local in our transaction, it's sufficient to clear out our own
* relcache this way; the problem cannot arise for other backends when
* they see our update on the non-local relation.)
*/


@@ -7,7 +7,7 @@
* Copyright (c) 1996-2001, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.59 2002/09/02 01:05:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.60 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -231,7 +231,7 @@ DeleteComments(Oid oid, Oid classoid, int32 subid)
Relation description;
ScanKeyData skey[3];
int nkeys;
SysScanDesc sd;
SysScanDesc sd;
HeapTuple oldtuple;
/* Use the index to search for all matching old tuples */
@@ -260,9 +260,7 @@ DeleteComments(Oid oid, Oid classoid, int32 subid)
SnapshotNow, nkeys, skey);
while ((oldtuple = systable_getnext(sd)) != NULL)
{
simple_heap_delete(description, &oldtuple->t_self);
}
/* Done */
@@ -357,8 +355,8 @@ CommentAttribute(List *qualname, char *comment)
nnames = length(qualname);
if (nnames < 2)
elog(ERROR, "CommentAttribute: must specify relation.attribute");
relname = ltruncate(nnames-1, listCopy(qualname));
attrname = strVal(nth(nnames-1, qualname));
relname = ltruncate(nnames - 1, listCopy(qualname));
attrname = strVal(nth(nnames - 1, qualname));
/* Open the containing relation to ensure it won't go away meanwhile */
rel = makeRangeVarFromNameList(relname);
@@ -521,13 +519,13 @@ CommentRule(List *qualname, char *comment)
else
{
elog(ERROR, "rule \"%s\" does not exist", rulename);
reloid = ruleoid = 0; /* keep compiler quiet */
reloid = ruleoid = 0; /* keep compiler quiet */
}
if (HeapTupleIsValid(tuple = heap_getnext(scanDesc,
ForwardScanDirection)))
elog(ERROR, "There are multiple rules \"%s\""
"\n\tPlease specify a relation name as well as a rule name",
"\n\tPlease specify a relation name as well as a rule name",
rulename);
heap_endscan(scanDesc);
@@ -540,8 +538,8 @@ CommentRule(List *qualname, char *comment)
{
/* New-style: rule and relname both provided */
Assert(nnames >= 2);
relname = ltruncate(nnames-1, listCopy(qualname));
rulename = strVal(nth(nnames-1, qualname));
relname = ltruncate(nnames - 1, listCopy(qualname));
rulename = strVal(nth(nnames - 1, qualname));
/* Open the owning relation to ensure it won't go away meanwhile */
rel = makeRangeVarFromNameList(relname);
@@ -724,7 +722,7 @@ CommentTrigger(List *qualname, char *comment)
Relation pg_trigger,
relation;
HeapTuple triggertuple;
SysScanDesc scan;
SysScanDesc scan;
ScanKeyData entry[2];
Oid oid;
@@ -732,8 +730,8 @@ CommentTrigger(List *qualname, char *comment)
nnames = length(qualname);
if (nnames < 2)
elog(ERROR, "CommentTrigger: must specify relation and trigger");
relname = ltruncate(nnames-1, listCopy(qualname));
trigname = strVal(nth(nnames-1, qualname));
relname = ltruncate(nnames - 1, listCopy(qualname));
trigname = strVal(nth(nnames - 1, qualname));
/* Open the owning relation to ensure it won't go away meanwhile */
rel = makeRangeVarFromNameList(relname);
@@ -799,7 +797,7 @@ CommentConstraint(List *qualname, char *comment)
Relation pg_constraint,
relation;
HeapTuple tuple;
SysScanDesc scan;
SysScanDesc scan;
ScanKeyData skey[1];
Oid conOid = InvalidOid;
@@ -807,8 +805,8 @@ CommentConstraint(List *qualname, char *comment)
nnames = length(qualname);
if (nnames < 2)
elog(ERROR, "CommentConstraint: must specify relation and constraint");
relName = ltruncate(nnames-1, listCopy(qualname));
conName = strVal(nth(nnames-1, qualname));
relName = ltruncate(nnames - 1, listCopy(qualname));
conName = strVal(nth(nnames - 1, qualname));
/* Open the owning relation to ensure it won't go away meanwhile */
rel = makeRangeVarFromNameList(relName);
@@ -820,9 +818,9 @@ CommentConstraint(List *qualname, char *comment)
aclcheck_error(ACLCHECK_NOT_OWNER, RelationGetRelationName(relation));
/*
* Fetch the constraint tuple from pg_constraint. There may be more than
* one match, because constraints are not required to have unique names;
* if so, error out.
* Fetch the constraint tuple from pg_constraint. There may be more
* than one match, because constraints are not required to have unique
* names; if so, error out.
*/
pg_constraint = heap_openr(ConstraintRelationName, AccessShareLock);
@@ -835,7 +833,7 @@ CommentConstraint(List *qualname, char *comment)
while (HeapTupleIsValid(tuple = systable_getnext(scan)))
{
Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tuple);
Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tuple);
if (strcmp(NameStr(con->conname), conName) == 0)
{
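CommentAttribute, CommentRule, CommentTrigger and CommentConstraint above all carry the same reindented pattern: the last component of the qualified name is the object (attribute, rule, trigger or constraint) and everything before it names the relation, hence the nnames - 1 arithmetic. Below is a toy standalone version of that split, using an argv-style array of C strings instead of the backend's List of Value nodes; split_qualname is an illustrative name only.

#include <stdio.h>

/*
 * Split a qualified name such as {"public", "mytab", "mycol"} into the
 * relation part (all but the last element) and the object name (the last
 * element), mirroring the nnames - 1 logic in comment.c.
 */
static void
split_qualname(const char **names, int nnames,
			   int *relname_len, const char **objname)
{
	*relname_len = nnames - 1;	/* components that name the relation */
	*objname = names[nnames - 1];	/* trailing component names the object */
}

int
main(void)
{
	const char *qualname[] = {"public", "mytab", "mycol"};
	const char *objname;
	int			rel_len;

	split_qualname(qualname, 3, &rel_len, &objname);
	printf("relation has %d name component(s), object is \"%s\"\n",
		   rel_len, objname);
	return 0;
}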


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/conversioncmds.c,v 1.3 2002/08/22 00:01:42 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/conversioncmds.c,v 1.4 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,19 +33,19 @@ void
CreateConversionCommand(CreateConversionStmt *stmt)
{
Oid namespaceId;
char *conversion_name;
char *conversion_name;
AclResult aclresult;
int for_encoding;
int to_encoding;
Oid funcoid;
Oid funcnamespace;
char *dummy;
char *dummy;
const char *for_encoding_name = stmt->for_encoding_name;
const char *to_encoding_name = stmt->to_encoding_name;
List *func_name = stmt->func_name;
List *func_name = stmt->func_name;
static Oid funcargs[] = {INT4OID, INT4OID, CSTRINGOID, CSTRINGOID, INT4OID};
static Oid funcargs[] = {INT4OID, INT4OID, CSTRINGOID, CSTRINGOID, INT4OID};
/* Convert list of names to a name and namespace */
namespaceId = QualifiedNameGetCreationNamespace(stmt->conversion_name, &conversion_name);
@@ -64,10 +64,11 @@ CreateConversionCommand(CreateConversionStmt *stmt)
if (to_encoding < 0)
elog(ERROR, "Invalid to encoding name: %s", to_encoding_name);
/* Check the existence of the conversion function.
* Function name could be a qualified name.
/*
* Check the existence of the conversion function. Function name could
* be a qualified name.
*/
funcoid = LookupFuncName(func_name, sizeof(funcargs)/sizeof(Oid), funcargs);
funcoid = LookupFuncName(func_name, sizeof(funcargs) / sizeof(Oid), funcargs);
if (!OidIsValid(funcoid))
elog(ERROR, "Function %s does not exist", NameListToString(func_name));
@@ -80,8 +81,11 @@ CreateConversionCommand(CreateConversionStmt *stmt)
aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, get_namespace_name(funcnamespace));
/* All seem ok, go ahead (possible failure would be a duplicate conversion name) */
/*
* All seem ok, go ahead (possible failure would be a duplicate
* conversion name)
*/
ConversionCreate(conversion_name, namespaceId, GetUserId(),
for_encoding, to_encoding, funcoid, stmt->def);
}
@@ -93,7 +97,7 @@ void
DropConversionCommand(List *name, DropBehavior behavior)
{
Oid namespaceId;
char *conversion_name;
char *conversion_name;
AclResult aclresult;
/* Convert list of names to a name and namespace */
@@ -104,9 +108,9 @@ DropConversionCommand(List *name, DropBehavior behavior)
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, get_namespace_name(namespaceId));
/* Go ahead (possible failure would be:
* none existing conversion
* not ower of this conversion
/*
* Go ahead (possible failure would be: none existing conversion not
* ower of this conversion
*/
ConversionDrop(conversion_name, namespaceId, GetUserId(), behavior);
}
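The funcargs array in CreateConversionCommand above pins down the shape a conversion function must have in this release: five arguments of types int4, int4, cstring, cstring, int4, i.e. source encoding id, destination encoding id, source bytes, destination buffer, and length. The following is only a rough standalone analogue of such a function (an identity conversion), with plain C types standing in for the fmgr calling convention, which this diff does not show; noop_conversion is an illustrative name.

#include <string.h>

/*
 * Toy analogue of a conversion procedure taking (for_encoding, to_encoding,
 * src, dest, len); this one just copies the bytes unchanged.
 */
static void
noop_conversion(int for_encoding, int to_encoding,
				const unsigned char *src, unsigned char *dest, int len)
{
	(void) for_encoding;
	(void) to_encoding;
	memcpy(dest, src, (size_t) len);
	dest[len] = '\0';			/* cstring result is NUL-terminated */
}

int
main(void)
{
	unsigned char out[16];

	noop_conversion(0, 1, (const unsigned char *) "abc", out, 3);
	return out[0] == 'a' ? 0 : 1;
}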


@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.170 2002/09/02 01:05:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.171 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -53,9 +53,9 @@ typedef enum CopyReadResult
/* non-export function prototypes */
static void CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
FILE *fp, char *delim, char *null_print);
FILE *fp, char *delim, char *null_print);
static void CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
FILE *fp, char *delim, char *null_print);
FILE *fp, char *delim, char *null_print);
static Oid GetInputFunction(Oid type);
static Oid GetTypeElement(Oid type);
static char *CopyReadAttribute(FILE *fp, const char *delim, CopyReadResult *result);
@@ -268,17 +268,17 @@ CopyDonePeek(FILE *fp, int c, bool pickup)
void
DoCopy(const CopyStmt *stmt)
{
RangeVar *relation = stmt->relation;
char *filename = stmt->filename;
bool is_from = stmt->is_from;
bool pipe = (stmt->filename == NULL);
List *option;
List *attnamelist = stmt->attlist;
List *attnumlist;
bool binary = false;
bool oids = false;
char *delim = NULL;
char *null_print = NULL;
RangeVar *relation = stmt->relation;
char *filename = stmt->filename;
bool is_from = stmt->is_from;
bool pipe = (stmt->filename == NULL);
List *option;
List *attnamelist = stmt->attlist;
List *attnumlist;
bool binary = false;
bool oids = false;
char *delim = NULL;
char *null_print = NULL;
FILE *fp;
Relation rel;
AclMode required_access = (is_from ? ACL_INSERT : ACL_SELECT);
@@ -336,7 +336,7 @@ DoCopy(const CopyStmt *stmt)
if (!null_print)
null_print = "\\N";
/*
* Open and lock the relation, using the appropriate lock type.
*/
@@ -512,8 +512,8 @@ DoCopy(const CopyStmt *stmt)
* Copy from relation TO file.
*/
static void
CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
FILE *fp, char *delim, char *null_print)
CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
FILE *fp, char *delim, char *null_print)
{
HeapTuple tuple;
TupleDesc tupDesc;
@@ -537,24 +537,23 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
/*
* Get info about the columns we need to process.
*
* For binary copy we really only need isvarlena, but compute it
* all...
* For binary copy we really only need isvarlena, but compute it all...
*/
out_functions = (FmgrInfo *) palloc(num_phys_attrs * sizeof(FmgrInfo));
elements = (Oid *) palloc(num_phys_attrs * sizeof(Oid));
isvarlena = (bool *) palloc(num_phys_attrs * sizeof(bool));
foreach(cur, attnumlist)
{
int attnum = lfirsti(cur);
int attnum = lfirsti(cur);
Oid out_func_oid;
if (!getTypeOutputInfo(attr[attnum-1]->atttypid,
&out_func_oid, &elements[attnum-1],
&isvarlena[attnum-1]))
if (!getTypeOutputInfo(attr[attnum - 1]->atttypid,
&out_func_oid, &elements[attnum - 1],
&isvarlena[attnum - 1]))
elog(ERROR, "COPY: couldn't lookup info for type %u",
attr[attnum-1]->atttypid);
fmgr_info(out_func_oid, &out_functions[attnum-1]);
if (binary && attr[attnum-1]->attlen == -2)
attr[attnum - 1]->atttypid);
fmgr_info(out_func_oid, &out_functions[attnum - 1]);
if (binary && attr[attnum - 1]->attlen == -2)
elog(ERROR, "COPY BINARY: cstring not supported");
}
@@ -597,7 +596,7 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
/* Send OID if wanted --- note fld_count doesn't include it */
if (oids)
{
Oid oid = HeapTupleGetOid(tuple);
Oid oid = HeapTupleGetOid(tuple);
fld_size = sizeof(Oid);
CopySendData(&fld_size, sizeof(int16), fp);
@@ -610,7 +609,7 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
if (oids)
{
string = DatumGetCString(DirectFunctionCall1(oidout,
ObjectIdGetDatum(HeapTupleGetOid(tuple))));
ObjectIdGetDatum(HeapTupleGetOid(tuple))));
CopySendString(string, fp);
pfree(string);
need_delim = true;
@@ -619,7 +618,7 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
foreach(cur, attnumlist)
{
int attnum = lfirsti(cur);
int attnum = lfirsti(cur);
Datum origvalue,
value;
bool isnull;
@@ -653,25 +652,25 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
* (or for binary case, becase we must output untoasted
* value).
*/
if (isvarlena[attnum-1])
if (isvarlena[attnum - 1])
value = PointerGetDatum(PG_DETOAST_DATUM(origvalue));
else
value = origvalue;
if (!binary)
{
string = DatumGetCString(FunctionCall3(&out_functions[attnum-1],
string = DatumGetCString(FunctionCall3(&out_functions[attnum - 1],
value,
ObjectIdGetDatum(elements[attnum-1]),
Int32GetDatum(attr[attnum-1]->atttypmod)));
ObjectIdGetDatum(elements[attnum - 1]),
Int32GetDatum(attr[attnum - 1]->atttypmod)));
CopyAttributeOut(fp, string, delim);
pfree(string);
}
else
{
fld_size = attr[attnum-1]->attlen;
fld_size = attr[attnum - 1]->attlen;
CopySendData(&fld_size, sizeof(int16), fp);
if (isvarlena[attnum-1])
if (isvarlena[attnum - 1])
{
/* varlena */
Assert(fld_size == -1);
@@ -679,7 +678,7 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
VARSIZE(value),
fp);
}
else if (!attr[attnum-1]->attbyval)
else if (!attr[attnum - 1]->attbyval)
{
/* fixed-length pass-by-reference */
Assert(fld_size > 0);
@@ -734,13 +733,15 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
* Copy FROM file to relation.
*/
static void
CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
FILE *fp, char *delim, char *null_print)
{
HeapTuple tuple;
TupleDesc tupDesc;
Form_pg_attribute *attr;
AttrNumber num_phys_attrs, attr_count, num_defaults;
AttrNumber num_phys_attrs,
attr_count,
num_defaults;
FmgrInfo *in_functions;
Oid *elements;
int i;
@@ -755,8 +756,8 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
TupleTableSlot *slot;
bool file_has_oids;
int *defmap;
Node **defexprs; /* array of default att expressions */
ExprContext *econtext; /* used for ExecEvalExpr for default atts */
Node **defexprs; /* array of default att expressions */
ExprContext *econtext; /* used for ExecEvalExpr for default atts */
MemoryContext oldcontext = CurrentMemoryContext;
tupDesc = RelationGetDescr(rel);
@@ -787,9 +788,9 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
ExecSetSlotDescriptor(slot, tupDesc, false);
/*
* pick up the input function and default expression (if any) for
* each attribute in the relation. (We don't actually use the
* input function if it's a binary copy.)
* pick up the input function and default expression (if any) for each
* attribute in the relation. (We don't actually use the input
* function if it's a binary copy.)
*/
defmap = (int *) palloc(sizeof(int) * num_phys_attrs);
defexprs = (Node **) palloc(sizeof(Node *) * num_phys_attrs);
@@ -874,13 +875,13 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
while (!done)
{
bool skip_tuple;
Oid loaded_oid = InvalidOid;
bool skip_tuple;
Oid loaded_oid = InvalidOid;
CHECK_FOR_INTERRUPTS();
copy_lineno++;
/* Reset the per-tuple exprcontext */
ResetPerTupleExprContext(estate);
@@ -894,8 +895,8 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
if (!binary)
{
CopyReadResult result = NORMAL_ATTR;
char *string;
CopyReadResult result = NORMAL_ATTR;
char *string;
if (file_has_oids)
{
@@ -918,14 +919,14 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
elog(ERROR, "Invalid Oid");
}
}
/*
* Loop to read the user attributes on the line.
*/
foreach(cur, attnumlist)
{
int attnum = lfirsti(cur);
int m = attnum - 1;
int attnum = lfirsti(cur);
int m = attnum - 1;
/*
* If prior attr on this line was ended by newline or EOF,
@@ -953,8 +954,8 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
{
values[m] = FunctionCall3(&in_functions[m],
CStringGetDatum(string),
ObjectIdGetDatum(elements[m]),
Int32GetDatum(attr[m]->atttypmod));
ObjectIdGetDatum(elements[m]),
Int32GetDatum(attr[m]->atttypmod));
nulls[m] = ' ';
}
}
@@ -1009,7 +1010,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
i = 0;
foreach(cur, attnumlist)
{
int attnum = lfirsti(cur);
int attnum = lfirsti(cur);
i++;
@@ -1018,9 +1019,9 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
elog(ERROR, "COPY BINARY: unexpected EOF");
if (fld_size == 0)
continue; /* it's NULL; nulls[attnum-1] already set */
if (fld_size != attr[attnum-1]->attlen)
if (fld_size != attr[attnum - 1]->attlen)
elog(ERROR, "COPY BINARY: sizeof(field %d) is %d, expected %d",
i, (int) fld_size, (int) attr[attnum-1]->attlen);
i, (int) fld_size, (int) attr[attnum - 1]->attlen);
if (fld_size == -1)
{
/* varlena field */
@@ -1039,9 +1040,9 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
fp);
if (CopyGetEof(fp))
elog(ERROR, "COPY BINARY: unexpected EOF");
values[attnum-1] = PointerGetDatum(varlena_ptr);
values[attnum - 1] = PointerGetDatum(varlena_ptr);
}
else if (!attr[attnum-1]->attbyval)
else if (!attr[attnum - 1]->attbyval)
{
/* fixed-length pass-by-reference */
Pointer refval_ptr;
@@ -1051,7 +1052,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
CopyGetData(refval_ptr, fld_size, fp);
if (CopyGetEof(fp))
elog(ERROR, "COPY BINARY: unexpected EOF");
values[attnum-1] = PointerGetDatum(refval_ptr);
values[attnum - 1] = PointerGetDatum(refval_ptr);
}
else
{
@@ -1059,29 +1060,28 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
Datum datumBuf;
/*
* We need this horsing around because we don't
* know how shorter data values are aligned within
* a Datum.
* We need this horsing around because we don't know
* how shorter data values are aligned within a Datum.
*/
Assert(fld_size > 0 && fld_size <= sizeof(Datum));
CopyGetData(&datumBuf, fld_size, fp);
if (CopyGetEof(fp))
elog(ERROR, "COPY BINARY: unexpected EOF");
values[attnum-1] = fetch_att(&datumBuf, true, fld_size);
values[attnum - 1] = fetch_att(&datumBuf, true, fld_size);
}
nulls[attnum-1] = ' ';
nulls[attnum - 1] = ' ';
}
}
/*
* Now compute and insert any defaults available for the
* columns not provided by the input data. Anything not
* processed here or above will remain NULL.
* Now compute and insert any defaults available for the columns
* not provided by the input data. Anything not processed here or
* above will remain NULL.
*/
for (i = 0; i < num_defaults; i++)
{
bool isnull;
bool isnull;
values[defmap[i]] = ExecEvalExpr(defexprs[i], econtext,
&isnull, NULL);
@@ -1093,7 +1093,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
* And now we can form the input tuple.
*/
tuple = heap_formtuple(tupDesc, values, nulls);
if (oids && file_has_oids)
HeapTupleSetOid(tuple, loaded_oid);
@@ -1464,14 +1464,14 @@ CopyAttributeOut(FILE *fp, char *server_string, char *delim)
static List *
CopyGetAttnums(Relation rel, List *attnamelist)
{
List *attnums = NIL;
List *attnums = NIL;
if (attnamelist == NIL)
{
/* Generate default column list */
TupleDesc tupDesc = RelationGetDescr(rel);
TupleDesc tupDesc = RelationGetDescr(rel);
Form_pg_attribute *attr = tupDesc->attrs;
int attr_count = tupDesc->natts;
int attr_count = tupDesc->natts;
int i;
for (i = 0; i < attr_count; i++)
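The CopyTo and CopyFrom hunks above show the binary COPY convention for one field: an int16 size word first (the attribute's attlen for fixed-length types, -1 for a varlena value, and 0 on input meaning NULL), then the raw bytes. Here is a minimal standalone sketch of writing and reading a fixed-length field in that style; byte order and any format details beyond what the diff shows are deliberately ignored, and the function names are illustrative.

#include <stdio.h>
#include <stdint.h>

/* write one fixed-length field: int16 size prefix, then the raw bytes */
static void
write_field(FILE *fp, const void *data, int16_t fld_size)
{
	fwrite(&fld_size, sizeof(int16_t), 1, fp);
	fwrite(data, (size_t) fld_size, 1, fp);
}

/* read it back; a zero-length field means NULL, a size mismatch is an error */
static int
read_field(FILE *fp, void *buf, int16_t expected, int *isnull)
{
	int16_t		fld_size;

	if (fread(&fld_size, sizeof(int16_t), 1, fp) != 1)
		return -1;				/* unexpected EOF */
	if (fld_size == 0)
	{
		*isnull = 1;
		return 0;
	}
	if (fld_size != expected)
		return -1;				/* wrong size, as COPY BINARY complains */
	*isnull = 0;
	return fread(buf, (size_t) fld_size, 1, fp) == 1 ? 0 : -1;
}

int
main(void)
{
	FILE	   *fp = tmpfile();
	int32_t		v = 42,
				r = 0;
	int			isnull = 0;

	write_field(fp, &v, (int16_t) sizeof(v));
	rewind(fp);
	if (read_field(fp, &r, (int16_t) sizeof(r), &isnull) == 0 && !isnull)
		printf("%d\n", r);
	fclose(fp);
	return 0;
}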


@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.104 2002/09/03 22:17:34 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.105 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,14 +79,14 @@ createdb(const CreatedbStmt *stmt)
int32 datdba;
List *option;
DefElem *downer = NULL;
DefElem *dpath = NULL;
DefElem *dtemplate = NULL;
DefElem *dencoding = NULL;
DefElem *dpath = NULL;
DefElem *dtemplate = NULL;
DefElem *dencoding = NULL;
char *dbname = stmt->dbname;
char *dbowner = NULL;
char *dbpath = NULL;
char *dbtemplate = NULL;
int encoding = -1;
int encoding = -1;
/* Extract options from the statement node tree */
foreach(option, stmt->options)
@@ -133,7 +133,7 @@ createdb(const CreatedbStmt *stmt)
/* obtain sysid of proposed owner */
if (dbowner)
datdba = get_usesysid(dbowner); /* will elog if no such user */
datdba = get_usesysid(dbowner); /* will elog if no such user */
else
datdba = GetUserId();
@@ -185,7 +185,7 @@ createdb(const CreatedbStmt *stmt)
*/
if (!src_istemplate)
{
if (!superuser() && GetUserId() != src_owner )
if (!superuser() && GetUserId() != src_owner)
elog(ERROR, "CREATE DATABASE: permission to copy \"%s\" denied",
dbtemplate);
}
@@ -226,10 +226,10 @@ createdb(const CreatedbStmt *stmt)
* database), and resolve alternate physical location if one is
* specified.
*
* If an alternate location is specified but is the same as the
* normal path, just drop the alternate-location spec (this seems
* friendlier than erroring out). We must test this case to avoid
* creating a circular symlink below.
* If an alternate location is specified but is the same as the normal
* path, just drop the alternate-location spec (this seems friendlier
* than erroring out). We must test this case to avoid creating a
* circular symlink below.
*/
nominal_loc = GetDatabasePath(dboid);
alt_loc = resolve_alt_dbpath(dbpath, dboid);
@@ -328,11 +328,12 @@ createdb(const CreatedbStmt *stmt)
/* do not set datpath to null, GetRawDatabaseInfo won't cope */
new_record[Anum_pg_database_datpath - 1] =
DirectFunctionCall1(textin, CStringGetDatum(dbpath ? dbpath : ""));
/*
* We deliberately set datconfig and datacl to defaults (NULL), rather
* than copying them from the template database. Copying datacl would
* be a bad idea when the owner is not the same as the template's owner.
* It's more debatable whether datconfig should be copied.
* be a bad idea when the owner is not the same as the template's
* owner. It's more debatable whether datconfig should be copied.
*/
new_record_nulls[Anum_pg_database_datconfig - 1] = 'n';
new_record_nulls[Anum_pg_database_datacl - 1] = 'n';
@@ -495,7 +496,7 @@ AlterDatabaseSet(AlterDatabaseSetStmt *stmt)
HeapTuple tuple,
newtuple;
Relation rel;
ScanKeyData scankey;
ScanKeyData scankey;
HeapScanDesc scan;
Datum repl_val[Natts_pg_database];
char repl_null[Natts_pg_database];
@@ -512,25 +513,25 @@ AlterDatabaseSet(AlterDatabaseSetStmt *stmt)
elog(ERROR, "database \"%s\" does not exist", stmt->dbname);
if (!(superuser()
|| ((Form_pg_database) GETSTRUCT(tuple))->datdba == GetUserId()))
|| ((Form_pg_database) GETSTRUCT(tuple))->datdba == GetUserId()))
elog(ERROR, "permission denied");
MemSet(repl_repl, ' ', sizeof(repl_repl));
repl_repl[Anum_pg_database_datconfig-1] = 'r';
repl_repl[Anum_pg_database_datconfig - 1] = 'r';
if (strcmp(stmt->variable, "all")==0 && valuestr == NULL)
if (strcmp(stmt->variable, "all") == 0 && valuestr == NULL)
{
/* RESET ALL */
repl_null[Anum_pg_database_datconfig-1] = 'n';
repl_val[Anum_pg_database_datconfig-1] = (Datum) 0;
repl_null[Anum_pg_database_datconfig - 1] = 'n';
repl_val[Anum_pg_database_datconfig - 1] = (Datum) 0;
}
else
{
Datum datum;
bool isnull;
ArrayType *a;
Datum datum;
bool isnull;
ArrayType *a;
repl_null[Anum_pg_database_datconfig-1] = ' ';
repl_null[Anum_pg_database_datconfig - 1] = ' ';
datum = heap_getattr(tuple, Anum_pg_database_datconfig,
RelationGetDescr(rel), &isnull);
@@ -542,7 +543,7 @@ AlterDatabaseSet(AlterDatabaseSetStmt *stmt)
else
a = GUCArrayDelete(a, stmt->variable);
repl_val[Anum_pg_database_datconfig-1] = PointerGetDatum(a);
repl_val[Anum_pg_database_datconfig - 1] = PointerGetDatum(a);
}
newtuple = heap_modifytuple(tuple, rel, repl_val, repl_null, repl_repl);


@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.79 2002/08/10 19:01:53 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.80 2002/09/04 20:31:15 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -132,10 +132,11 @@ defGetInt64(DefElem *def)
case T_Integer:
return (int64) intVal(def->arg);
case T_Float:
/*
* Values too large for int4 will be represented as Float
* constants by the lexer. Accept these if they are valid int8
* strings.
* constants by the lexer. Accept these if they are valid
* int8 strings.
*/
return DatumGetInt64(DirectFunctionCall1(int8in,
CStringGetDatum(strVal(def->arg))));
@@ -188,14 +189,14 @@ defGetTypeName(DefElem *def)
case T_TypeName:
return (TypeName *) def->arg;
case T_String:
{
/* Allow quoted typename for backwards compatibility */
TypeName *n = makeNode(TypeName);
{
/* Allow quoted typename for backwards compatibility */
TypeName *n = makeNode(TypeName);
n->names = makeList1(def->arg);
n->typmod = -1;
return n;
}
n->names = makeList1(def->arg);
n->typmod = -1;
return n;
}
default:
elog(ERROR, "Define: argument of \"%s\" must be a type name",
def->defname);
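The defGetInt64 hunk above explains that integer literals too large for int4 arrive from the lexer as Float (string) constants and are accepted only if they are valid int8 strings, which the backend checks by passing them through int8in. Below is a standalone approximation of that check using strtoll; the function name and error handling are illustrative, not the backend's.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* accept a string only if it is a complete, in-range 64-bit integer literal */
static int64_t
int64_from_string(const char *s)
{
	char	   *end;
	long long	val;

	errno = 0;
	val = strtoll(s, &end, 10);
	if (errno == ERANGE || end == s || *end != '\0')
	{
		fprintf(stderr, "not a valid int8 string: \"%s\"\n", s);
		exit(1);
	}
	return (int64_t) val;
}

int
main(void)
{
	/* 4294967296 does not fit in int4 but is a valid int8 string */
	printf("%lld\n", (long long) int64_from_string("4294967296"));
	return 0;
}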


@@ -5,7 +5,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.86 2002/09/02 01:05:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.87 2002/09/04 20:31:15 momjian Exp $
*
*/
@@ -41,18 +41,18 @@ typedef struct ExplainState
static StringInfo Explain_PlanToString(Plan *plan, ExplainState *es);
static void ExplainOneQuery(Query *query, ExplainStmt *stmt,
TupOutputState *tstate);
TupOutputState *tstate);
static void explain_outNode(StringInfo str, Plan *plan, Plan *outer_plan,
int indent, ExplainState *es);
int indent, ExplainState *es);
static void show_scan_qual(List *qual, bool is_or_qual, const char *qlabel,
int scanrelid, Plan *outer_plan,
StringInfo str, int indent, ExplainState *es);
int scanrelid, Plan *outer_plan,
StringInfo str, int indent, ExplainState *es);
static void show_upper_qual(List *qual, const char *qlabel,
const char *outer_name, int outer_varno, Plan *outer_plan,
const char *inner_name, int inner_varno, Plan *inner_plan,
StringInfo str, int indent, ExplainState *es);
const char *outer_name, int outer_varno, Plan *outer_plan,
const char *inner_name, int inner_varno, Plan *inner_plan,
StringInfo str, int indent, ExplainState *es);
static void show_sort_keys(List *tlist, int nkeys, const char *qlabel,
StringInfo str, int indent, ExplainState *es);
StringInfo str, int indent, ExplainState *es);
static Node *make_ors_ands_explicit(List *orclauses);
/*
@@ -189,7 +189,7 @@ ExplainOneQuery(Query *query, ExplainStmt *stmt, TupOutputState *tstate)
do_text_output_multiline(tstate, f);
pfree(f);
if (es->printCost)
do_text_output_oneline(tstate, ""); /* separator line */
do_text_output_oneline(tstate, ""); /* separator line */
}
}
@@ -325,7 +325,7 @@ explain_outNode(StringInfo str, Plan *plan, Plan *outer_plan,
relation = index_open(lfirsti(l));
appendStringInfo(str, "%s%s",
(++i > 1) ? ", " : "",
quote_identifier(RelationGetRelationName(relation)));
quote_identifier(RelationGetRelationName(relation)));
index_close(relation);
}
/* FALL THRU */
@@ -335,7 +335,7 @@ explain_outNode(StringInfo str, Plan *plan, Plan *outer_plan,
{
RangeTblEntry *rte = rt_fetch(((Scan *) plan)->scanrelid,
es->rtable);
char *relname;
char *relname;
/* Assume it's on a real relation */
Assert(rte->rtekind == RTE_RELATION);
@@ -347,7 +347,7 @@ explain_outNode(StringInfo str, Plan *plan, Plan *outer_plan,
quote_identifier(relname));
if (strcmp(rte->eref->aliasname, relname) != 0)
appendStringInfo(str, " %s",
quote_identifier(rte->eref->aliasname));
quote_identifier(rte->eref->aliasname));
}
break;
case T_SubqueryScan:
@@ -365,10 +365,10 @@ explain_outNode(StringInfo str, Plan *plan, Plan *outer_plan,
{
RangeTblEntry *rte = rt_fetch(((Scan *) plan)->scanrelid,
es->rtable);
Expr *expr;
Func *funcnode;
Oid funcid;
char *proname;
Expr *expr;
Func *funcnode;
Oid funcid;
char *proname;
/* Assert it's on a RangeFunction */
Assert(rte->rtekind == RTE_FUNCTION);
@@ -384,7 +384,7 @@ explain_outNode(StringInfo str, Plan *plan, Plan *outer_plan,
quote_identifier(proname));
if (strcmp(rte->eref->aliasname, proname) != 0)
appendStringInfo(str, " %s",
quote_identifier(rte->eref->aliasname));
quote_identifier(rte->eref->aliasname));
}
break;
default:
@@ -482,7 +482,7 @@ explain_outNode(StringInfo str, Plan *plan, Plan *outer_plan,
case T_SubqueryScan:
show_upper_qual(plan->qual,
"Filter",
"subplan", 1, ((SubqueryScan *) plan)->subplan,
"subplan", 1, ((SubqueryScan *) plan)->subplan,
"", 0, NULL,
str, indent, es);
break;
@@ -662,14 +662,14 @@ show_scan_qual(List *qual, bool is_or_qual, const char *qlabel,
/*
* If we have an outer plan that is referenced by the qual, add it to
* the deparse context. If not, don't (so that we don't force prefixes
* unnecessarily).
* the deparse context. If not, don't (so that we don't force
* prefixes unnecessarily).
*/
if (outer_plan)
{
if (intMember(OUTER, pull_varnos(node)))
outercontext = deparse_context_for_subplan("outer",
outer_plan->targetlist,
outer_plan->targetlist,
es->rtable);
else
outercontext = NULL;
@@ -760,10 +760,11 @@ show_sort_keys(List *tlist, int nkeys, const char *qlabel,
/*
* In this routine we expect that the plan node's tlist has not been
* processed by set_plan_references(). Normally, any Vars will contain
* valid varnos referencing the actual rtable. But we might instead be
* looking at a dummy tlist generated by prepunion.c; if there are
* Vars with zero varno, use the tlist itself to determine their names.
* processed by set_plan_references(). Normally, any Vars will
* contain valid varnos referencing the actual rtable. But we might
* instead be looking at a dummy tlist generated by prepunion.c; if
* there are Vars with zero varno, use the tlist itself to determine
* their names.
*/
if (intMember(0, pull_varnos((Node *) tlist)))
{
@@ -811,7 +812,7 @@ show_sort_keys(List *tlist, int nkeys, const char *qlabel,
}
/*
* Indexscan qual lists have an implicit OR-of-ANDs structure. Make it
* Indexscan qual lists have an implicit OR-of-ANDs structure. Make it
* explicit so deparsing works properly.
*/
static Node *
@@ -823,13 +824,11 @@ make_ors_ands_explicit(List *orclauses)
return (Node *) make_ands_explicit(lfirst(orclauses));
else
{
List *args = NIL;
List *orptr;
List *args = NIL;
List *orptr;
foreach(orptr, orclauses)
{
args = lappend(args, make_ands_explicit(lfirst(orptr)));
}
return (Node *) make_orclause(args);
}


@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/functioncmds.c,v 1.18 2002/08/22 00:01:42 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/functioncmds.c,v 1.19 2002/09/04 20:31:15 momjian Exp $
*
* DESCRIPTION
* These routines take the parse tree and pick out the
@@ -61,7 +61,7 @@
* allow a shell type to be used, or even created if the specified return type
* doesn't exist yet. (Without this, there's no way to define the I/O procs
* for a new type.) But SQL function creation won't cope, so error out if
* the target language is SQL. (We do this here, not in the SQL-function
* the target language is SQL. (We do this here, not in the SQL-function
* validator, so as not to produce a WARNING and then an ERROR for the same
* condition.)
*/
@@ -69,7 +69,7 @@ static void
compute_return_type(TypeName *returnType, Oid languageOid,
Oid *prorettype_p, bool *returnsSet_p)
{
Oid rettype;
Oid rettype;
rettype = LookupTypeName(returnType);
@@ -87,7 +87,7 @@ compute_return_type(TypeName *returnType, Oid languageOid,
}
else
{
char *typnam = TypeNameToString(returnType);
char *typnam = TypeNameToString(returnType);
Oid namespaceId;
AclResult aclresult;
char *typname;
@@ -184,41 +184,41 @@ compute_attributes_sql_style(const List *options,
bool *security_definer)
{
const List *option;
DefElem *as_item = NULL;
DefElem *language_item = NULL;
DefElem *volatility_item = NULL;
DefElem *strict_item = NULL;
DefElem *security_item = NULL;
DefElem *as_item = NULL;
DefElem *language_item = NULL;
DefElem *volatility_item = NULL;
DefElem *strict_item = NULL;
DefElem *security_item = NULL;
foreach(option, options)
{
DefElem *defel = (DefElem *) lfirst(option);
if (strcmp(defel->defname, "as")==0)
if (strcmp(defel->defname, "as") == 0)
{
if (as_item)
elog(ERROR, "conflicting or redundant options");
as_item = defel;
}
else if (strcmp(defel->defname, "language")==0)
else if (strcmp(defel->defname, "language") == 0)
{
if (language_item)
elog(ERROR, "conflicting or redundant options");
language_item = defel;
}
else if (strcmp(defel->defname, "volatility")==0)
else if (strcmp(defel->defname, "volatility") == 0)
{
if (volatility_item)
elog(ERROR, "conflicting or redundant options");
volatility_item = defel;
}
else if (strcmp(defel->defname, "strict")==0)
else if (strcmp(defel->defname, "strict") == 0)
{
if (strict_item)
elog(ERROR, "conflicting or redundant options");
strict_item = defel;
}
else if (strcmp(defel->defname, "security")==0)
else if (strcmp(defel->defname, "security") == 0)
{
if (security_item)
elog(ERROR, "conflicting or redundant options");
@@ -229,7 +229,7 @@ compute_attributes_sql_style(const List *options,
}
if (as_item)
*as = (List *)as_item->arg;
*as = (List *) as_item->arg;
else
elog(ERROR, "no function body specified");
@@ -240,11 +240,11 @@ compute_attributes_sql_style(const List *options,
if (volatility_item)
{
if (strcmp(strVal(volatility_item->arg), "immutable")==0)
if (strcmp(strVal(volatility_item->arg), "immutable") == 0)
*volatility_p = PROVOLATILE_IMMUTABLE;
else if (strcmp(strVal(volatility_item->arg), "stable")==0)
else if (strcmp(strVal(volatility_item->arg), "stable") == 0)
*volatility_p = PROVOLATILE_STABLE;
else if (strcmp(strVal(volatility_item->arg), "volatile")==0)
else if (strcmp(strVal(volatility_item->arg), "volatile") == 0)
*volatility_p = PROVOLATILE_VOLATILE;
else
elog(ERROR, "invalid volatility");
@@ -386,7 +386,7 @@ CreateFunction(CreateFunctionStmt *stmt)
/* override attributes from explicit list */
compute_attributes_sql_style(stmt->options,
&as_clause, &language, &volatility, &isStrict, &security);
&as_clause, &language, &volatility, &isStrict, &security);
/* Convert language name to canonical case */
case_translate_language_name(language, languageName);
@@ -439,13 +439,12 @@ CreateFunction(CreateFunctionStmt *stmt)
if (languageOid == INTERNALlanguageId)
{
/*
* In PostgreSQL versions before 6.5, the SQL name of the
* created function could not be different from the internal
* name, and "prosrc" wasn't used. So there is code out there
* that does CREATE FUNCTION xyz AS '' LANGUAGE 'internal'.
* To preserve some modicum of backwards compatibility, accept
* an empty "prosrc" value as meaning the supplied SQL
* function name.
* In PostgreSQL versions before 6.5, the SQL name of the created
* function could not be different from the internal name, and
* "prosrc" wasn't used. So there is code out there that does
* CREATE FUNCTION xyz AS '' LANGUAGE 'internal'. To preserve some
* modicum of backwards compatibility, accept an empty "prosrc"
* value as meaning the supplied SQL function name.
*/
if (strlen(prosrc_str) == 0)
prosrc_str = funcname;
@@ -488,7 +487,7 @@ void
RemoveFunction(RemoveFuncStmt *stmt)
{
List *functionName = stmt->funcname;
List *argTypes = stmt->args; /* list of TypeName nodes */
List *argTypes = stmt->args; /* list of TypeName nodes */
Oid funcOid;
HeapTuple tup;
ObjectAddress object;
@@ -496,13 +495,13 @@ RemoveFunction(RemoveFuncStmt *stmt)
/*
* Find the function, do permissions and validity checks
*/
funcOid = LookupFuncNameTypeNames(functionName, argTypes,
funcOid = LookupFuncNameTypeNames(functionName, argTypes,
"RemoveFunction");
tup = SearchSysCache(PROCOID,
ObjectIdGetDatum(funcOid),
0, 0, 0);
if (!HeapTupleIsValid(tup)) /* should not happen */
if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "RemoveFunction: couldn't find tuple for function %s",
NameListToString(functionName));
@@ -557,7 +556,7 @@ RemoveFunctionById(Oid funcOid)
tup = SearchSysCache(PROCOID,
ObjectIdGetDatum(funcOid),
0, 0, 0);
if (!HeapTupleIsValid(tup)) /* should not happen */
if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "RemoveFunctionById: couldn't find tuple for function %u",
funcOid);
@@ -579,7 +578,7 @@ RemoveFunctionById(Oid funcOid)
tup = SearchSysCache(AGGFNOID,
ObjectIdGetDatum(funcOid),
0, 0, 0);
if (!HeapTupleIsValid(tup)) /* should not happen */
if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "RemoveFunctionById: couldn't find pg_aggregate tuple for %u",
funcOid);
@@ -611,7 +610,7 @@ CreateCast(CreateCastStmt *stmt)
int i;
ObjectAddress myself,
referenced;
referenced;
sourcetypeid = LookupTypeName(stmt->sourcetype);
if (!OidIsValid(sourcetypeid))
@@ -693,10 +692,10 @@ CreateCast(CreateCastStmt *stmt)
}
/* ready to go */
values[Anum_pg_cast_castsource-1] = ObjectIdGetDatum(sourcetypeid);
values[Anum_pg_cast_casttarget-1] = ObjectIdGetDatum(targettypeid);
values[Anum_pg_cast_castfunc-1] = ObjectIdGetDatum(funcid);
values[Anum_pg_cast_castimplicit-1] = BoolGetDatum(stmt->implicit);
values[Anum_pg_cast_castsource - 1] = ObjectIdGetDatum(sourcetypeid);
values[Anum_pg_cast_casttarget - 1] = ObjectIdGetDatum(targettypeid);
values[Anum_pg_cast_castfunc - 1] = ObjectIdGetDatum(funcid);
values[Anum_pg_cast_castimplicit - 1] = BoolGetDatum(stmt->implicit);
for (i = 0; i < Natts_pg_cast; ++i)
nulls[i] = ' ';
@@ -760,9 +759,9 @@ DropCast(DropCastStmt *stmt)
TypeNameToString(stmt->targettype));
tuple = SearchSysCache(CASTSOURCETARGET,
ObjectIdGetDatum(sourcetypeid),
ObjectIdGetDatum(targettypeid),
0, 0);
ObjectIdGetDatum(sourcetypeid),
ObjectIdGetDatum(targettypeid),
0, 0);
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cast from type %s to type %s does not exist",
TypeNameToString(stmt->sourcetype),

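The compute_attributes_sql_style() hunks above walk the CREATE FUNCTION option list and reject any option that is given twice. A minimal standalone sketch of that scan follows; the Option struct, the sample values, and the main() driver are illustrative stand-ins, not the backend's List/DefElem types or real parser output.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for a named option; the backend uses DefElem nodes. */
typedef struct Option
{
    const char *name;
    const char *value;
} Option;

int
main(void)
{
    Option      opts[] = {
        {"language", "sql"},
        {"volatility", "immutable"},
        {"as", "select 1"},
    };
    int         nopts = sizeof(opts) / sizeof(opts[0]);
    Option     *as_item = NULL;
    Option     *language_item = NULL;
    Option     *volatility_item = NULL;
    int         i;

    for (i = 0; i < nopts; i++)
    {
        Option     *defel = &opts[i];

        /* each option may be specified at most once */
        if (strcmp(defel->name, "as") == 0)
        {
            if (as_item)
                goto conflict;
            as_item = defel;
        }
        else if (strcmp(defel->name, "language") == 0)
        {
            if (language_item)
                goto conflict;
            language_item = defel;
        }
        else if (strcmp(defel->name, "volatility") == 0)
        {
            if (volatility_item)
                goto conflict;
            volatility_item = defel;
        }
    }

    if (as_item == NULL)
    {
        fprintf(stderr, "no function body specified\n");
        return 1;
    }
    printf("body: %s, language: %s\n", as_item->value,
           language_item ? language_item->value : "sql");
    return 0;

conflict:
    fprintf(stderr, "conflicting or redundant options\n");
    return 1;
}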

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.86 2002/08/30 22:18:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.87 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -117,9 +117,9 @@ DefineIndex(RangeVar *heapRelation,
/*
* Verify we (still) have CREATE rights in the rel's namespace.
* (Presumably we did when the rel was created, but maybe not anymore.)
* Skip check if bootstrapping, since permissions machinery may not
* be working yet.
* (Presumably we did when the rel was created, but maybe not
* anymore.) Skip check if bootstrapping, since permissions machinery
* may not be working yet.
*/
if (!IsBootstrapProcessingMode())
{
@@ -254,8 +254,8 @@ CheckPredicate(List *predList, List *rangeTable, Oid baseRelOid)
elog(ERROR, "Cannot use aggregate in index predicate");
/*
* A predicate using mutable functions is probably wrong, for the
* same reasons that we don't allow a functional index to use one.
* A predicate using mutable functions is probably wrong, for the same
* reasons that we don't allow a functional index to use one.
*/
if (contain_mutable_functions((Node *) predList))
elog(ERROR, "Functions in index predicate must be marked isImmutable");
@@ -432,7 +432,7 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
if (schemaname)
{
/* Look in specific schema only */
Oid namespaceId;
Oid namespaceId;
namespaceId = LookupExplicitNamespace(schemaname);
tuple = SearchSysCache(CLAAMNAMENSP,
@@ -458,15 +458,15 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
NameListToString(attribute->opclass), accessMethodName);
/*
* Verify that the index operator class accepts this
* datatype. Note we will accept binary compatibility.
* Verify that the index operator class accepts this datatype. Note
* we will accept binary compatibility.
*/
opClassId = HeapTupleGetOid(tuple);
opInputType = ((Form_pg_opclass) GETSTRUCT(tuple))->opcintype;
if (!IsBinaryCompatible(attrType, opInputType))
elog(ERROR, "operator class \"%s\" does not accept data type %s",
NameListToString(attribute->opclass), format_type_be(attrType));
NameListToString(attribute->opclass), format_type_be(attrType));
ReleaseSysCache(tuple);
@@ -547,7 +547,7 @@ RemoveIndex(RangeVar *relation, DropBehavior behavior)
if (((Form_pg_class) GETSTRUCT(tuple))->relkind != RELKIND_INDEX)
elog(ERROR, "relation \"%s\" is of type \"%c\"",
relation->relname, ((Form_pg_class) GETSTRUCT(tuple))->relkind);
relation->relname, ((Form_pg_class) GETSTRUCT(tuple))->relkind);
ReleaseSysCache(tuple);
@@ -704,7 +704,7 @@ ReindexDatabase(const char *dbname, bool force, bool all)
relcnt = relalc = 0;
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
char relkind;
char relkind;
if (!all)
{


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/lockcmds.c,v 1.3 2002/06/20 20:29:27 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/lockcmds.c,v 1.4 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -43,8 +43,8 @@ LockTableCommand(LockStmt *lockstmt)
Relation rel;
/*
* We don't want to open the relation until we've checked privilege.
* So, manually get the relation OID.
* We don't want to open the relation until we've checked
* privilege. So, manually get the relation OID.
*/
reloid = RangeVarGetRelid(relation, false);


@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/opclasscmds.c,v 1.4 2002/08/22 00:01:42 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/opclasscmds.c,v 1.5 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -38,7 +38,7 @@
static void storeOperators(Oid opclassoid, int numOperators,
Oid *operators, bool *recheck);
Oid *operators, bool *recheck);
static void storeProcedures(Oid opclassoid, int numProcs, Oid *procedures);
@@ -68,8 +68,8 @@ DefineOpClass(CreateOpClassStmt *stmt)
AclResult aclresult;
NameData opcName;
int i;
ObjectAddress myself,
referenced;
ObjectAddress myself,
referenced;
/* Convert list of names to a name and namespace */
namespaceoid = QualifiedNameGetCreationNamespace(stmt->opclassname,
@@ -107,9 +107,9 @@ DefineOpClass(CreateOpClassStmt *stmt)
storageoid = InvalidOid;
/*
* Create work arrays to hold info about operators and procedures.
* We do this mainly so that we can detect duplicate strategy
* numbers and support-proc numbers.
* Create work arrays to hold info about operators and procedures. We
* do this mainly so that we can detect duplicate strategy numbers and
* support-proc numbers.
*/
operators = (Oid *) palloc(sizeof(Oid) * numOperators);
MemSet(operators, 0, sizeof(Oid) * numOperators);
@@ -141,11 +141,11 @@ DefineOpClass(CreateOpClassStmt *stmt)
item->number);
if (item->args != NIL)
{
TypeName *typeName1 = (TypeName *) lfirst(item->args);
TypeName *typeName2 = (TypeName *) lsecond(item->args);
TypeName *typeName1 = (TypeName *) lfirst(item->args);
TypeName *typeName2 = (TypeName *) lsecond(item->args);
operOid = LookupOperNameTypeNames(item->name,
typeName1, typeName2,
typeName1, typeName2,
"DefineOpClass");
/* No need to check for error */
}
@@ -221,8 +221,8 @@ DefineOpClass(CreateOpClassStmt *stmt)
rel = heap_openr(OperatorClassRelationName, RowExclusiveLock);
/*
* Make sure there is no existing opclass of this name (this is
* just to give a more friendly error message than "duplicate key").
* Make sure there is no existing opclass of this name (this is just
* to give a more friendly error message than "duplicate key").
*/
if (SearchSysCacheExists(CLAAMNAMENSP,
ObjectIdGetDatum(amoid),
@@ -233,12 +233,12 @@ DefineOpClass(CreateOpClassStmt *stmt)
opcname, stmt->amname);
/*
* If we are creating a default opclass, check there isn't one already.
* (XXX should we restrict this test to visible opclasses?)
* If we are creating a default opclass, check there isn't one
* already. (XXX should we restrict this test to visible opclasses?)
*/
if (stmt->isDefault)
{
ScanKeyData skey[1];
ScanKeyData skey[1];
SysScanDesc scan;
ScanKeyEntryInitialize(&skey[0], 0x0,
@@ -276,11 +276,11 @@ DefineOpClass(CreateOpClassStmt *stmt)
values[i++] = ObjectIdGetDatum(amoid); /* opcamid */
namestrcpy(&opcName, opcname);
values[i++] = NameGetDatum(&opcName); /* opcname */
values[i++] = ObjectIdGetDatum(namespaceoid); /* opcnamespace */
values[i++] = ObjectIdGetDatum(namespaceoid); /* opcnamespace */
values[i++] = Int32GetDatum(GetUserId()); /* opcowner */
values[i++] = ObjectIdGetDatum(typeoid); /* opcintype */
values[i++] = BoolGetDatum(stmt->isDefault); /* opcdefault */
values[i++] = ObjectIdGetDatum(storageoid); /* opckeytype */
values[i++] = BoolGetDatum(stmt->isDefault); /* opcdefault */
values[i++] = ObjectIdGetDatum(storageoid); /* opckeytype */
tup = heap_formtuple(rel->rd_att, values, nulls);
@@ -291,8 +291,8 @@ DefineOpClass(CreateOpClassStmt *stmt)
heap_freetuple(tup);
/*
* Now add tuples to pg_amop and pg_amproc tying in the
* operators and functions.
* Now add tuples to pg_amop and pg_amproc tying in the operators and
* functions.
*/
storeOperators(opclassoid, numOperators, operators, recheck);
storeProcedures(opclassoid, numProcs, procedures);
@@ -358,11 +358,12 @@ static void
storeOperators(Oid opclassoid, int numOperators,
Oid *operators, bool *recheck)
{
Relation rel;
Datum values[Natts_pg_amop];
char nulls[Natts_pg_amop];
HeapTuple tup;
int i, j;
Relation rel;
Datum values[Natts_pg_amop];
char nulls[Natts_pg_amop];
HeapTuple tup;
int i,
j;
rel = heap_openr(AccessMethodOperatorRelationName, RowExclusiveLock);
@@ -378,9 +379,9 @@ storeOperators(Oid opclassoid, int numOperators,
}
i = 0;
values[i++] = ObjectIdGetDatum(opclassoid); /* amopclaid */
values[i++] = Int16GetDatum(j + 1); /* amopstrategy */
values[i++] = BoolGetDatum(recheck[j]); /* amopreqcheck */
values[i++] = ObjectIdGetDatum(opclassoid); /* amopclaid */
values[i++] = Int16GetDatum(j + 1); /* amopstrategy */
values[i++] = BoolGetDatum(recheck[j]); /* amopreqcheck */
values[i++] = ObjectIdGetDatum(operators[j]); /* amopopr */
tup = heap_formtuple(rel->rd_att, values, nulls);
@@ -401,11 +402,12 @@ storeOperators(Oid opclassoid, int numOperators,
static void
storeProcedures(Oid opclassoid, int numProcs, Oid *procedures)
{
Relation rel;
Datum values[Natts_pg_amproc];
char nulls[Natts_pg_amproc];
HeapTuple tup;
int i, j;
Relation rel;
Datum values[Natts_pg_amproc];
char nulls[Natts_pg_amproc];
HeapTuple tup;
int i,
j;
rel = heap_openr(AccessMethodProcedureRelationName, RowExclusiveLock);
@@ -421,9 +423,9 @@ storeProcedures(Oid opclassoid, int numProcs, Oid *procedures)
}
i = 0;
values[i++] = ObjectIdGetDatum(opclassoid); /* amopclaid */
values[i++] = Int16GetDatum(j + 1); /* amprocnum */
values[i++] = ObjectIdGetDatum(procedures[j]); /* amproc */
values[i++] = ObjectIdGetDatum(opclassoid); /* amopclaid */
values[i++] = Int16GetDatum(j + 1); /* amprocnum */
values[i++] = ObjectIdGetDatum(procedures[j]); /* amproc */
tup = heap_formtuple(rel->rd_att, values, nulls);
@@ -445,14 +447,15 @@ storeProcedures(Oid opclassoid, int numProcs, Oid *procedures)
void
RemoveOpClass(RemoveOpClassStmt *stmt)
{
Oid amID, opcID;
Oid amID,
opcID;
char *schemaname;
char *opcname;
HeapTuple tuple;
ObjectAddress object;
/*
* Get the access method's OID.
* Get the access method's OID.
*/
amID = GetSysCacheOid(AMNAME,
CStringGetDatum(stmt->amname),
@@ -471,7 +474,7 @@ RemoveOpClass(RemoveOpClassStmt *stmt)
if (schemaname)
{
/* Look in specific schema only */
Oid namespaceId;
Oid namespaceId;
namespaceId = LookupExplicitNamespace(schemaname);
tuple = SearchSysCache(CLAAMNAMENSP,
@@ -523,10 +526,10 @@ RemoveOpClass(RemoveOpClassStmt *stmt)
void
RemoveOpClassById(Oid opclassOid)
{
Relation rel;
HeapTuple tup;
ScanKeyData skey[1];
SysScanDesc scan;
Relation rel;
HeapTuple tup;
ScanKeyData skey[1];
SysScanDesc scan;
/*
* First remove the pg_opclass entry itself.
@@ -536,7 +539,7 @@ RemoveOpClassById(Oid opclassOid)
tup = SearchSysCache(CLAOID,
ObjectIdGetDatum(opclassOid),
0, 0, 0);
if (!HeapTupleIsValid(tup)) /* should not happen */
if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "RemoveOpClassById: couldn't find pg_class entry %u",
opclassOid);
@@ -559,9 +562,7 @@ RemoveOpClassById(Oid opclassOid)
SnapshotNow, 1, skey);
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
simple_heap_delete(rel, &tup->t_self);
}
systable_endscan(scan);
heap_close(rel, RowExclusiveLock);
@@ -579,9 +580,7 @@ RemoveOpClassById(Oid opclassOid)
SnapshotNow, 1, skey);
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
simple_heap_delete(rel, &tup->t_self);
}
systable_endscan(scan);
heap_close(rel, RowExclusiveLock);

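DefineOpClass() above zeroes its work arrays precisely so it can catch duplicate strategy and support-procedure numbers while scanning the item list. A small standalone sketch of that check, assuming made-up item numbers and a placeholder 12345 instead of real operator OIDs:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NUM_STRATEGIES 4

int
main(void)
{
    /* strategy numbers as they might appear in a CREATE OPERATOR CLASS list */
    int         items[] = {1, 3, 2, 3};
    int         nitems = sizeof(items) / sizeof(items[0]);
    unsigned int operators[NUM_STRATEGIES]; /* stand-in for the Oid work array */
    int         i;

    /* zero the work array, like the MemSet() in DefineOpClass() */
    memset(operators, 0, sizeof(operators));

    for (i = 0; i < nitems; i++)
    {
        int         number = items[i];

        if (number <= 0 || number > NUM_STRATEGIES)
        {
            fprintf(stderr, "invalid strategy number %d\n", number);
            exit(1);
        }
        if (operators[number - 1] != 0)
        {
            fprintf(stderr, "operator number %d appears more than once\n", number);
            exit(1);
        }
        operators[number - 1] = 12345;  /* would be the operator's OID */
    }
    printf("no duplicate strategy numbers\n");
    return 0;
}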

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/operatorcmds.c,v 1.6 2002/07/24 19:11:09 petere Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/operatorcmds.c,v 1.7 2002/09/04 20:31:15 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -62,7 +62,7 @@ DefineOperator(List *names, List *parameters)
char *oprName;
Oid oprNamespace;
AclResult aclresult;
bool canHash = false; /* operator hashes */
bool canHash = false; /* operator hashes */
bool canMerge = false; /* operator merges */
List *functionName = NIL; /* function for operator */
TypeName *typeName1 = NULL; /* first type name */
@@ -74,7 +74,7 @@ DefineOperator(List *names, List *parameters)
List *negatorName = NIL; /* optional negator operator name */
List *restrictionName = NIL; /* optional restrict. sel.
* procedure */
List *joinName = NIL; /* optional join sel. procedure */
List *joinName = NIL; /* optional join sel. procedure */
List *leftSortName = NIL; /* optional left sort operator */
List *rightSortName = NIL; /* optional right sort operator */
List *ltCompareName = NIL; /* optional < compare operator */
@@ -173,7 +173,7 @@ DefineOperator(List *names, List *parameters)
* now have OperatorCreate do all the work..
*/
OperatorCreate(oprName, /* operator name */
oprNamespace, /* namespace */
oprNamespace, /* namespace */
typeId1, /* left type id */
typeId2, /* right type id */
functionName, /* function for operator */
@@ -185,9 +185,9 @@ DefineOperator(List *names, List *parameters)
joinName, /* optional join sel. procedure name */
canHash, /* operator hashes */
leftSortName, /* optional left sort operator */
rightSortName, /* optional right sort operator */
ltCompareName, /* optional < comparison op */
gtCompareName); /* optional < comparison op */
rightSortName, /* optional right sort operator */
ltCompareName, /* optional < comparison op */
gtCompareName); /* optional < comparison op */
}
@@ -198,9 +198,9 @@ DefineOperator(List *names, List *parameters)
void
RemoveOperator(RemoveOperStmt *stmt)
{
List *operatorName = stmt->opname;
TypeName *typeName1 = (TypeName *) lfirst(stmt->args);
TypeName *typeName2 = (TypeName *) lsecond(stmt->args);
List *operatorName = stmt->opname;
TypeName *typeName1 = (TypeName *) lfirst(stmt->args);
TypeName *typeName2 = (TypeName *) lsecond(stmt->args);
Oid operOid;
HeapTuple tup;
ObjectAddress object;
@@ -211,7 +211,7 @@ RemoveOperator(RemoveOperStmt *stmt)
tup = SearchSysCache(OPEROID,
ObjectIdGetDatum(operOid),
0, 0, 0);
if (!HeapTupleIsValid(tup)) /* should not happen */
if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "RemoveOperator: failed to find tuple for operator '%s'",
NameListToString(operatorName));
@@ -247,7 +247,7 @@ RemoveOperatorById(Oid operOid)
tup = SearchSysCache(OPEROID,
ObjectIdGetDatum(operOid),
0, 0, 0);
if (!HeapTupleIsValid(tup)) /* should not happen */
if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "RemoveOperatorById: failed to find tuple for operator %u",
operOid);


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/portalcmds.c,v 1.2 2002/05/21 22:05:54 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/portalcmds.c,v 1.3 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -134,12 +134,12 @@ PerformPortalFetch(char *name,
* Determine which direction to go in, and check to see if we're
* already at the end of the available tuples in that direction. If
* so, set the direction to NoMovement to avoid trying to fetch any
* tuples. (This check exists because not all plan node types
* are robust about being called again if they've already returned
* NULL once.) Then call the executor (we must not skip this, because
* the destination needs to see a setup and shutdown even if no tuples
* are available). Finally, update the atStart/atEnd state depending
* on the number of tuples that were retrieved.
* tuples. (This check exists because not all plan node types are
* robust about being called again if they've already returned NULL
* once.) Then call the executor (we must not skip this, because the
* destination needs to see a setup and shutdown even if no tuples are
* available). Finally, update the atStart/atEnd state depending on
* the number of tuples that were retrieved.
*/
if (forward)
{
@@ -151,9 +151,9 @@ PerformPortalFetch(char *name,
ExecutorRun(queryDesc, estate, direction, (long) count);
if (estate->es_processed > 0)
portal->atStart = false; /* OK to back up now */
portal->atStart = false; /* OK to back up now */
if (count <= 0 || (int) estate->es_processed < count)
portal->atEnd = true; /* we retrieved 'em all */
portal->atEnd = true; /* we retrieved 'em all */
}
else
{
@@ -165,9 +165,9 @@ PerformPortalFetch(char *name,
ExecutorRun(queryDesc, estate, direction, (long) count);
if (estate->es_processed > 0)
portal->atEnd = false; /* OK to go forward now */
portal->atEnd = false; /* OK to go forward now */
if (count <= 0 || (int) estate->es_processed < count)
portal->atStart = true; /* we retrieved 'em all */
portal->atStart = true; /* we retrieved 'em all */
}
/* Return command status if wanted */

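The PerformPortalFetch() comment above describes how the cursor's atStart/atEnd flags are updated after the executor runs in a given direction. A compact standalone sketch of that bookkeeping, with a toy Portal struct standing in for the real portal and executor state:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a portal's position flags. */
typedef struct Portal
{
    bool        atStart;
    bool        atEnd;
} Portal;

/*
 * Update position flags after fetching; "processed" is how many tuples the
 * executor actually returned, "count" how many were requested (<= 0 meaning
 * "all").  Mirrors the flag updates shown in the hunks above.
 */
static void
update_position(Portal *portal, bool forward, int count, int processed)
{
    if (forward)
    {
        if (processed > 0)
            portal->atStart = false;    /* OK to back up now */
        if (count <= 0 || processed < count)
            portal->atEnd = true;       /* we retrieved 'em all */
    }
    else
    {
        if (processed > 0)
            portal->atEnd = false;      /* OK to go forward now */
        if (count <= 0 || processed < count)
            portal->atStart = true;     /* we retrieved 'em all */
    }
}

int
main(void)
{
    Portal      p = {true, false};

    update_position(&p, true, 10, 7);   /* asked for 10, only 7 available */
    printf("atStart=%d atEnd=%d\n", p.atStart, p.atEnd);    /* prints 0 1 */
    return 0;
}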

@@ -6,7 +6,7 @@
* Copyright (c) 2002, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/prepare.c,v 1.1 2002/08/27 04:55:07 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/prepare.c,v 1.2 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -34,7 +34,7 @@ typedef struct
List *query_list; /* list of queries */
List *plan_list; /* list of plans */
List *argtype_list; /* list of parameter type OIDs */
MemoryContext context; /* context containing this query */
MemoryContext context; /* context containing this query */
} QueryHashEntry;
/*
@@ -47,7 +47,7 @@ static HTAB *prepared_queries = NULL;
static void InitQueryHashTable(void);
static void StoreQuery(const char *stmt_name, List *query_list,
List *plan_list, List *argtype_list);
List *plan_list, List *argtype_list);
static QueryHashEntry *FetchQuery(const char *plan_name);
static void RunQuery(QueryDesc *qdesc, EState *state);
@@ -58,9 +58,9 @@ static void RunQuery(QueryDesc *qdesc, EState *state);
void
PrepareQuery(PrepareStmt *stmt)
{
List *plan_list = NIL;
List *query_list,
*query_list_item;
List *plan_list = NIL;
List *query_list,
*query_list_item;
if (!stmt->name)
elog(ERROR, "No statement name given");
@@ -73,8 +73,8 @@ PrepareQuery(PrepareStmt *stmt)
foreach(query_list_item, query_list)
{
Query *query = (Query *) lfirst(query_list_item);
Plan *plan;
Query *query = (Query *) lfirst(query_list_item);
Plan *plan;
/* We can't generate plans for utility statements. */
if (query->commandType == CMD_UTILITY)
@@ -97,10 +97,10 @@ PrepareQuery(PrepareStmt *stmt)
void
ExecuteQuery(ExecuteStmt *stmt, CommandDest outputDest)
{
QueryHashEntry *entry;
List *l,
*query_list,
*plan_list;
QueryHashEntry *entry;
List *l,
*query_list,
*plan_list;
ParamListInfo paramLI = NULL;
/* Look it up in the hash table */
@@ -115,8 +115,8 @@ ExecuteQuery(ExecuteStmt *stmt, CommandDest outputDest)
/* Evaluate parameters, if any */
if (entry->argtype_list != NIL)
{
int nargs = length(entry->argtype_list);
int i = 0;
int nargs = length(entry->argtype_list);
int i = 0;
ExprContext *econtext = MakeExprContext(NULL, CurrentMemoryContext);
/* Parser should have caught this error, but check */
@@ -126,10 +126,10 @@ ExecuteQuery(ExecuteStmt *stmt, CommandDest outputDest)
paramLI = (ParamListInfo) palloc((nargs + 1) * sizeof(ParamListInfoData));
MemSet(paramLI, 0, (nargs + 1) * sizeof(ParamListInfoData));
foreach (l, stmt->params)
foreach(l, stmt->params)
{
Node *n = lfirst(l);
bool isNull;
Node *n = lfirst(l);
bool isNull;
paramLI[i].value = ExecEvalExprSwitchContext(n,
econtext,
@@ -147,9 +147,9 @@ ExecuteQuery(ExecuteStmt *stmt, CommandDest outputDest)
/* Execute each query */
foreach(l, query_list)
{
Query *query = lfirst(l);
Plan *plan = lfirst(plan_list);
bool is_last_query;
Query *query = lfirst(l);
Plan *plan = lfirst(plan_list);
bool is_last_query;
plan_list = lnext(plan_list);
is_last_query = (plan_list == NIL);
@@ -158,8 +158,8 @@ ExecuteQuery(ExecuteStmt *stmt, CommandDest outputDest)
ProcessUtility(query->utilityStmt, outputDest, NULL);
else
{
QueryDesc *qdesc;
EState *state;
QueryDesc *qdesc;
EState *state;
if (Show_executor_stats)
ResetUsage();
@@ -185,11 +185,11 @@ ExecuteQuery(ExecuteStmt *stmt, CommandDest outputDest)
}
/*
* If we're processing multiple queries, we need to increment
* the command counter between them. For the last query,
* there's no need to do this, it's done automatically.
* If we're processing multiple queries, we need to increment the
* command counter between them. For the last query, there's no
* need to do this, it's done automatically.
*/
if (! is_last_query)
if (!is_last_query)
CommandCounterIncrement();
}
@@ -202,7 +202,7 @@ ExecuteQuery(ExecuteStmt *stmt, CommandDest outputDest)
static void
InitQueryHashTable(void)
{
HASHCTL hash_ctl;
HASHCTL hash_ctl;
MemSet(&hash_ctl, 0, sizeof(hash_ctl));
@@ -229,9 +229,9 @@ StoreQuery(const char *stmt_name, List *query_list, List *plan_list,
{
QueryHashEntry *entry;
MemoryContext oldcxt,
entrycxt;
char key[HASH_KEY_LEN];
bool found;
entrycxt;
char key[HASH_KEY_LEN];
bool found;
/* Initialize the hash table, if necessary */
if (!prepared_queries)
@@ -258,10 +258,10 @@ StoreQuery(const char *stmt_name, List *query_list, List *plan_list,
oldcxt = MemoryContextSwitchTo(entrycxt);
/*
* We need to copy the data so that it is stored in the correct
* memory context. Do this before making hashtable entry, so that
* an out-of-memory failure only wastes memory and doesn't leave us
* with an incomplete (ie corrupt) hashtable entry.
* We need to copy the data so that it is stored in the correct memory
* context. Do this before making hashtable entry, so that an
* out-of-memory failure only wastes memory and doesn't leave us with
* an incomplete (ie corrupt) hashtable entry.
*/
query_list = (List *) copyObject(query_list);
plan_list = (List *) copyObject(plan_list);
@@ -293,7 +293,7 @@ StoreQuery(const char *stmt_name, List *query_list, List *plan_list,
static QueryHashEntry *
FetchQuery(const char *plan_name)
{
char key[HASH_KEY_LEN];
char key[HASH_KEY_LEN];
QueryHashEntry *entry;
/*
@@ -306,8 +306,8 @@ FetchQuery(const char *plan_name)
/*
* We can't just use the statement name as supplied by the user: the
* hash package is picky enough that it needs to be NULL-padded out
* to the appropriate length to work correctly.
* hash package is picky enough that it needs to be NULL-padded out to
* the appropriate length to work correctly.
*/
MemSet(key, 0, sizeof(key));
strncpy(key, plan_name, sizeof(key));
@@ -344,7 +344,7 @@ FetchQueryParams(const char *plan_name)
static void
RunQuery(QueryDesc *qdesc, EState *state)
{
TupleDesc tupdesc;
TupleDesc tupdesc;
tupdesc = ExecutorStart(qdesc, state);
@@ -363,7 +363,7 @@ RunQuery(QueryDesc *qdesc, EState *state)
void
DeallocateQuery(DeallocateStmt *stmt)
{
char key[HASH_KEY_LEN];
char key[HASH_KEY_LEN];
QueryHashEntry *entry;
/*
@@ -376,18 +376,18 @@ DeallocateQuery(DeallocateStmt *stmt)
/*
* We can't just use the statement name as supplied by the user: the
* hash package is picky enough that it needs to be NULL-padded out
* to the appropriate length to work correctly.
* hash package is picky enough that it needs to be NULL-padded out to
* the appropriate length to work correctly.
*/
MemSet(key, 0, sizeof(key));
strncpy(key, stmt->name, sizeof(key));
/*
* First lookup the entry, so we can release all the subsidiary memory
* it has allocated (when it's removed, hash_search() will return
* a dangling pointer, so it needs to be done prior to HASH_REMOVE).
* This requires an extra hash-table lookup, but DEALLOCATE
* isn't exactly a performance bottleneck.
* it has allocated (when it's removed, hash_search() will return a
* dangling pointer, so it needs to be done prior to HASH_REMOVE).
* This requires an extra hash-table lookup, but DEALLOCATE isn't
* exactly a performance bottleneck.
*/
entry = (QueryHashEntry *) hash_search(prepared_queries,
key,

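prepare.c stresses twice that the statement name must be NULL-padded out to HASH_KEY_LEN before it is used as a hash key, because the hash package compares fixed-length keys bytewise. A standalone sketch of that preparation step; the value 64 is an arbitrary stand-in for the real HASH_KEY_LEN, and the memcmp() here is a simplified model of the hash table's key comparison:

#include <stdio.h>
#include <string.h>

#define HASH_KEY_LEN 64         /* arbitrary stand-in for the real constant */

/* Build a fixed-length key the way StoreQuery()/FetchQuery() do. */
static void
make_key(char *key, const char *name)
{
    memset(key, 0, HASH_KEY_LEN);       /* zero the whole buffer ... */
    strncpy(key, name, HASH_KEY_LEN);   /* ... then copy the name into it */
}

int
main(void)
{
    char        k1[HASH_KEY_LEN];
    char        k2[HASH_KEY_LEN];

    /*
     * Every byte of each key is now deterministic, so two equal statement
     * names always produce bytewise-equal keys -- which is what a
     * fixed-length hash key lookup requires.
     */
    make_key(k1, "myplan");
    make_key(k2, "myplan");
    printf("keys equal: %d\n", memcmp(k1, k2, HASH_KEY_LEN) == 0);
    return 0;
}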

@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/proclang.c,v 1.41 2002/08/22 00:01:42 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/proclang.c,v 1.42 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,7 +41,8 @@ void
CreateProceduralLanguage(CreatePLangStmt *stmt)
{
char languageName[NAMEDATALEN];
Oid procOid, valProcOid;
Oid procOid,
valProcOid;
Oid typev[FUNC_MAX_ARGS];
char nulls[Natts_pg_language];
Datum values[Natts_pg_language];
@@ -49,8 +50,8 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
HeapTuple tup;
TupleDesc tupDesc;
int i;
ObjectAddress myself,
referenced;
ObjectAddress myself,
referenced;
/*
* Check permission


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/schemacmds.c,v 1.5 2002/07/18 16:47:24 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/schemacmds.c,v 1.6 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,15 +61,17 @@ CreateSchemaCommand(CreateSchemaStmt *stmt)
owner_name = authId;
/* The following will error out if user does not exist */
owner_userid = get_usesysid(owner_name);
/*
* Set the current user to the requested authorization so
* that objects created in the statement have the requested
* owner. (This will revert to session user on error or at
* the end of this routine.)
* Set the current user to the requested authorization so that
* objects created in the statement have the requested owner.
* (This will revert to session user on error or at the end of
* this routine.)
*/
SetUserId(owner_userid);
}
else /* not superuser */
else
/* not superuser */
{
owner_userid = saved_userid;
owner_name = GetUserNameFromId(owner_userid);
@@ -98,17 +100,17 @@ CreateSchemaCommand(CreateSchemaStmt *stmt)
/*
* Temporarily make the new namespace be the front of the search path,
* as well as the default creation target namespace. This will be undone
* at the end of this routine, or upon error.
* as well as the default creation target namespace. This will be
* undone at the end of this routine, or upon error.
*/
PushSpecialNamespace(namespaceId);
/*
* Examine the list of commands embedded in the CREATE SCHEMA command,
* and reorganize them into a sequentially executable order with no
* forward references. Note that the result is still a list of raw
* parsetrees in need of parse analysis --- we cannot, in general,
* run analyze.c on one statement until we have actually executed the
* forward references. Note that the result is still a list of raw
* parsetrees in need of parse analysis --- we cannot, in general, run
* analyze.c on one statement until we have actually executed the
* prior ones.
*/
parsetree_list = analyzeCreateSchemaStmt(stmt);
@@ -171,12 +173,12 @@ RemoveSchema(List *names, DropBehavior behavior)
aclcheck_error(ACLCHECK_NOT_OWNER, namespaceName);
/*
* Do the deletion. Objects contained in the schema are removed
* by means of their dependency links to the schema.
* Do the deletion. Objects contained in the schema are removed by
* means of their dependency links to the schema.
*
* XXX currently, index opclasses don't have creation/deletion
* commands, so they will not get removed when the containing
* schema is removed. This is annoying but not fatal.
* XXX currently, index opclasses don't have creation/deletion commands,
* so they will not get removed when the containing schema is removed.
* This is annoying but not fatal.
*/
object.classId = get_system_catalog_relid(NamespaceRelationName);
object.objectId = namespaceId;

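CreateSchemaCommand() above temporarily switches the session to the requested authorization ID so that objects created by the embedded statements get the right owner, then reverts at the end of the routine. A standalone sketch of that save/switch/restore pattern, with a plain integer and the stand-in functions GetCurrentUser()/SetCurrentUser() in place of the backend's user-ID machinery; error recovery, which the real code also handles, is omitted here:

#include <stdio.h>

/* Stand-ins for the backend's per-session user-ID state. */
static int  current_userid = 100;   /* pretend 100 is the session user */

static int
GetCurrentUser(void)
{
    return current_userid;
}

static void
SetCurrentUser(int uid)
{
    current_userid = uid;
}

static void
create_schema_as(int owner_userid)
{
    int         saved_userid = GetCurrentUser();

    /* run the embedded commands as the requested owner ... */
    SetCurrentUser(owner_userid);
    printf("creating objects as user %d\n", GetCurrentUser());

    /* ... and revert to the saved identity afterwards */
    SetCurrentUser(saved_userid);
}

int
main(void)
{
    create_schema_as(42);
    printf("back to user %d\n", GetCurrentUser());
    return 0;
}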

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.86 2002/09/03 18:50:54 petere Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.87 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,7 +60,7 @@ typedef struct sequence_magic
* rely on the relcache, since it's only, well, a cache, and may decide to
* discard entries.)
*
* XXX We use linear search to find pre-existing SeqTable entries. This is
* XXX We use linear search to find pre-existing SeqTable entries. This is
* good when only a small number of sequences are touched in a session, but
* would suck with many different sequences. Perhaps use a hashtable someday.
*/
@@ -81,9 +81,9 @@ static SeqTable seqtab = NULL; /* Head of list of SeqTable items */
static void init_sequence(const char *caller, RangeVar *relation,
SeqTable *p_elm, Relation *p_rel);
SeqTable *p_elm, Relation *p_rel);
static Form_pg_sequence read_info(const char *caller, SeqTable elm,
Relation rel, Buffer *buf);
Relation rel, Buffer *buf);
static void init_params(CreateSeqStmt *seq, Form_pg_sequence new);
static void do_setval(RangeVar *sequence, int64 next, bool iscalled);
@@ -226,15 +226,15 @@ DefineSequence(CreateSeqStmt *seq)
* Two special hacks here:
*
* 1. Since VACUUM does not process sequences, we have to force the tuple
* to have xmin = FrozenTransactionId now. Otherwise it would become
* invisible to SELECTs after 2G transactions. It is okay to do this
* to have xmin = FrozenTransactionId now. Otherwise it would become
* invisible to SELECTs after 2G transactions. It is okay to do this
* because if the current transaction aborts, no other xact will ever
* examine the sequence tuple anyway.
*
* 2. Even though heap_insert emitted a WAL log record, we have to emit
* an XLOG_SEQ_LOG record too, since (a) the heap_insert record will
* not have the right xmin, and (b) REDO of the heap_insert record
* would re-init page and sequence magic number would be lost. This
* would re-init page and sequence magic number would be lost. This
* means two log records instead of one :-(
*/
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
@@ -243,11 +243,12 @@ DefineSequence(CreateSeqStmt *seq)
{
/*
* Note that the "tuple" structure is still just a local tuple record
* created by heap_formtuple; its t_data pointer doesn't point at the
* disk buffer. To scribble on the disk buffer we need to fetch the
* item pointer. But do the same to the local tuple, since that will
* be the source for the WAL log record, below.
* Note that the "tuple" structure is still just a local tuple
* record created by heap_formtuple; its t_data pointer doesn't
* point at the disk buffer. To scribble on the disk buffer we
* need to fetch the item pointer. But do the same to the local
* tuple, since that will be the source for the WAL log record,
* below.
*/
ItemId itemId;
Item item;
@@ -323,7 +324,7 @@ nextval(PG_FUNCTION_ARGS)
bool logit = false;
sequence = makeRangeVarFromNameList(textToQualifiedNameList(seqin,
"nextval"));
"nextval"));
/* open and AccessShareLock sequence */
init_sequence("nextval", sequence, &elm, &seqrel);
@@ -358,14 +359,14 @@ nextval(PG_FUNCTION_ARGS)
}
/*
* Decide whether we should emit a WAL log record. If so, force up
* Decide whether we should emit a WAL log record. If so, force up
* the fetch count to grab SEQ_LOG_VALS more values than we actually
* need to cache. (These will then be usable without logging.)
*
* If this is the first nextval after a checkpoint, we must force
* a new WAL record to be written anyway, else replay starting from the
* If this is the first nextval after a checkpoint, we must force a new
* WAL record to be written anyway, else replay starting from the
* checkpoint would fail to advance the sequence past the logged
* values. In this case we may as well fetch extra values.
* values. In this case we may as well fetch extra values.
*/
if (log < fetch)
{
@@ -401,7 +402,8 @@ nextval(PG_FUNCTION_ARGS)
break; /* stop fetching */
if (!seq->is_cycled)
{
char buf[100];
char buf[100];
snprintf(buf, 100, INT64_FORMAT, maxv);
elog(ERROR, "%s.nextval: reached MAXVALUE (%s)",
sequence->relname, buf);
@@ -421,7 +423,8 @@ nextval(PG_FUNCTION_ARGS)
break; /* stop fetching */
if (!seq->is_cycled)
{
char buf[100];
char buf[100];
snprintf(buf, 100, INT64_FORMAT, minv);
elog(ERROR, "%s.nextval: reached MINVALUE (%s)",
sequence->relname, buf);
@@ -507,7 +510,7 @@ currval(PG_FUNCTION_ARGS)
int64 result;
sequence = makeRangeVarFromNameList(textToQualifiedNameList(seqin,
"currval"));
"currval"));
/* open and AccessShareLock sequence */
init_sequence("currval", sequence, &elm, &seqrel);
@@ -560,7 +563,10 @@ do_setval(RangeVar *sequence, int64 next, bool iscalled)
if ((next < seq->min_value) || (next > seq->max_value))
{
char bufv[100], bufm[100], bufx[100];
char bufv[100],
bufm[100],
bufx[100];
snprintf(bufv, 100, INT64_FORMAT, next);
snprintf(bufm, 100, INT64_FORMAT, seq->min_value);
snprintf(bufx, 100, INT64_FORMAT, seq->max_value);
@@ -632,7 +638,7 @@ setval(PG_FUNCTION_ARGS)
RangeVar *sequence;
sequence = makeRangeVarFromNameList(textToQualifiedNameList(seqin,
"setval"));
"setval"));
do_setval(sequence, next, true);
@@ -652,7 +658,7 @@ setval_and_iscalled(PG_FUNCTION_ARGS)
RangeVar *sequence;
sequence = makeRangeVarFromNameList(textToQualifiedNameList(seqin,
"setval"));
"setval"));
do_setval(sequence, next, iscalled);
@@ -672,7 +678,7 @@ init_sequence(const char *caller, RangeVar *relation,
TransactionId thisxid = GetCurrentTransactionId();
SeqTable elm;
Relation seqrel;
/* Look to see if we already have a seqtable entry for relation */
for (elm = seqtab; elm != NULL; elm = elm->next)
{
@@ -697,9 +703,9 @@ init_sequence(const char *caller, RangeVar *relation,
* Allocate new seqtable entry if we didn't find one.
*
* NOTE: seqtable entries remain in the list for the life of a backend.
* If the sequence itself is deleted then the entry becomes wasted memory,
* but it's small enough that this should not matter.
*/
* If the sequence itself is deleted then the entry becomes wasted
* memory, but it's small enough that this should not matter.
*/
if (elm == NULL)
{
/*
@@ -828,7 +834,9 @@ init_params(CreateSeqStmt *seq, Form_pg_sequence new)
if (new->min_value >= new->max_value)
{
char bufm[100], bufx[100];
char bufm[100],
bufx[100];
snprintf(bufm, 100, INT64_FORMAT, new->min_value);
snprintf(bufx, 100, INT64_FORMAT, new->max_value);
elog(ERROR, "DefineSequence: MINVALUE (%s) must be less than MAXVALUE (%s)",
@@ -847,7 +855,9 @@ init_params(CreateSeqStmt *seq, Form_pg_sequence new)
if (new->last_value < new->min_value)
{
char bufs[100], bufm[100];
char bufs[100],
bufm[100];
snprintf(bufs, 100, INT64_FORMAT, new->last_value);
snprintf(bufm, 100, INT64_FORMAT, new->min_value);
elog(ERROR, "DefineSequence: START value (%s) can't be less than MINVALUE (%s)",
@@ -855,7 +865,9 @@ init_params(CreateSeqStmt *seq, Form_pg_sequence new)
}
if (new->last_value > new->max_value)
{
char bufs[100], bufm[100];
char bufs[100],
bufm[100];
snprintf(bufs, 100, INT64_FORMAT, new->last_value);
snprintf(bufm, 100, INT64_FORMAT, new->max_value);
elog(ERROR, "DefineSequence: START value (%s) can't be greater than MAXVALUE (%s)",
@@ -866,7 +878,8 @@ init_params(CreateSeqStmt *seq, Form_pg_sequence new)
new->cache_value = 1;
else if ((new->cache_value = defGetInt64(cache_value)) <= 0)
{
char buf[100];
char buf[100];
snprintf(buf, 100, INT64_FORMAT, new->cache_value);
elog(ERROR, "DefineSequence: CACHE (%s) can't be <= 0",
buf);

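The nextval() hunks above show what happens when the next value would pass MAXVALUE (or MINVALUE for a descending sequence): wrap to the other end if the sequence is cycled, otherwise format the limit into a buffer and report an error. A standalone sketch of the ascending case; long long and "%lld" stand in for the backend's int64/INT64_FORMAT, and the wrap-free overflow test is written in the spirit of the backend's check rather than copied from it:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/*
 * Compute the value after "current" for an ascending sequence, wrapping to
 * minv when the sequence is cycled, erroring out at maxv otherwise.
 */
static long long
advance(long long current, long long incby, long long minv, long long maxv,
        bool is_cycled)
{
    long long   next = current;

    /* test for passing maxv without actually overflowing */
    if ((maxv >= 0 && next > maxv - incby) ||
        (maxv < 0 && next + incby > maxv))
    {
        if (!is_cycled)
        {
            char        buf[100];

            snprintf(buf, sizeof(buf), "%lld", maxv);
            fprintf(stderr, "nextval: reached MAXVALUE (%s)\n", buf);
            exit(1);
        }
        next = minv;            /* wrap around */
    }
    else
        next += incby;

    return next;
}

int
main(void)
{
    printf("%lld\n", advance(2, 1, 1, 3, true));    /* 3 */
    printf("%lld\n", advance(3, 1, 1, 3, true));    /* wraps to 1 */
    printf("%lld\n", advance(3, 1, 1, 3, false));   /* errors out */
    return 0;
}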

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/tablecmds.c,v 1.38 2002/09/02 01:05:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/tablecmds.c,v 1.39 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,11 +60,11 @@ static void setRelhassubclassInRelation(Oid relationId, bool relhassubclass);
static void CheckTupleType(Form_pg_class tuple_class);
static bool needs_toast_table(Relation rel);
static void validateForeignKeyConstraint(FkConstraint *fkconstraint,
Relation rel, Relation pkrel);
static Oid createForeignKeyConstraint(Relation rel, Relation pkrel,
FkConstraint *fkconstraint);
Relation rel, Relation pkrel);
static Oid createForeignKeyConstraint(Relation rel, Relation pkrel,
FkConstraint *fkconstraint);
static void createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
Oid constrOid);
Oid constrOid);
static char *fkMatchTypeToString(char match_type);
/* Used by attribute and relation renaming routines: */
@@ -114,9 +114,8 @@ DefineRelation(CreateStmt *stmt, char relkind)
/*
* Look up the namespace in which we are supposed to create the
* relation. Check we have permission to create there.
* Skip check if bootstrapping, since permissions machinery may not
* be working yet.
* relation. Check we have permission to create there. Skip check if
* bootstrapping, since permissions machinery may not be working yet.
*/
namespaceId = RangeVarGetCreationNamespace(stmt->relation);
@@ -136,7 +135,7 @@ DefineRelation(CreateStmt *stmt, char relkind)
*/
schema = MergeAttributes(schema, stmt->inhRelations,
stmt->relation->istemp,
&inheritOids, &old_constraints, &parentHasOids);
&inheritOids, &old_constraints, &parentHasOids);
numberOfAttributes = length(schema);
if (numberOfAttributes <= 0)
@@ -180,10 +179,10 @@ DefineRelation(CreateStmt *stmt, char relkind)
else
{
/*
* Generate a constraint name. NB: this should match the
* Generate a constraint name. NB: this should match the
* form of names that GenerateConstraintName() may produce
* for names added later. We are assured that there is
* no name conflict, because MergeAttributes() did not pass
* for names added later. We are assured that there is no
* name conflict, because MergeAttributes() did not pass
* back any names of this form.
*/
check[ncheck].ccname = (char *) palloc(NAMEDATALEN);
@@ -242,8 +241,8 @@ DefineRelation(CreateStmt *stmt, char relkind)
* CREATE TABLE.
*
* Another task that's conveniently done at this step is to add
* dependency links between columns and supporting relations (such
* as SERIAL sequences).
* dependency links between columns and supporting relations (such as
* SERIAL sequences).
*
* First, scan schema to find new column defaults.
*/
@@ -271,7 +270,7 @@ DefineRelation(CreateStmt *stmt, char relkind)
if (colDef->support != NULL)
{
/* Create dependency for supporting relation for this column */
ObjectAddress colobject,
ObjectAddress colobject,
suppobject;
colobject.classId = RelOid_pg_class;
@@ -334,9 +333,9 @@ TruncateRelation(const RangeVar *relation)
Relation rel;
Oid relid;
Oid toastrelid;
ScanKeyData key;
Relation fkeyRel;
SysScanDesc fkeyScan;
ScanKeyData key;
Relation fkeyRel;
SysScanDesc fkeyScan;
HeapTuple tuple;
/* Grab exclusive lock in preparation for truncate */
@@ -366,8 +365,7 @@ TruncateRelation(const RangeVar *relation)
aclcheck_error(ACLCHECK_NOT_OWNER, RelationGetRelationName(rel));
/*
* Don't allow truncate on tables which are referenced
* by foreign keys
* Don't allow truncate on tables which are referenced by foreign keys
*/
fkeyRel = heap_openr(ConstraintRelationName, AccessShareLock);
@@ -380,8 +378,8 @@ TruncateRelation(const RangeVar *relation)
SnapshotNow, 1, &key);
/*
* First foreign key found with us as the reference
* should throw an error.
* First foreign key found with us as the reference should throw an
* error.
*/
while (HeapTupleIsValid(tuple = systable_getnext(fkeyScan)))
{
@@ -554,7 +552,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
/*
* newattno[] will contain the child-table attribute numbers for
* the attributes of this parent table. (They are not the same
* for parents after the first one, nor if we have dropped columns.)
* for parents after the first one, nor if we have dropped
* columns.)
*/
newattno = (AttrNumber *) palloc(tupleDesc->natts * sizeof(AttrNumber));
@@ -572,9 +571,10 @@ MergeAttributes(List *schema, List *supers, bool istemp,
*/
if (attribute->attisdropped)
{
/*
* change_varattnos_of_a_node asserts that this is greater than
* zero, so if anything tries to use it, we should find out.
/*
* change_varattnos_of_a_node asserts that this is greater
* than zero, so if anything tries to use it, we should
* find out.
*/
newattno[parent_attno - 1] = 0;
continue;
@@ -684,6 +684,7 @@ MergeAttributes(List *schema, List *supers, bool istemp,
Node *expr;
cdef->contype = CONSTR_CHECK;
/*
* Do not inherit generated constraint names, since they
* might conflict across multiple inheritance parents.
@@ -857,8 +858,8 @@ StoreCatalogInheritance(Oid relationId, List *supers)
return;
/*
* Store INHERITS information in pg_inherits using direct ancestors only.
* Also enter dependencies on the direct ancestors.
* Store INHERITS information in pg_inherits using direct ancestors
* only. Also enter dependencies on the direct ancestors.
*/
relation = heap_openr(InheritsRelationName, RowExclusiveLock);
desc = RelationGetDescr(relation);
@@ -1076,7 +1077,7 @@ renameatt(Oid myrelid,
*
* normally, only the owner of a class can change its schema.
*/
if (!allowSystemTableMods
if (!allowSystemTableMods
&& IsSystemRelation(targetrelation))
elog(ERROR, "renameatt: class \"%s\" is a system catalog",
RelationGetRelationName(targetrelation));
@@ -1141,8 +1142,8 @@ renameatt(Oid myrelid,
oldattname);
/*
* if the attribute is inherited, forbid the renaming, unless we
* are already inside a recursive rename.
* if the attribute is inherited, forbid the renaming, unless we are
* already inside a recursive rename.
*/
if (attform->attisinherited && !recursing)
elog(ERROR, "renameatt: inherited attribute \"%s\" may not be renamed",
@@ -1233,7 +1234,8 @@ renameatt(Oid myrelid,
true, false);
}
relation_close(targetrelation, NoLock); /* close rel but keep lock! */
relation_close(targetrelation, NoLock); /* close rel but keep
* lock! */
}
/*
@@ -1382,7 +1384,7 @@ update_ri_trigger_args(Oid relid,
{
Relation tgrel;
ScanKeyData skey[1];
SysScanDesc trigscan;
SysScanDesc trigscan;
HeapTuple tuple;
Datum values[Natts_pg_trigger];
char nulls[Natts_pg_trigger];
@@ -1577,8 +1579,8 @@ AlterTableAddColumn(Oid myrelid,
HeapTuple typeTuple;
Form_pg_type tform;
int attndims;
ObjectAddress myself,
referenced;
ObjectAddress myself,
referenced;
/*
* Grab an exclusive lock on the target table, which we will NOT
@@ -1666,7 +1668,7 @@ AlterTableAddColumn(Oid myrelid,
if (colDef->is_not_null)
elog(ERROR, "Adding NOT NULL columns is not implemented."
"\n\tAdd the column, then use ALTER TABLE ... SET NOT NULL.");
"\n\tAdd the column, then use ALTER TABLE ... SET NOT NULL.");
pgclass = heap_openr(RelationRelationName, RowExclusiveLock);
@@ -1678,8 +1680,9 @@ AlterTableAddColumn(Oid myrelid,
RelationGetRelationName(rel));
/*
* this test is deliberately not attisdropped-aware, since if one tries
* to add a column matching a dropped column name, it's gonna fail anyway.
* this test is deliberately not attisdropped-aware, since if one
* tries to add a column matching a dropped column name, it's gonna
* fail anyway.
*/
if (SearchSysCacheExists(ATTNAME,
ObjectIdGetDatum(myrelid),
@@ -1706,7 +1709,7 @@ AlterTableAddColumn(Oid myrelid,
tform = (Form_pg_type) GETSTRUCT(typeTuple);
attributeTuple = heap_addheader(Natts_pg_attribute,
false,
false,
ATTRIBUTE_TUPLE_SIZE,
(void *) &attributeD);
@@ -1806,8 +1809,8 @@ AlterTableAlterColumnDropNotNull(Oid myrelid, bool recurse,
HeapTuple tuple;
AttrNumber attnum;
Relation attr_rel;
List *indexoidlist;
List *indexoidscan;
List *indexoidlist;
List *indexoidscan;
rel = heap_open(myrelid, AccessExclusiveLock);
@@ -1874,10 +1877,10 @@ AlterTableAlterColumnDropNotNull(Oid myrelid, bool recurse,
foreach(indexoidscan, indexoidlist)
{
Oid indexoid = lfirsti(indexoidscan);
Oid indexoid = lfirsti(indexoidscan);
HeapTuple indexTuple;
Form_pg_index indexStruct;
int i;
Form_pg_index indexStruct;
int i;
indexTuple = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(indexoid),
@@ -1891,11 +1894,11 @@ AlterTableAlterColumnDropNotNull(Oid myrelid, bool recurse,
if (indexStruct->indisprimary)
{
/*
* Loop over each attribute in the primary key and
* see if it matches the to-be-altered attribute
* Loop over each attribute in the primary key and see if it
* matches the to-be-altered attribute
*/
for (i = 0; i < INDEX_MAX_KEYS &&
indexStruct->indkey[i] != InvalidAttrNumber; i++)
indexStruct->indkey[i] != InvalidAttrNumber; i++)
{
if (indexStruct->indkey[i] == attnum)
elog(ERROR, "ALTER TABLE: Attribute \"%s\" is in a primary key", colName);
@@ -1913,7 +1916,7 @@ AlterTableAlterColumnDropNotNull(Oid myrelid, bool recurse,
attr_rel = heap_openr(AttributeRelationName, RowExclusiveLock);
tuple = SearchSysCacheCopyAttName(myrelid, colName);
if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
elog(ERROR, "ALTER TABLE: relation \"%s\" has no column \"%s\"",
RelationGetRelationName(rel), colName);
@@ -1940,7 +1943,7 @@ AlterTableAlterColumnSetNotNull(Oid myrelid, bool recurse,
HeapTuple tuple;
AttrNumber attnum;
Relation attr_rel;
HeapScanDesc scan;
HeapScanDesc scan;
TupleDesc tupdesc;
rel = heap_open(myrelid, AccessExclusiveLock);
@@ -2000,8 +2003,8 @@ AlterTableAlterColumnSetNotNull(Oid myrelid, bool recurse,
colName);
/*
* Perform a scan to ensure that there are no NULL
* values already in the relation
* Perform a scan to ensure that there are no NULL values already in
* the relation
*/
tupdesc = RelationGetDescr(rel);
@@ -2009,7 +2012,7 @@ AlterTableAlterColumnSetNotNull(Oid myrelid, bool recurse,
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
Datum d;
Datum d;
bool isnull;
d = heap_getattr(tuple, attnum, tupdesc, &isnull);
@@ -2027,7 +2030,7 @@ AlterTableAlterColumnSetNotNull(Oid myrelid, bool recurse,
attr_rel = heap_openr(AttributeRelationName, RowExclusiveLock);
tuple = SearchSysCacheCopyAttName(myrelid, colName);
if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
elog(ERROR, "ALTER TABLE: relation \"%s\" has no column \"%s\"",
RelationGetRelationName(rel), colName);
@@ -2153,7 +2156,7 @@ AlterTableAlterColumnFlags(Oid myrelid, bool recurse,
{
Relation rel;
int newtarget = 1;
char newstorage = 'p';
char newstorage = 'p';
Relation attrelation;
HeapTuple tuple;
Form_pg_attribute attrtuple;
@@ -2200,7 +2203,7 @@ AlterTableAlterColumnFlags(Oid myrelid, bool recurse,
else if (*flagType == 'M')
{
/* STORAGE */
char *storagemode;
char *storagemode;
Assert(IsA(flagValue, String));
storagemode = strVal(flagValue);
@@ -2246,7 +2249,7 @@ AlterTableAlterColumnFlags(Oid myrelid, bool recurse,
if (childrelid == myrelid)
continue;
AlterTableAlterColumnFlags(childrelid,
false, colName, flagValue, flagType);
false, colName, flagValue, flagType);
}
}
@@ -2263,6 +2266,7 @@ AlterTableAlterColumnFlags(Oid myrelid, bool recurse,
if (attrtuple->attnum < 0)
elog(ERROR, "ALTER TABLE: cannot change system attribute \"%s\"",
colName);
/*
* Now change the appropriate field
*/
@@ -2306,7 +2310,7 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
AttrNumber n;
TupleDesc tupleDesc;
bool success;
ObjectAddress object;
ObjectAddress object;
rel = heap_open(myrelid, AccessExclusiveLock);
@@ -2336,8 +2340,9 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
colName);
/*
* Make sure there will be at least one user column left in the relation
* after we drop this one. Zero-length tuples tend to confuse us.
* Make sure there will be at least one user column left in the
* relation after we drop this one. Zero-length tuples tend to
* confuse us.
*/
tupleDesc = RelationGetDescr(rel);
@@ -2355,7 +2360,7 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
if (!success)
elog(ERROR, "ALTER TABLE: Cannot drop last column from table \"%s\"",
RelationGetRelationName(rel));
RelationGetRelationName(rel));
/* Don't drop inherited columns */
if (tupleDesc->attrs[attnum - 1]->attisinherited && !recursing)
@@ -2363,8 +2368,8 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
colName);
/*
* If we are asked to drop ONLY in this table (no recursion),
* we need to mark the inheritors' attribute as non-inherited.
* If we are asked to drop ONLY in this table (no recursion), we need
* to mark the inheritors' attribute as non-inherited.
*/
if (!recurse && !recursing)
{
@@ -2378,14 +2383,14 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
attr_rel = heap_openr(AttributeRelationName, RowExclusiveLock);
foreach(child, children)
{
Oid childrelid = lfirsti(child);
Relation childrel;
Oid childrelid = lfirsti(child);
Relation childrel;
HeapTuple tuple;
childrel = heap_open(childrelid, AccessExclusiveLock);
tuple = SearchSysCacheCopyAttName(childrelid, colName);
if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
elog(ERROR, "ALTER TABLE: relation %u has no column \"%s\"",
childrelid, colName);
@@ -2407,7 +2412,7 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
if (recurse)
{
List *child,
*children;
*children;
/* this routine is actually in the planner */
children = find_all_inheritors(myrelid);
@@ -2495,8 +2500,8 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
foreach(listptr, newConstraints)
{
/*
* copy is because we may destructively alter the node below
* by inserting a generated name; this name is not necessarily
* copy is because we may destructively alter the node below by
* inserting a generated name; this name is not necessarily
* correct for children or parents.
*/
Node *newConstraint = copyObject(lfirst(listptr));
@@ -2533,16 +2538,16 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
if (constr->name)
{
if (ConstraintNameIsUsed(RelationGetRelid(rel),
RelationGetNamespace(rel),
constr->name))
RelationGetNamespace(rel),
constr->name))
elog(ERROR, "constraint \"%s\" already exists for relation \"%s\"",
constr->name,
RelationGetRelationName(rel));
RelationGetRelationName(rel));
}
else
constr->name = GenerateConstraintName(RelationGetRelid(rel),
RelationGetNamespace(rel),
&counter);
RelationGetNamespace(rel),
&counter);
/*
* We need to make a parse state and range
@@ -2552,8 +2557,8 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
*/
pstate = make_parsestate(NULL);
rte = addRangeTableEntryForRelation(pstate,
myrelid,
makeAlias(RelationGetRelationName(rel), NIL),
myrelid,
makeAlias(RelationGetRelationName(rel), NIL),
false,
true);
addRTEtoQuery(pstate, rte, true, true);
@@ -2657,23 +2662,23 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
if (fkconstraint->constr_name)
{
if (ConstraintNameIsUsed(RelationGetRelid(rel),
RelationGetNamespace(rel),
fkconstraint->constr_name))
RelationGetNamespace(rel),
fkconstraint->constr_name))
elog(ERROR, "constraint \"%s\" already exists for relation \"%s\"",
fkconstraint->constr_name,
RelationGetRelationName(rel));
}
else
fkconstraint->constr_name = GenerateConstraintName(RelationGetRelid(rel),
RelationGetNamespace(rel),
&counter);
RelationGetNamespace(rel),
&counter);
/*
* Grab an exclusive lock on the pk table, so that
* someone doesn't delete rows out from under us.
* (Although a lesser lock would do for that purpose,
* we'll need exclusive lock anyway to add triggers
* to the pk table; trying to start with a lesser lock
* we'll need exclusive lock anyway to add triggers to
* the pk table; trying to start with a lesser lock
* will just create a risk of deadlock.)
*/
pkrel = heap_openrv(fkconstraint->pktable,
@@ -2716,12 +2721,14 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
fkconstraint);
/*
* Create the triggers that will enforce the constraint.
* Create the triggers that will enforce the
* constraint.
*/
createForeignKeyTriggers(rel, fkconstraint, constrOid);
/*
* Close pk table, but keep lock until we've committed.
* Close pk table, but keep lock until we've
* committed.
*/
heap_close(pkrel, NoLock);
@@ -2754,10 +2761,9 @@ validateForeignKeyConstraint(FkConstraint *fkconstraint,
int count;
/*
* Scan through each tuple, calling RI_FKey_check_ins
* (insert trigger) as if that tuple had just been
* inserted. If any of those fail, it should
* elog(ERROR) and that's that.
* Scan through each tuple, calling RI_FKey_check_ins (insert trigger)
* as if that tuple had just been inserted. If any of those fail, it
* should elog(ERROR) and that's that.
*/
MemSet(&trig, 0, sizeof(trig));
trig.tgoid = InvalidOid;
@@ -2848,7 +2854,7 @@ createForeignKeyConstraint(Relation rel, Relation pkrel,
i = 0;
foreach(l, fkconstraint->fk_attrs)
{
char *id = strVal(lfirst(l));
char *id = strVal(lfirst(l));
AttrNumber attno;
attno = get_attnum(RelationGetRelid(rel), id);
@@ -2864,7 +2870,7 @@ createForeignKeyConstraint(Relation rel, Relation pkrel,
i = 0;
foreach(l, fkconstraint->pk_attrs)
{
char *id = strVal(lfirst(l));
char *id = strVal(lfirst(l));
AttrNumber attno;
attno = get_attnum(RelationGetRelid(pkrel), id);
@@ -2883,14 +2889,14 @@ createForeignKeyConstraint(Relation rel, Relation pkrel,
RelationGetRelid(rel),
fkattr,
fkcount,
InvalidOid, /* not a domain constraint */
InvalidOid, /* not a domain constraint */
RelationGetRelid(pkrel),
pkattr,
pkcount,
fkconstraint->fk_upd_action,
fkconstraint->fk_del_action,
fkconstraint->fk_matchtype,
NULL, /* no check constraint */
NULL, /* no check constraint */
NULL,
NULL);
}
@@ -2910,7 +2916,8 @@ createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
constrobj;
/*
* Reconstruct a RangeVar for my relation (not passed in, unfortunately).
* Reconstruct a RangeVar for my relation (not passed in,
* unfortunately).
*/
myRel = makeRangeVar(get_namespace_name(RelationGetNamespace(rel)),
RelationGetRelationName(rel));
@@ -2956,9 +2963,9 @@ createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
fk_trigger->args = lappend(fk_trigger->args,
makeString(myRel->relname));
fk_trigger->args = lappend(fk_trigger->args,
makeString(fkconstraint->pktable->relname));
makeString(fkconstraint->pktable->relname));
fk_trigger->args = lappend(fk_trigger->args,
makeString(fkMatchTypeToString(fkconstraint->fk_matchtype)));
makeString(fkMatchTypeToString(fkconstraint->fk_matchtype)));
fk_attr = fkconstraint->fk_attrs;
pk_attr = fkconstraint->pk_attrs;
if (length(fk_attr) != length(pk_attr))
@@ -2983,8 +2990,8 @@ createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
CommandCounterIncrement();
/*
* Build and execute a CREATE CONSTRAINT TRIGGER statement for the
* ON DELETE action on the referenced table.
* Build and execute a CREATE CONSTRAINT TRIGGER statement for the ON
* DELETE action on the referenced table.
*/
fk_trigger = makeNode(CreateTrigStmt);
fk_trigger->trigname = fkconstraint->constr_name;
@@ -3032,9 +3039,9 @@ createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
fk_trigger->args = lappend(fk_trigger->args,
makeString(myRel->relname));
fk_trigger->args = lappend(fk_trigger->args,
makeString(fkconstraint->pktable->relname));
makeString(fkconstraint->pktable->relname));
fk_trigger->args = lappend(fk_trigger->args,
makeString(fkMatchTypeToString(fkconstraint->fk_matchtype)));
makeString(fkMatchTypeToString(fkconstraint->fk_matchtype)));
fk_attr = fkconstraint->fk_attrs;
pk_attr = fkconstraint->pk_attrs;
while (fk_attr != NIL)
@@ -3054,8 +3061,8 @@ createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
CommandCounterIncrement();
/*
* Build and execute a CREATE CONSTRAINT TRIGGER statement for the
* ON UPDATE action on the referenced table.
* Build and execute a CREATE CONSTRAINT TRIGGER statement for the ON
* UPDATE action on the referenced table.
*/
fk_trigger = makeNode(CreateTrigStmt);
fk_trigger->trigname = fkconstraint->constr_name;
@@ -3103,9 +3110,9 @@ createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
fk_trigger->args = lappend(fk_trigger->args,
makeString(myRel->relname));
fk_trigger->args = lappend(fk_trigger->args,
makeString(fkconstraint->pktable->relname));
makeString(fkconstraint->pktable->relname));
fk_trigger->args = lappend(fk_trigger->args,
makeString(fkMatchTypeToString(fkconstraint->fk_matchtype)));
makeString(fkMatchTypeToString(fkconstraint->fk_matchtype)));
fk_attr = fkconstraint->fk_attrs;
pk_attr = fkconstraint->pk_attrs;
while (fk_attr != NIL)
@@ -3129,7 +3136,7 @@ createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
static char *
fkMatchTypeToString(char match_type)
{
switch (match_type)
switch (match_type)
{
case FKCONSTR_MATCH_FULL:
return pstrdup("FULL");
@@ -3227,10 +3234,10 @@ AlterTableDropConstraint(Oid myrelid, bool recurse,
void
AlterTableOwner(Oid relationOid, int32 newOwnerSysId)
{
Relation target_rel;
Relation class_rel;
HeapTuple tuple;
Form_pg_class tuple_class;
Relation target_rel;
Relation class_rel;
HeapTuple tuple;
Form_pg_class tuple_class;
/* Get exclusive lock till end of transaction on the target table */
/* Use relation_open here so that we work on indexes... */
@@ -3250,8 +3257,8 @@ AlterTableOwner(Oid relationOid, int32 newOwnerSysId)
CheckTupleType(tuple_class);
/*
* Okay, this is a valid tuple: change its ownership and
* write to the heap.
* Okay, this is a valid tuple: change its ownership and write to the
* heap.
*/
tuple_class->relowner = newOwnerSysId;
simple_heap_update(class_rel, &tuple->t_self, tuple);
@@ -3267,16 +3274,15 @@ AlterTableOwner(Oid relationOid, int32 newOwnerSysId)
if (tuple_class->relkind == RELKIND_RELATION ||
tuple_class->relkind == RELKIND_TOASTVALUE)
{
List *index_oid_list, *i;
List *index_oid_list,
*i;
/* Find all the indexes belonging to this relation */
index_oid_list = RelationGetIndexList(target_rel);
/* For each index, recursively change its ownership */
foreach(i, index_oid_list)
{
AlterTableOwner(lfirsti(i), newOwnerSysId);
}
freeList(index_oid_list);
}
@@ -3285,9 +3291,7 @@ AlterTableOwner(Oid relationOid, int32 newOwnerSysId)
{
/* If it has a toast table, recurse to change its ownership */
if (tuple_class->reltoastrelid != InvalidOid)
{
AlterTableOwner(tuple_class->reltoastrelid, newOwnerSysId);
}
}
heap_freetuple(tuple);
@@ -3355,7 +3359,7 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
* We cannot allow toasting a shared relation after initdb (because
* there's no way to mark it toasted in other databases' pg_class).
* Unfortunately we can't distinguish initdb from a manually started
* standalone backend. However, we can at least prevent this mistake
* standalone backend. However, we can at least prevent this mistake
* under normal multi-user operation.
*/
shared_relation = rel->rd_rel->relisshared;
@@ -3453,10 +3457,11 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
tupdesc->attrs[2]->attstorage = 'p';
/*
* Note: the toast relation is placed in the regular pg_toast namespace
* even if its master relation is a temp table. There cannot be any
* naming collision, and the toast rel will be destroyed when its master
* is, so there's no need to handle the toast rel as temp.
* Note: the toast relation is placed in the regular pg_toast
* namespace even if its master relation is a temp table. There
* cannot be any naming collision, and the toast rel will be destroyed
* when its master is, so there's no need to handle the toast rel as
* temp.
*/
toast_relid = heap_create_with_catalog(toast_relname,
PG_TOAST_NAMESPACE,
@@ -3471,12 +3476,12 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
/*
* Create unique index on chunk_id, chunk_seq.
*
* NOTE: the normal TOAST access routines could actually function with
* a single-column index on chunk_id only. However, the slice access
* NOTE: the normal TOAST access routines could actually function with a
* single-column index on chunk_id only. However, the slice access
* routines use both columns for faster access to an individual chunk.
* In addition, we want it to be unique as a check against the
* In addition, we want it to be unique as a check against the
* possibility of duplicate TOAST chunk OIDs. The index might also be
* a little more efficient this way, since btree isn't all that happy
* a little more efficient this way, since btree isn't all that happy
* with large numbers of equal keys.
*/
@@ -3516,8 +3521,8 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
heap_freetuple(reltup);
/*
* Register dependency from the toast table to the master, so that
* the toast table will be deleted if the master is.
* Register dependency from the toast table to the master, so that the
* toast table will be deleted if the master is.
*/
baseobject.classId = RelOid_pg_class;
baseobject.objectId = relOid;


@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.130 2002/09/02 01:05:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.131 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,7 +55,7 @@ static void DeferredTriggerExecute(DeferredTriggerEvent event, int itemno,
* Create a trigger. Returns the OID of the created trigger.
*
* forConstraint, if true, says that this trigger is being created to
* implement a constraint. The caller will then be expected to make
* implement a constraint. The caller will then be expected to make
* a pg_depend entry linking the trigger to that constraint (and thereby
* to the owning relation(s)).
*/
@@ -69,7 +69,7 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
Relation rel;
AclResult aclresult;
Relation tgrel;
SysScanDesc tgscan;
SysScanDesc tgscan;
ScanKeyData key;
Relation pgrel;
HeapTuple tuple;
@@ -82,8 +82,8 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
char *trigname;
char *constrname;
Oid constrrelid;
ObjectAddress myself,
referenced;
ObjectAddress myself,
referenced;
rel = heap_openrv(stmt->relation, AccessExclusiveLock);
@@ -98,7 +98,7 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
if (!allowSystemTableMods && IsSystemRelation(rel))
elog(ERROR, "CreateTrigger: can't create trigger for system relation %s",
stmt->relation->relname);
stmt->relation->relname);
/* permission checks */
@@ -132,9 +132,9 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
/*
* If trigger is an RI constraint, use specified trigger name as
* constraint name and build a unique trigger name instead.
* This is mainly for backwards compatibility with CREATE CONSTRAINT
* TRIGGER commands.
* constraint name and build a unique trigger name instead. This is
* mainly for backwards compatibility with CREATE CONSTRAINT TRIGGER
* commands.
*/
if (stmt->isconstraint)
{
@@ -183,10 +183,10 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
}
/*
* Scan pg_trigger for existing triggers on relation. We do this mainly
* because we must count them; a secondary benefit is to give a nice
* error message if there's already a trigger of the same name. (The
* unique index on tgrelid/tgname would complain anyway.)
* Scan pg_trigger for existing triggers on relation. We do this
* mainly because we must count them; a secondary benefit is to give a
* nice error message if there's already a trigger of the same name.
* (The unique index on tgrelid/tgname would complain anyway.)
*
* NOTE that this is cool only because we have AccessExclusiveLock on the
* relation, so the trigger set won't be changing underneath us.
@@ -241,13 +241,13 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
CStringGetDatum(trigname));
CStringGetDatum(trigname));
values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
values[Anum_pg_trigger_tgenabled - 1] = BoolGetDatum(true);
values[Anum_pg_trigger_tgisconstraint - 1] = BoolGetDatum(stmt->isconstraint);
values[Anum_pg_trigger_tgconstrname - 1] = DirectFunctionCall1(namein,
CStringGetDatum(constrname));
CStringGetDatum(constrname));
values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
@@ -354,8 +354,9 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
* CREATE TRIGGER command, also make trigger be auto-dropped if its
* relation is dropped or if the FK relation is dropped. (Auto drop
* is compatible with our pre-7.3 behavior.) If the trigger is being
* made for a constraint, we can skip the relation links; the dependency
* on the constraint will indirectly depend on the relations.
* made for a constraint, we can skip the relation links; the
* dependency on the constraint will indirectly depend on the
* relations.
*/
referenced.classId = RelOid_pg_proc;
referenced.objectId = funcoid;
@@ -389,10 +390,10 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
void
DropTrigger(Oid relid, const char *trigname, DropBehavior behavior)
{
Relation tgrel;
ScanKeyData skey[2];
SysScanDesc tgscan;
HeapTuple tup;
Relation tgrel;
ScanKeyData skey[2];
SysScanDesc tgscan;
HeapTuple tup;
ObjectAddress object;
/*
@@ -440,14 +441,14 @@ void
RemoveTriggerById(Oid trigOid)
{
Relation tgrel;
SysScanDesc tgscan;
ScanKeyData skey[1];
SysScanDesc tgscan;
ScanKeyData skey[1];
HeapTuple tup;
Oid relid;
Relation rel;
Relation pgrel;
HeapTuple tuple;
Form_pg_class classForm;
Form_pg_class classForm;
tgrel = heap_openr(TriggerRelationName, RowExclusiveLock);
@@ -495,8 +496,8 @@ RemoveTriggerById(Oid trigOid)
* rebuild relcache entries.
*
* Note this is OK only because we have AccessExclusiveLock on the rel,
* so no one else is creating/deleting triggers on this rel at the same
* time.
* so no one else is creating/deleting triggers on this rel at the
* same time.
*/
pgrel = heap_openr(RelationRelationName, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
@@ -545,7 +546,7 @@ renametrig(Oid relid,
Relation targetrel;
Relation tgrel;
HeapTuple tuple;
SysScanDesc tgscan;
SysScanDesc tgscan;
ScanKeyData key[2];
/*
@@ -555,10 +556,10 @@ renametrig(Oid relid,
targetrel = heap_open(relid, AccessExclusiveLock);
/*
* Scan pg_trigger twice for existing triggers on relation. We do this in
* order to ensure a trigger does not exist with newname (The unique index
* on tgrelid/tgname would complain anyway) and to ensure a trigger does
* exist with oldname.
* Scan pg_trigger twice for existing triggers on relation. We do
* this in order to ensure a trigger does not exist with newname (The
* unique index on tgrelid/tgname would complain anyway) and to ensure
* a trigger does exist with oldname.
*
* NOTE that this is cool only because we have AccessExclusiveLock on the
* relation, so the trigger set won't be changing underneath us.
@@ -601,7 +602,7 @@ renametrig(Oid relid,
/*
* Update pg_trigger tuple with new tgname.
*/
tuple = heap_copytuple(tuple); /* need a modifiable copy */
tuple = heap_copytuple(tuple); /* need a modifiable copy */
namestrcpy(&((Form_pg_trigger) GETSTRUCT(tuple))->tgname, newname);
@@ -611,9 +612,10 @@ renametrig(Oid relid,
CatalogUpdateIndexes(tgrel, tuple);
/*
* Invalidate relation's relcache entry so that other backends (and
* this one too!) are sent SI message to make them rebuild relcache
* entries. (Ideally this should happen automatically...)
* Invalidate relation's relcache entry so that other backends
* (and this one too!) are sent SI message to make them rebuild
* relcache entries. (Ideally this should happen
* automatically...)
*/
CacheInvalidateRelcache(relid);
}
@@ -649,17 +651,17 @@ RelationBuildTriggers(Relation relation)
int found = 0;
Relation tgrel;
ScanKeyData skey;
SysScanDesc tgscan;
SysScanDesc tgscan;
HeapTuple htup;
triggers = (Trigger *) MemoryContextAlloc(CacheMemoryContext,
ntrigs * sizeof(Trigger));
/*
* Note: since we scan the triggers using TriggerRelidNameIndex,
* we will be reading the triggers in name order, except possibly
* during emergency-recovery operations (ie, IsIgnoringSystemIndexes).
* This in turn ensures that triggers will be fired in name order.
* Note: since we scan the triggers using TriggerRelidNameIndex, we
* will be reading the triggers in name order, except possibly during
* emergency-recovery operations (ie, IsIgnoringSystemIndexes). This
* in turn ensures that triggers will be fired in name order.
*/
ScanKeyEntryInitialize(&skey,
(bits16) 0x0,
@@ -1528,17 +1530,17 @@ deferredTriggerInvokeEvents(bool immediate_only)
/*
* If immediate_only is true, we remove fully-processed events from
* the event queue to recycle space. If immediate_only is false,
* we are going to discard the whole event queue on return anyway,
* so no need to bother with "retail" pfree's.
* the event queue to recycle space. If immediate_only is false, we
* are going to discard the whole event queue on return anyway, so no
* need to bother with "retail" pfree's.
*
* In a scenario with many commands in a transaction and many
* deferred-to-end-of-transaction triggers, it could get annoying
* to rescan all the deferred triggers at each command end.
* To speed this up, we could remember the actual end of the queue at
* EndQuery and examine only events that are newer. On state changes
* we simply reset the saved position to the beginning of the queue
* and process all events once with the new states.
* deferred-to-end-of-transaction triggers, it could get annoying to
* rescan all the deferred triggers at each command end. To speed this
* up, we could remember the actual end of the queue at EndQuery and
* examine only events that are newer. On state changes we simply
* reset the saved position to the beginning of the queue and process
* all events once with the new states.
*/
/* Make a per-tuple memory context for trigger function calls */
@@ -1559,8 +1561,8 @@ deferredTriggerInvokeEvents(bool immediate_only)
/*
* Check if event is already completely done.
*/
if (! (event->dte_event & (TRIGGER_DEFERRED_DONE |
TRIGGER_DEFERRED_CANCELED)))
if (!(event->dte_event & (TRIGGER_DEFERRED_DONE |
TRIGGER_DEFERRED_CANCELED)))
{
MemoryContextReset(per_tuple_context);
@@ -1577,16 +1579,16 @@ deferredTriggerInvokeEvents(bool immediate_only)
* should call it now.
*/
if (immediate_only &&
deferredTriggerCheckState(event->dte_item[i].dti_tgoid,
event->dte_item[i].dti_state))
deferredTriggerCheckState(event->dte_item[i].dti_tgoid,
event->dte_item[i].dti_state))
{
still_deferred_ones = true;
continue;
}
/*
* So let's fire it... but first, open the correct relation
* if this is not the same relation as before.
* So let's fire it... but first, open the correct
* relation if this is not the same relation as before.
*/
if (rel == NULL || rel->rd_id != event->dte_relid)
{
@@ -1596,14 +1598,14 @@ deferredTriggerInvokeEvents(bool immediate_only)
pfree(finfo);
/*
* We assume that an appropriate lock is still held by the
* executor, so grab no new lock here.
* We assume that an appropriate lock is still held by
* the executor, so grab no new lock here.
*/
rel = heap_open(event->dte_relid, NoLock);
/*
* Allocate space to cache fmgr lookup info for triggers
* of this relation.
* Allocate space to cache fmgr lookup info for
* triggers of this relation.
*/
finfo = (FmgrInfo *)
palloc(rel->trigdesc->numtriggers * sizeof(FmgrInfo));
@@ -1615,15 +1617,15 @@ deferredTriggerInvokeEvents(bool immediate_only)
per_tuple_context);
event->dte_item[i].dti_state |= TRIGGER_DEFERRED_DONE;
} /* end loop over items within event */
} /* end loop over items within event */
}
/*
* If it's now completely done, throw it away.
*
* NB: it's possible the trigger calls above added more events to the
* queue, or that calls we will do later will want to add more,
* so we have to be careful about maintaining list validity here.
* queue, or that calls we will do later will want to add more, so
* we have to be careful about maintaining list validity here.
*/
next_event = event->dte_next;
@@ -1724,6 +1726,7 @@ DeferredTriggerBeginXact(void)
oldcxt = MemoryContextSwitchTo(deftrig_cxt);
deftrig_all_isset = false;
/*
* If unspecified, constraints default to IMMEDIATE, per SQL
*/
@@ -1827,8 +1830,8 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
/*
* If called outside a transaction block, we can safely return: this
* command cannot effect any subsequent transactions, and there
* are no "session-level" trigger settings.
* command cannot effect any subsequent transactions, and there are no
* "session-level" trigger settings.
*/
if (!IsTransactionBlock())
return;
@@ -1879,7 +1882,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
{
char *cname = strVal(lfirst(l));
ScanKeyData skey;
SysScanDesc tgscan;
SysScanDesc tgscan;
HeapTuple htup;
/*
@@ -1892,7 +1895,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
* Setup to scan pg_trigger by tgconstrname ...
*/
ScanKeyEntryInitialize(&skey, (bits16) 0x0,
(AttrNumber) Anum_pg_trigger_tgconstrname,
(AttrNumber) Anum_pg_trigger_tgconstrname,
(RegProcedure) F_NAMEEQ,
PointerGetDatum(cname));
@@ -1910,9 +1913,9 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
Oid constr_oid;
/*
* If we found some, check that they fit the deferrability but
* skip ON <event> RESTRICT ones, since they are silently
* never deferrable.
* If we found some, check that they fit the deferrability
* but skip ON <event> RESTRICT ones, since they are
* silently never deferrable.
*/
if (stmt->deferred && !pg_trigger->tgdeferrable &&
pg_trigger->tgfoid != F_RI_FKEY_RESTRICT_UPD &&
@@ -1971,11 +1974,11 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
/*
* SQL99 requires that when a constraint is set to IMMEDIATE, any
* deferred checks against that constraint must be made when the
* SET CONSTRAINTS command is executed -- i.e. the effects of the
* SET CONSTRAINTS command applies retroactively. This happens "for
* free" since we have already made the necessary modifications to
* the constraints, and deferredTriggerEndQuery() is called by
* deferred checks against that constraint must be made when the SET
* CONSTRAINTS command is executed -- i.e. the effects of the SET
* CONSTRAINTS command applies retroactively. This happens "for free"
* since we have already made the necessary modifications to the
* constraints, and deferredTriggerEndQuery() is called by
* finish_xact_command().
*/
}
@@ -2062,6 +2065,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event,
break;
case TRIGGER_EVENT_UPDATE:
/*
* Check if one of the referenced keys is changed.
*/


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/typecmds.c,v 1.12 2002/08/29 00:17:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/typecmds.c,v 1.13 2002/09/04 20:31:16 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -49,7 +49,7 @@
#include "utils/syscache.h"
static Oid findTypeIOFunction(List *procname, Oid typeOid, bool isOutput);
static Oid findTypeIOFunction(List *procname, Oid typeOid, bool isOutput);
/*
* DefineType
@@ -101,15 +101,15 @@ DefineType(List *names, List *parameters)
if (strcasecmp(defel->defname, "internallength") == 0)
internalLength = defGetTypeLength(defel);
else if (strcasecmp(defel->defname, "externallength") == 0)
; /* ignored -- remove after 7.3 */
; /* ignored -- remove after 7.3 */
else if (strcasecmp(defel->defname, "input") == 0)
inputName = defGetQualifiedName(defel);
else if (strcasecmp(defel->defname, "output") == 0)
outputName = defGetQualifiedName(defel);
else if (strcasecmp(defel->defname, "send") == 0)
; /* ignored -- remove after 7.3 */
; /* ignored -- remove after 7.3 */
else if (strcasecmp(defel->defname, "receive") == 0)
; /* ignored -- remove after 7.3 */
; /* ignored -- remove after 7.3 */
else if (strcasecmp(defel->defname, "delimiter") == 0)
{
char *p = defGetString(defel);
@@ -203,8 +203,9 @@ DefineType(List *names, List *parameters)
outputOid = findTypeIOFunction(outputName, typoid, true);
/*
* Verify that I/O procs return the expected thing. OPAQUE is an allowed,
* but deprecated, alternative to the fully type-safe choices.
* Verify that I/O procs return the expected thing. OPAQUE is an
* allowed, but deprecated, alternative to the fully type-safe
* choices.
*/
resulttype = get_func_rettype(inputOid);
if (!(OidIsValid(typoid) && resulttype == typoid))
@@ -229,26 +230,26 @@ DefineType(List *names, List *parameters)
* now have TypeCreate do all the real work.
*/
typoid =
TypeCreate(typeName, /* type name */
typeNamespace, /* namespace */
InvalidOid, /* preassigned type oid (not done here) */
InvalidOid, /* relation oid (n/a here) */
0, /* relation kind (ditto) */
internalLength, /* internal size */
'b', /* type-type (base type) */
delimiter, /* array element delimiter */
inputOid, /* input procedure */
outputOid, /* output procedure */
elemType, /* element type ID */
InvalidOid, /* base type ID (only for domains) */
TypeCreate(typeName, /* type name */
typeNamespace, /* namespace */
InvalidOid, /* preassigned type oid (not done here) */
InvalidOid, /* relation oid (n/a here) */
0, /* relation kind (ditto) */
internalLength, /* internal size */
'b', /* type-type (base type) */
delimiter, /* array element delimiter */
inputOid, /* input procedure */
outputOid, /* output procedure */
elemType, /* element type ID */
InvalidOid, /* base type ID (only for domains) */
defaultValue, /* default type value */
NULL, /* no binary form available */
byValue, /* passed by value */
alignment, /* required alignment */
storage, /* TOAST strategy */
-1, /* typMod (Domains only) */
0, /* Array Dimensions of typbasetype */
false); /* Type NOT NULL */
NULL, /* no binary form available */
byValue, /* passed by value */
alignment, /* required alignment */
storage, /* TOAST strategy */
-1, /* typMod (Domains only) */
0, /* Array Dimensions of typbasetype */
false); /* Type NOT NULL */
/*
* When we create a base type (as opposed to a complex type) we need
@@ -392,7 +393,7 @@ DefineDomain(CreateDomainStmt *stmt)
List *listptr;
Oid basetypeoid;
Oid domainoid;
Form_pg_type baseType;
Form_pg_type baseType;
/* Convert list of names to a name and namespace */
domainNamespace = QualifiedNameGetCreationNamespace(stmt->domainname,
@@ -406,7 +407,7 @@ DefineDomain(CreateDomainStmt *stmt)
/*
* Domainnames, unlike typenames don't need to account for the '_'
* prefix. So they can be one character longer.
* prefix. So they can be one character longer.
*/
if (strlen(domainName) > (NAMEDATALEN - 1))
elog(ERROR, "CREATE DOMAIN: domain names must be %d characters or less",
@@ -421,9 +422,10 @@ DefineDomain(CreateDomainStmt *stmt)
basetypeoid = HeapTupleGetOid(typeTup);
/*
* Base type must be a plain base type. Domains over pseudo types would
* create a security hole. Domains of domains might be made to work in
* the future, but not today. Ditto for domains over complex types.
* Base type must be a plain base type. Domains over pseudo types
* would create a security hole. Domains of domains might be made to
* work in the future, but not today. Ditto for domains over complex
* types.
*/
typtype = baseType->typtype;
if (typtype != 'b')
@@ -450,13 +452,13 @@ DefineDomain(CreateDomainStmt *stmt)
outputProcedure = baseType->typoutput;
/* Inherited default value */
datum = SysCacheGetAttr(TYPEOID, typeTup,
datum = SysCacheGetAttr(TYPEOID, typeTup,
Anum_pg_type_typdefault, &isnull);
if (!isnull)
defaultValue = DatumGetCString(DirectFunctionCall1(textout, datum));
/* Inherited default binary value */
datum = SysCacheGetAttr(TYPEOID, typeTup,
datum = SysCacheGetAttr(TYPEOID, typeTup,
Anum_pg_type_typdefaultbin, &isnull);
if (!isnull)
defaultValueBin = DatumGetCString(DirectFunctionCall1(textout, datum));
@@ -469,11 +471,11 @@ DefineDomain(CreateDomainStmt *stmt)
basetypelem = baseType->typelem;
/*
* Run through constraints manually to avoid the additional
* processing conducted by DefineRelation() and friends.
* Run through constraints manually to avoid the additional processing
* conducted by DefineRelation() and friends.
*
* Besides, we don't want any constraints to be cooked. We'll
* do that when the table is created via MergeDomainAttributes().
* Besides, we don't want any constraints to be cooked. We'll do that
* when the table is created via MergeDomainAttributes().
*/
foreach(listptr, schema)
{
@@ -482,77 +484,79 @@ DefineDomain(CreateDomainStmt *stmt)
switch (colDef->contype)
{
/*
* The inherited default value may be overridden by the user
* with the DEFAULT <expr> statement.
*
* We have to search the entire constraint tree returned as we
* don't want to cook or fiddle too much.
*/
/*
* The inherited default value may be overridden by the
* user with the DEFAULT <expr> statement.
*
* We have to search the entire constraint tree returned as
* we don't want to cook or fiddle too much.
*/
case CONSTR_DEFAULT:
if (defaultExpr)
elog(ERROR, "CREATE DOMAIN has multiple DEFAULT expressions");
/* Create a dummy ParseState for transformExpr */
pstate = make_parsestate(NULL);
/*
* Cook the colDef->raw_expr into an expression.
* Note: Name is strictly for error message
* Cook the colDef->raw_expr into an expression. Note:
* Name is strictly for error message
*/
defaultExpr = cookDefault(pstate, colDef->raw_expr,
basetypeoid,
stmt->typename->typmod,
domainName);
/*
* Expression must be stored as a nodeToString result,
* but we also require a valid textual representation
* (mainly to make life easier for pg_dump).
* Expression must be stored as a nodeToString result, but
* we also require a valid textual representation (mainly
* to make life easier for pg_dump).
*/
defaultValue = deparse_expression(defaultExpr,
deparse_context_for(domainName,
InvalidOid),
false);
deparse_context_for(domainName,
InvalidOid),
false);
defaultValueBin = nodeToString(defaultExpr);
break;
/*
* Find the NULL constraint.
*/
/*
* Find the NULL constraint.
*/
case CONSTR_NOTNULL:
if (nullDefined)
elog(ERROR, "CREATE DOMAIN has conflicting NULL / NOT NULL constraint");
typNotNull = true;
nullDefined = true;
break;
break;
case CONSTR_NULL:
if (nullDefined)
elog(ERROR, "CREATE DOMAIN has conflicting NULL / NOT NULL constraint");
typNotNull = false;
nullDefined = true;
break;
break;
case CONSTR_UNIQUE:
elog(ERROR, "CREATE DOMAIN / UNIQUE indexes not supported");
break;
case CONSTR_UNIQUE:
elog(ERROR, "CREATE DOMAIN / UNIQUE indexes not supported");
break;
case CONSTR_PRIMARY:
elog(ERROR, "CREATE DOMAIN / PRIMARY KEY indexes not supported");
break;
case CONSTR_PRIMARY:
elog(ERROR, "CREATE DOMAIN / PRIMARY KEY indexes not supported");
break;
case CONSTR_CHECK:
elog(ERROR, "DefineDomain: CHECK Constraints not supported");
break;
case CONSTR_CHECK:
elog(ERROR, "DefineDomain: CHECK Constraints not supported");
break;
case CONSTR_ATTR_DEFERRABLE:
case CONSTR_ATTR_NOT_DEFERRABLE:
case CONSTR_ATTR_DEFERRED:
case CONSTR_ATTR_IMMEDIATE:
elog(ERROR, "DefineDomain: DEFERRABLE, NON DEFERRABLE, DEFERRED and IMMEDIATE not supported");
break;
case CONSTR_ATTR_DEFERRABLE:
case CONSTR_ATTR_NOT_DEFERRABLE:
case CONSTR_ATTR_DEFERRED:
case CONSTR_ATTR_IMMEDIATE:
elog(ERROR, "DefineDomain: DEFERRABLE, NON DEFERRABLE, DEFERRED and IMMEDIATE not supported");
break;
default:
elog(ERROR, "DefineDomain: unrecognized constraint node type");
break;
elog(ERROR, "DefineDomain: unrecognized constraint node type");
break;
}
}
@@ -560,33 +564,33 @@ DefineDomain(CreateDomainStmt *stmt)
* Have TypeCreate do all the real work.
*/
domainoid =
TypeCreate(domainName, /* type name */
TypeCreate(domainName, /* type name */
domainNamespace, /* namespace */
InvalidOid, /* preassigned type oid (none here) */
InvalidOid, /* relation oid (n/a here) */
0, /* relation kind (ditto) */
InvalidOid, /* preassigned type oid (none here) */
InvalidOid, /* relation oid (n/a here) */
0, /* relation kind (ditto) */
internalLength, /* internal size */
'd', /* type-type (domain type) */
delimiter, /* array element delimiter */
'd', /* type-type (domain type) */
delimiter, /* array element delimiter */
inputProcedure, /* input procedure */
outputProcedure, /* output procedure */
basetypelem, /* element type ID */
basetypeoid, /* base type ID */
defaultValue, /* default type value (text) */
basetypelem, /* element type ID */
basetypeoid, /* base type ID */
defaultValue, /* default type value (text) */
defaultValueBin, /* default type value (binary) */
byValue, /* passed by value */
alignment, /* required alignment */
storage, /* TOAST strategy */
stmt->typename->typmod, /* typeMod value */
typNDims, /* Array dimensions for base type */
typNotNull); /* Type NOT NULL */
byValue, /* passed by value */
alignment, /* required alignment */
storage, /* TOAST strategy */
stmt->typename->typmod, /* typeMod value */
typNDims, /* Array dimensions for base type */
typNotNull); /* Type NOT NULL */
/*
* Add any dependencies needed for the default expression.
*/
if (defaultExpr)
{
ObjectAddress domobject;
ObjectAddress domobject;
domobject.classId = RelOid_pg_type;
domobject.objectId = domainoid;
@@ -678,10 +682,10 @@ findTypeIOFunction(List *procname, Oid typeOid, bool isOutput)
if (isOutput)
{
/*
* Output functions can take a single argument of the type,
* or two arguments (data value, element OID). The signature
* may use OPAQUE in place of the actual type name; this is the
* only possibility if the type doesn't yet exist as a shell.
* Output functions can take a single argument of the type, or two
* arguments (data value, element OID). The signature may use
* OPAQUE in place of the actual type name; this is the only
* possibility if the type doesn't yet exist as a shell.
*
* Note: although we could throw a NOTICE in this routine if OPAQUE
* is used, we do not because of the probability that it'd be
@@ -728,8 +732,8 @@ findTypeIOFunction(List *procname, Oid typeOid, bool isOutput)
else
{
/*
* Input functions can take a single argument of type CSTRING,
* or three arguments (string, element OID, typmod). The signature
* Input functions can take a single argument of type CSTRING, or
* three arguments (string, element OID, typmod). The signature
* may use OPAQUE in place of CSTRING.
*/
MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid));
@@ -793,7 +797,7 @@ DefineCompositeType(const RangeVar *typevar, List *coldeflist)
if (coldeflist == NIL)
elog(ERROR, "attempted to define composite type relation with"
" no attrs");
" no attrs");
/*
* now create the parameters for keys/inheritance etc. All of them are


@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.110 2002/09/02 02:47:01 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.111 2002/09/04 20:31:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,7 +41,7 @@ extern bool Password_encryption;
static void CheckPgUserAclNotNull(void);
static void UpdateGroupMembership(Relation group_rel, HeapTuple group_tuple,
List *members);
List *members);
static IdList *IdListToArray(List *members);
static List *IdArrayToList(IdList *oldarray);
@@ -52,7 +52,8 @@ static List *IdArrayToList(IdList *oldarray);
* Outputs string in quotes, with double-quotes duplicated.
* We could use quote_ident(), but that expects a TEXT argument.
*/
static void fputs_quote(char *str, FILE *fp)
static void
fputs_quote(char *str, FILE *fp)
{
fputc('"', fp);
while (*str)
@@ -79,7 +80,7 @@ group_getfilename(void)
char *pfnam;
bufsize = strlen(DataDir) + strlen("/global/") +
strlen(USER_GROUP_FILE) + 1;
strlen(USER_GROUP_FILE) + 1;
pfnam = (char *) palloc(bufsize);
snprintf(pfnam, bufsize, "%s/global/%s", DataDir, USER_GROUP_FILE);
@@ -99,7 +100,7 @@ user_getfilename(void)
char *pfnam;
bufsize = strlen(DataDir) + strlen("/global/") +
strlen(PWD_FILE) + 1;
strlen(PWD_FILE) + 1;
pfnam = (char *) palloc(bufsize);
snprintf(pfnam, bufsize, "%s/global/%s", DataDir, PWD_FILE);
@@ -125,8 +126,8 @@ write_group_file(Relation urel, Relation grel)
/*
* Create a temporary filename to be renamed later. This prevents the
* backend from clobbering the pg_group file while the postmaster might
* be reading from it.
* backend from clobbering the pg_group file while the postmaster
* might be reading from it.
*/
filename = group_getfilename();
bufsize = strlen(filename) + 12;
@@ -143,14 +144,16 @@ write_group_file(Relation urel, Relation grel)
scan = heap_beginscan(grel, SnapshotSelf, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
Datum datum, grolist_datum;
Datum datum,
grolist_datum;
bool isnull;
char *groname;
IdList *grolist_p;
AclId *aidp;
int i, j,
int i,
j,
num;
char *usename;
char *usename;
bool first_user = true;
datum = heap_getattr(tuple, Anum_pg_group_groname, dsc, &isnull);
@@ -199,8 +202,8 @@ write_group_file(Relation urel, Relation grel)
continue;
}
/* File format is:
* "dbname" "user1" "user2" "user3"
/*
* File format is: "dbname" "user1" "user2" "user3"
*/
if (first_user)
{
@@ -833,8 +836,8 @@ AlterUserSet(AlterUserSetStmt *stmt)
valuestr = flatten_set_variable_args(stmt->variable, stmt->value);
/*
* RowExclusiveLock is sufficient, because we don't need to update
* the flat password file.
* RowExclusiveLock is sufficient, because we don't need to update the
* flat password file.
*/
rel = heap_openr(ShadowRelationName, RowExclusiveLock);
oldtuple = SearchSysCache(SHADOWNAME,
@@ -844,23 +847,23 @@ AlterUserSet(AlterUserSetStmt *stmt)
elog(ERROR, "user \"%s\" does not exist", stmt->user);
if (!(superuser()
|| ((Form_pg_shadow) GETSTRUCT(oldtuple))->usesysid == GetUserId()))
|| ((Form_pg_shadow) GETSTRUCT(oldtuple))->usesysid == GetUserId()))
elog(ERROR, "permission denied");
for (i = 0; i < Natts_pg_shadow; i++)
repl_repl[i] = ' ';
repl_repl[Anum_pg_shadow_useconfig-1] = 'r';
if (strcmp(stmt->variable, "all")==0 && valuestr == NULL)
repl_repl[Anum_pg_shadow_useconfig - 1] = 'r';
if (strcmp(stmt->variable, "all") == 0 && valuestr == NULL)
/* RESET ALL */
repl_null[Anum_pg_shadow_useconfig-1] = 'n';
repl_null[Anum_pg_shadow_useconfig - 1] = 'n';
else
{
Datum datum;
bool isnull;
ArrayType *array;
Datum datum;
bool isnull;
ArrayType *array;
repl_null[Anum_pg_shadow_useconfig-1] = ' ';
repl_null[Anum_pg_shadow_useconfig - 1] = ' ';
datum = SysCacheGetAttr(SHADOWNAME, oldtuple,
Anum_pg_shadow_useconfig, &isnull);
@@ -872,7 +875,7 @@ AlterUserSet(AlterUserSetStmt *stmt)
else
array = GUCArrayDelete(array, stmt->variable);
repl_val[Anum_pg_shadow_useconfig-1] = PointerGetDatum(array);
repl_val[Anum_pg_shadow_useconfig - 1] = PointerGetDatum(array);
}
newtuple = heap_modifytuple(oldtuple, rel, repl_val, repl_null, repl_repl);
@@ -1253,12 +1256,12 @@ AlterGroup(AlterGroupStmt *stmt, const char *tag)
* create user */
{
/*
* convert the to be added usernames to sysids and add them to
* the list
* convert the to be added usernames to sysids and add them to the
* list
*/
foreach(item, stmt->listUsers)
{
int32 sysid;
int32 sysid;
if (strcmp(tag, "ALTER GROUP") == 0)
{
@@ -1282,6 +1285,7 @@ AlterGroup(AlterGroupStmt *stmt, const char *tag)
if (!intMember(sysid, newlist))
newlist = lappendi(newlist, sysid);
else
/*
* we silently assume here that this error will only come
* up in a ALTER GROUP statement
@@ -1306,8 +1310,8 @@ AlterGroup(AlterGroupStmt *stmt, const char *tag)
else
{
/*
* convert the to be dropped usernames to sysids and
* remove them from the list
* convert the to be dropped usernames to sysids and remove
* them from the list
*/
foreach(item, stmt->listUsers)
{
@@ -1375,7 +1379,7 @@ UpdateGroupMembership(Relation group_rel, HeapTuple group_tuple,
new_record_repl[Anum_pg_group_grolist - 1] = 'r';
tuple = heap_modifytuple(group_tuple, group_rel,
new_record, new_record_nulls, new_record_repl);
new_record, new_record_nulls, new_record_repl);
simple_heap_update(group_rel, &group_tuple->t_self, tuple);
@@ -1401,12 +1405,10 @@ IdListToArray(List *members)
newarray->elemtype = INT4OID;
ARR_NDIM(newarray) = 1; /* one dimensional array */
ARR_LBOUND(newarray)[0] = 1; /* axis starts at one */
ARR_DIMS(newarray)[0] = nmembers; /* axis is this long */
ARR_DIMS(newarray)[0] = nmembers; /* axis is this long */
i = 0;
foreach(item, members)
{
((int *) ARR_DATA_PTR(newarray))[i++] = lfirsti(item);
}
return newarray;
}


@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.236 2002/09/02 01:05:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.237 2002/09/04 20:31:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -100,7 +100,7 @@ typedef struct VRelStats
static MemoryContext vac_context = NULL;
static int elevel = -1;
static int elevel = -1;
static TransactionId OldestXmin;
static TransactionId FreezeLimit;
@@ -204,8 +204,9 @@ vacuum(VacuumStmt *vacstmt)
ALLOCSET_DEFAULT_MAXSIZE);
/*
* If we are running only ANALYZE, we don't need per-table transactions,
* but we still need a memory context with table lifetime.
* If we are running only ANALYZE, we don't need per-table
* transactions, but we still need a memory context with table
* lifetime.
*/
if (vacstmt->analyze && !vacstmt->vacuum)
anl_context = AllocSetContextCreate(QueryContext,
@@ -221,29 +222,29 @@ vacuum(VacuumStmt *vacstmt)
* Formerly, there was code here to prevent more than one VACUUM from
* executing concurrently in the same database. However, there's no
* good reason to prevent that, and manually removing lockfiles after
* a vacuum crash was a pain for dbadmins. So, forget about lockfiles,
* and just rely on the locks we grab on each target table
* a vacuum crash was a pain for dbadmins. So, forget about
* lockfiles, and just rely on the locks we grab on each target table
* to ensure that there aren't two VACUUMs running on the same table
* at the same time.
*/
/*
* The strangeness with committing and starting transactions here is due
* to wanting to run each table's VACUUM as a separate transaction, so
* that we don't hold locks unnecessarily long. Also, if we are doing
* VACUUM ANALYZE, the ANALYZE part runs as a separate transaction from
* the VACUUM to further reduce locking.
* The strangeness with committing and starting transactions here is
* due to wanting to run each table's VACUUM as a separate
* transaction, so that we don't hold locks unnecessarily long. Also,
* if we are doing VACUUM ANALYZE, the ANALYZE part runs as a separate
* transaction from the VACUUM to further reduce locking.
*
* vacuum_rel expects to be entered with no transaction active; it will
* start and commit its own transaction. But we are called by an SQL
* command, and so we are executing inside a transaction already. We
* commit the transaction started in PostgresMain() here, and start
* another one before exiting to match the commit waiting for us back in
* PostgresMain().
* another one before exiting to match the commit waiting for us back
* in PostgresMain().
*
* In the case of an ANALYZE statement (no vacuum, just analyze) it's
* okay to run the whole thing in the outer transaction, and so we skip
* transaction start/stop operations.
* okay to run the whole thing in the outer transaction, and so we
* skip transaction start/stop operations.
*/
if (vacstmt->vacuum)
{
@@ -254,22 +255,23 @@ vacuum(VacuumStmt *vacstmt)
*
* Compute the initially applicable OldestXmin and FreezeLimit
* XIDs, so that we can record these values at the end of the
* VACUUM. Note that individual tables may well be processed with
* newer values, but we can guarantee that no (non-shared)
* relations are processed with older ones.
* VACUUM. Note that individual tables may well be processed
* with newer values, but we can guarantee that no
* (non-shared) relations are processed with older ones.
*
* It is okay to record non-shared values in pg_database, even though
* we may vacuum shared relations with older cutoffs, because only
* the minimum of the values present in pg_database matters. We
* can be sure that shared relations have at some time been
* vacuumed with cutoffs no worse than the global minimum; for, if
* there is a backend in some other DB with xmin = OLDXMIN that's
* determining the cutoff with which we vacuum shared relations,
* it is not possible for that database to have a cutoff newer
* than OLDXMIN recorded in pg_database.
* It is okay to record non-shared values in pg_database, even
* though we may vacuum shared relations with older cutoffs,
* because only the minimum of the values present in
* pg_database matters. We can be sure that shared relations
* have at some time been vacuumed with cutoffs no worse than
* the global minimum; for, if there is a backend in some
* other DB with xmin = OLDXMIN that's determining the cutoff
* with which we vacuum shared relations, it is not possible
* for that database to have a cutoff newer than OLDXMIN
* recorded in pg_database.
*/
vacuum_set_xid_limits(vacstmt, false,
&initialOldestXmin, &initialFreezeLimit);
&initialOldestXmin, &initialFreezeLimit);
}
/* matches the StartTransaction in PostgresMain() */
@@ -281,7 +283,7 @@ vacuum(VacuumStmt *vacstmt)
*/
foreach(cur, vrl)
{
Oid relid = (Oid) lfirsti(cur);
Oid relid = (Oid) lfirsti(cur);
if (vacstmt->vacuum)
vacuum_rel(relid, vacstmt, RELKIND_RELATION);
@@ -290,10 +292,11 @@ vacuum(VacuumStmt *vacstmt)
MemoryContext old_context = NULL;
/*
* If we vacuumed, use new transaction for analyze. Otherwise,
* we can use the outer transaction, but we still need to call
* analyze_rel in a memory context that will be cleaned up on
* return (else we leak memory while processing multiple tables).
* If we vacuumed, use new transaction for analyze.
* Otherwise, we can use the outer transaction, but we still
* need to call analyze_rel in a memory context that will be
* cleaned up on return (else we leak memory while processing
* multiple tables).
*/
if (vacstmt->vacuum)
StartTransactionCommand(true);
@@ -320,16 +323,17 @@ vacuum(VacuumStmt *vacstmt)
/* here, we are not in a transaction */
/*
* This matches the CommitTransaction waiting for us in PostgresMain().
* We tell xact.c not to chain the upcoming commit, so that a VACUUM
* doesn't start a transaction block, even when autocommit is off.
* This matches the CommitTransaction waiting for us in
* PostgresMain(). We tell xact.c not to chain the upcoming
* commit, so that a VACUUM doesn't start a transaction block,
* even when autocommit is off.
*/
StartTransactionCommand(true);
/*
* If we did a database-wide VACUUM, update the database's pg_database
* row with info about the transaction IDs used, and try to truncate
* pg_clog.
* If we did a database-wide VACUUM, update the database's
* pg_database row with info about the transaction IDs used, and
* try to truncate pg_clog.
*/
if (vacstmt->relation == NULL)
{
@@ -366,7 +370,7 @@ getrels(const RangeVar *vacrel, const char *stmttype)
if (vacrel)
{
/* Process specific relation */
Oid relid;
Oid relid;
relid = RangeVarGetRelid(vacrel, false);
@@ -517,9 +521,9 @@ vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples,
/*
* Invalidate the tuple in the catcaches; this also arranges to flush
* the relation's relcache entry. (If we fail to commit for some reason,
* no flush will occur, but no great harm is done since there are no
* noncritical state updates here.)
* the relation's relcache entry. (If we fail to commit for some
* reason, no flush will occur, but no great harm is done since there
* are no noncritical state updates here.)
*/
CacheInvalidateHeapTuple(rd, &rtup);
@@ -647,8 +651,8 @@ vac_truncate_clog(TransactionId vacuumXID, TransactionId frozenXID)
heap_close(relation, AccessShareLock);
/*
* Do not truncate CLOG if we seem to have suffered wraparound already;
* the computed minimum XID might be bogus.
* Do not truncate CLOG if we seem to have suffered wraparound
* already; the computed minimum XID might be bogus.
*/
if (vacuumAlreadyWrapped)
{
@@ -740,7 +744,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
*
* We allow the user to vacuum a table if he is superuser, the table
* owner, or the database owner (but in the latter case, only if it's
* not a shared relation). pg_class_ownercheck includes the superuser case.
* not a shared relation). pg_class_ownercheck includes the superuser
* case.
*
* Note we choose to treat permissions failure as a WARNING and keep
* trying to vacuum the rest of the DB --- is this appropriate?
@@ -1581,21 +1586,23 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* by "recent" transactions then we have to move all chain of
* tuples to another places.
*
* NOTE: this test is not 100% accurate: it is possible for
* a tuple to be an updated one with recent xmin, and yet not
* have a corresponding tuple in the vtlinks list. Presumably
* NOTE: this test is not 100% accurate: it is possible for a
* tuple to be an updated one with recent xmin, and yet not
* have a corresponding tuple in the vtlinks list. Presumably
* there was once a parent tuple with xmax matching the xmin,
* but it's possible that that tuple has been removed --- for
* example, if it had xmin = xmax then HeapTupleSatisfiesVacuum
* would deem it removable as soon as the xmin xact completes.
* example, if it had xmin = xmax then
* HeapTupleSatisfiesVacuum would deem it removable as soon as
* the xmin xact completes.
*
* To be on the safe side, we abandon the repair_frag process if
* we cannot find the parent tuple in vtlinks. This may be overly
* conservative; AFAICS it would be safe to move the chain.
* we cannot find the parent tuple in vtlinks. This may be
* overly conservative; AFAICS it would be safe to move the
* chain.
*/
if (((tuple.t_data->t_infomask & HEAP_UPDATED) &&
!TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
OldestXmin)) ||
OldestXmin)) ||
(!(tuple.t_data->t_infomask & (HEAP_XMAX_INVALID |
HEAP_MARKED_FOR_UPDATE)) &&
!(ItemPointerEquals(&(tuple.t_self),
@@ -1626,7 +1633,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (vacrelstats->vtlinks == NULL)
{
elog(WARNING, "Parent item in update-chain not found - can't continue repair_frag");
break; /* out of walk-along-page loop */
break; /* out of walk-along-page loop */
}
vtmove = (VTupleMove) palloc(100 * sizeof(VTupleMoveData));
@@ -1638,7 +1645,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* we have to move to the end of chain.
*/
while (!(tp.t_data->t_infomask & (HEAP_XMAX_INVALID |
HEAP_MARKED_FOR_UPDATE)) &&
HEAP_MARKED_FOR_UPDATE)) &&
!(ItemPointerEquals(&(tp.t_self),
&(tp.t_data->t_ctid))))
{
@@ -1704,7 +1711,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
{
/* can't move item anywhere */
chain_move_failed = true;
break; /* out of check-all-items loop */
break; /* out of check-all-items loop */
}
to_item = i;
to_vacpage = fraged_pages->pagedesc[to_item];
@@ -1732,8 +1739,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/* At beginning of chain? */
if (!(tp.t_data->t_infomask & HEAP_UPDATED) ||
TransactionIdPrecedes(HeapTupleHeaderGetXmin(tp.t_data),
OldestXmin))
TransactionIdPrecedes(HeapTupleHeaderGetXmin(tp.t_data),
OldestXmin))
break;
/* No, move to tuple with prior row version */
@@ -1749,14 +1756,14 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/* see discussion above */
elog(WARNING, "Parent item in update-chain not found - can't continue repair_frag");
chain_move_failed = true;
break; /* out of check-all-items loop */
break; /* out of check-all-items loop */
}
tp.t_self = vtlp->this_tid;
Pbuf = ReadBuffer(onerel,
ItemPointerGetBlockNumber(&(tp.t_self)));
ItemPointerGetBlockNumber(&(tp.t_self)));
Ppage = BufferGetPage(Pbuf);
Pitemid = PageGetItemId(Ppage,
ItemPointerGetOffsetNumber(&(tp.t_self)));
ItemPointerGetOffsetNumber(&(tp.t_self)));
/* this can't happen since we saw tuple earlier: */
if (!ItemIdIsUsed(Pitemid))
elog(ERROR, "Parent itemid marked as unused");
@@ -1768,25 +1775,24 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
&(Ptp.t_data->t_ctid)));
/*
* Read above about cases when
* !ItemIdIsUsed(Citemid) (child item is
* removed)... Due to the fact that at the moment
* we don't remove unuseful part of update-chain,
* it's possible to get too old parent row here.
* Like as in the case which caused this problem,
* we stop shrinking here. I could try to find
* real parent row but want not to do it because
* of real solution will be implemented anyway,
* later, and we are too close to 6.5 release. -
* vadim 06/11/99
* Read above about cases when !ItemIdIsUsed(Citemid)
* (child item is removed)... Due to the fact that at
* the moment we don't remove unuseful part of
* update-chain, it's possible to get too old parent
* row here. Like as in the case which caused this
* problem, we stop shrinking here. I could try to
* find real parent row but want not to do it because
* of real solution will be implemented anyway, later,
* and we are too close to 6.5 release. - vadim
* 06/11/99
*/
if (!(TransactionIdEquals(HeapTupleHeaderGetXmax(Ptp.t_data),
HeapTupleHeaderGetXmin(tp.t_data))))
HeapTupleHeaderGetXmin(tp.t_data))))
{
ReleaseBuffer(Pbuf);
elog(WARNING, "Too old parent tuple found - can't continue repair_frag");
chain_move_failed = true;
break; /* out of check-all-items loop */
break; /* out of check-all-items loop */
}
tp.t_datamcxt = Ptp.t_datamcxt;
tp.t_data = Ptp.t_data;
@@ -1795,7 +1801,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
ReleaseBuffer(Cbuf);
Cbuf = Pbuf;
freeCbuf = true;
} /* end of check-all-items loop */
} /* end of check-all-items loop */
if (freeCbuf)
ReleaseBuffer(Cbuf);
@@ -1804,9 +1810,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (chain_move_failed)
{
/*
* Undo changes to offsets_used state. We don't bother
* cleaning up the amount-free state, since we're not
* going to do any further tuple motion.
* Undo changes to offsets_used state. We don't
* bother cleaning up the amount-free state, since
* we're not going to do any further tuple motion.
*/
for (i = 0; i < num_vtmove; i++)
{
@@ -1939,7 +1945,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
}
else
{
/* No XLOG record, but still need to flag that XID exists on disk */
/*
* No XLOG record, but still need to flag that XID
* exists on disk
*/
MyXactMadeTempRelUpdate = true;
}
@@ -1985,7 +1994,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
WriteBuffer(cur_buffer);
WriteBuffer(Cbuf);
} /* end of move-the-tuple-chain loop */
} /* end of move-the-tuple-chain loop */
cur_buffer = InvalidBuffer;
pfree(vtmove);
@@ -1993,7 +2002,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/* advance to next tuple in walk-along-page loop */
continue;
} /* end of is-tuple-in-chain test */
} /* end of is-tuple-in-chain test */
/* try to find new page for this tuple */
if (cur_buffer == InvalidBuffer ||
@@ -2031,10 +2040,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* register invalidation of source tuple in catcaches.
*
* (Note: we do not need to register the copied tuple,
* because we are not changing the tuple contents and
* so there cannot be any need to flush negative
* catcache entries.)
* (Note: we do not need to register the copied tuple, because we
* are not changing the tuple contents and so there cannot be
* any need to flush negative catcache entries.)
*/
CacheInvalidateHeapTuple(onerel, &tuple);
@@ -2090,7 +2098,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
}
else
{
/* No XLOG record, but still need to flag that XID exists on disk */
/*
* No XLOG record, but still need to flag that XID exists
* on disk
*/
MyXactMadeTempRelUpdate = true;
}
@@ -2116,8 +2127,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
} /* walk along page */
/*
* If we broke out of the walk-along-page loop early (ie, still have
* offnum <= maxoff), then we failed to move some tuple off
* If we broke out of the walk-along-page loop early (ie, still
* have offnum <= maxoff), then we failed to move some tuple off
* this page. No point in shrinking any more, so clean up and
* exit the per-page loop.
*/
@@ -2126,7 +2137,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
OffsetNumber off;
/*
* Fix vacpage state for any unvisited tuples remaining on page
* Fix vacpage state for any unvisited tuples remaining on
* page
*/
for (off = OffsetNumberNext(offnum);
off <= maxoff;
@@ -2389,7 +2401,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
}
else
{
/* No XLOG record, but still need to flag that XID exists on disk */
/*
* No XLOG record, but still need to flag that XID exists
* on disk
*/
MyXactMadeTempRelUpdate = true;
}


@@ -31,7 +31,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.18 2002/08/06 02:36:34 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.19 2002/09/04 20:31:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,7 +93,7 @@ typedef struct LVRelStats
} LVRelStats;
static int elevel = -1;
static int elevel = -1;
static TransactionId OldestXmin;
static TransactionId FreezeLimit;
@@ -756,7 +756,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
*/
elog(elevel, "Truncated %u --> %u pages.\n\t%s", old_rel_pages,
new_rel_pages, vac_show_rusage(&ru0));
new_rel_pages, vac_show_rusage(&ru0));
}
/*

View File

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.70 2002/07/18 02:02:29 ishii Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.71 2002/09/04 20:31:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -111,8 +111,8 @@ assign_datestyle(const char *value, bool doit, bool interactive)
* Easiest way to get the current DEFAULT state is to fetch
* the DEFAULT string from guc.c and recursively parse it.
*
* We can't simply "return assign_datestyle(...)" because we
* need to handle constructs like "DEFAULT, ISO".
* We can't simply "return assign_datestyle(...)" because we need
* to handle constructs like "DEFAULT, ISO".
*/
int saveDateStyle = DateStyle;
bool saveEuroDates = EuroDates;
@@ -164,7 +164,7 @@ assign_datestyle(const char *value, bool doit, bool interactive)
return value;
/*
* Prepare the canonical string to return. GUC wants it malloc'd.
* Prepare the canonical string to return. GUC wants it malloc'd.
*/
result = (char *) malloc(32);
if (!result)
@@ -188,8 +188,8 @@ assign_datestyle(const char *value, bool doit, bool interactive)
strcat(result, newEuroDates ? ", EURO" : ", US");
/*
* Finally, it's safe to assign to the global variables;
* the assignment cannot fail now.
* Finally, it's safe to assign to the global variables; the
* assignment cannot fail now.
*/
DateStyle = newDateStyle;
EuroDates = newEuroDates;
@@ -203,7 +203,7 @@ assign_datestyle(const char *value, bool doit, bool interactive)
const char *
show_datestyle(void)
{
static char buf[64];
static char buf[64];
switch (DateStyle)
{
@@ -270,6 +270,7 @@ assign_timezone(const char *value, bool doit, bool interactive)
return NULL;
}
*endptr = '\0';
/*
* Try to parse it. XXX an invalid interval format will result in
* elog, which is not desirable for GUC. We did what we could to
@@ -277,9 +278,9 @@ assign_timezone(const char *value, bool doit, bool interactive)
* coming in from postgresql.conf might contain anything.
*/
interval = DatumGetIntervalP(DirectFunctionCall3(interval_in,
CStringGetDatum(val),
ObjectIdGetDatum(InvalidOid),
Int32GetDatum(-1)));
CStringGetDatum(val),
ObjectIdGetDatum(InvalidOid),
Int32GetDatum(-1)));
pfree(val);
if (interval->month != 0)
{
@@ -318,8 +319,8 @@ assign_timezone(const char *value, bool doit, bool interactive)
* available under Solaris, among others. Apparently putenv()
* called as below clears the process-specific environment
* variables. Other reasonable arguments to putenv() (e.g.
* "TZ=", "TZ", "") result in a core dump (under Linux anyway).
* - thomas 1998-01-26
* "TZ=", "TZ", "") result in a core dump (under Linux
* anyway). - thomas 1998-01-26
*/
if (doit)
{
@@ -339,13 +340,14 @@ assign_timezone(const char *value, bool doit, bool interactive)
* Otherwise assume it is a timezone name.
*
* XXX unfortunately we have no reasonable way to check whether a
* timezone name is good, so we have to just assume that it is.
* timezone name is good, so we have to just assume that it
* is.
*/
if (doit)
{
strcpy(tzbuf, "TZ=");
strncat(tzbuf, value, sizeof(tzbuf)-4);
if (putenv(tzbuf) != 0) /* shouldn't happen? */
strncat(tzbuf, value, sizeof(tzbuf) - 4);
if (putenv(tzbuf) != 0) /* shouldn't happen? */
elog(LOG, "assign_timezone: putenv failed");
tzset();
HasCTZSet = false;
@@ -360,7 +362,7 @@ assign_timezone(const char *value, bool doit, bool interactive)
return value;
/*
* Prepare the canonical string to return. GUC wants it malloc'd.
* Prepare the canonical string to return. GUC wants it malloc'd.
*/
result = (char *) malloc(sizeof(tzbuf));
if (!result)
@@ -372,13 +374,9 @@ assign_timezone(const char *value, bool doit, bool interactive)
(double) CTimeZone / 3600.0);
}
else if (tzbuf[0] == 'T')
{
strcpy(result, tzbuf + 3);
}
else
{
strcpy(result, "UNKNOWN");
}
return result;
}
@@ -399,7 +397,7 @@ show_timezone(void)
interval.time = CTimeZone;
tzn = DatumGetCString(DirectFunctionCall1(interval_out,
IntervalPGetDatum(&interval)));
IntervalPGetDatum(&interval)));
}
else
tzn = getenv("TZ");
@@ -422,11 +420,20 @@ assign_XactIsoLevel(const char *value, bool doit, bool interactive)
elog(ERROR, "SET TRANSACTION ISOLATION LEVEL must be called before any query");
if (strcmp(value, "serializable") == 0)
{ if (doit) XactIsoLevel = XACT_SERIALIZABLE; }
{
if (doit)
XactIsoLevel = XACT_SERIALIZABLE;
}
else if (strcmp(value, "read committed") == 0)
{ if (doit) XactIsoLevel = XACT_READ_COMMITTED; }
{
if (doit)
XactIsoLevel = XACT_READ_COMMITTED;
}
else if (strcmp(value, "default") == 0)
{ if (doit) XactIsoLevel = DefaultXactIsoLevel; }
{
if (doit)
XactIsoLevel = DefaultXactIsoLevel;
}
else
return NULL;
@@ -475,11 +482,12 @@ assign_client_encoding(const char *value, bool doit, bool interactive)
encoding = pg_valid_client_encoding(value);
if (encoding < 0)
return NULL;
/* XXX SetClientEncoding depends on namespace functions which are
* not available at startup time. So we accept requested client
* encoding anyway which might not be valid (e.g. no conversion
* procs available).
/*
* XXX SetClientEncoding depends on namespace functions which are not
* available at startup time. So we accept requested client encoding
* anyway which might not be valid (e.g. no conversion procs
* available).
*/
if (SetClientEncoding(encoding, doit) < 0)
{
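Aside: the TZ manipulation that the assign_timezone hunks above rewrap is the ordinary putenv()/tzset() idiom. A minimal standalone sketch follows; the buffer size and example zone are illustrative, not the backend's own values.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int
main(void)
{
	/* The string handed to putenv() must stay alive afterwards, hence static. */
	static char tzbuf[64];
	time_t		now;

	snprintf(tzbuf, sizeof(tzbuf), "TZ=%s", "UTC");
	if (putenv(tzbuf) != 0)		/* "shouldn't happen", as the comment above puts it */
		return 1;
	tzset();					/* make libc re-read TZ */

	now = time(NULL);
	printf("%s", asctime(localtime(&now)));
	return 0;
}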

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/view.c,v 1.70 2002/09/02 20:04:40 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/view.c,v 1.71 2002/09/04 20:31:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,7 +55,7 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace)
* the (non-junk) targetlist items from the view's SELECT list.
*/
attrList = NIL;
foreach (t, tlist)
foreach(t, tlist)
{
TargetEntry *entry = lfirst(t);
Resdom *res = entry->resdom;
@@ -115,32 +115,32 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace)
* Create a tuple descriptor to compare against the existing view,
* and verify it matches.
*/
descriptor = BuildDescForRelation(attrList);
descriptor = BuildDescForRelation(attrList);
checkViewTupleDesc(descriptor, rel->rd_att);
/*
* Seems okay, so return the OID of the pre-existing view.
*/
relation_close(rel, NoLock); /* keep the lock! */
relation_close(rel, NoLock); /* keep the lock! */
return viewOid;
}
else
{
/*
* now create the parameters for keys/inheritance etc. All of them are
* nil...
* now create the parameters for keys/inheritance etc. All of them
* are nil...
*/
createStmt->relation = (RangeVar *) relation;
createStmt->tableElts = attrList;
createStmt->inhRelations = NIL;
createStmt->constraints = NIL;
createStmt->hasoids = false;
/*
* finally create the relation (this will error out if there's
* an existing view, so we don't need more code to complain
* if "replace" is false).
* finally create the relation (this will error out if there's an
* existing view, so we don't need more code to complain if
* "replace" is false).
*/
return DefineRelation(createStmt, RELKIND_VIEW);
}
@@ -179,6 +179,7 @@ checkViewTupleDesc(TupleDesc newdesc, TupleDesc olddesc)
NameStr(oldattr->attname));
/* We can ignore the remaining attributes of an attribute... */
}
/*
* We ignore the constraint fields. The new view desc can't have any
* constraints, and the only ones that could be on the old view are
@@ -316,8 +317,8 @@ DefineView(const RangeVar *view, Query *viewParse, bool replace)
/*
* Create the view relation
*
* NOTE: if it already exists and replace is false, the xact will
* be aborted.
* NOTE: if it already exists and replace is false, the xact will be
* aborted.
*/
viewOid = DefineVirtualRelation(view, viewParse->targetList, replace);

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.31 2002/07/20 05:16:57 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.32 2002/09/04 20:31:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -383,8 +383,8 @@ ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot)
* information for the new "clean" tuple.
*
* Note: we use memory on the stack to optimize things when we are
* dealing with a small number of attributes. for large tuples we
* just use palloc.
* dealing with a small number of attributes. for large tuples we just
* use palloc.
*/
if (cleanLength > 64)
{
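The stack-versus-palloc note above is the usual small-buffer optimization; reduced to a generic standalone form (the 64-element cutoff mirrors the one in the hunk, everything else is illustrative):

#include <stdlib.h>
#include <string.h>

/* Keep the common small case on the stack; spill to the heap only for
 * large inputs, as the comment above describes. */
static void
copy_values(const int *src, int n)
{
	int			stackbuf[64];
	int		   *buf;

	buf = (n > 64) ? (int *) malloc(n * sizeof(int)) : stackbuf;
	if (buf == NULL)
		return;
	memcpy(buf, src, n * sizeof(int));
	/* ... work with buf ... */
	if (buf != stackbuf)
		free(buf);
}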

View File

@@ -27,7 +27,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.177 2002/09/02 01:05:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.178 2002/09/04 20:31:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,14 +63,14 @@ static TupleTableSlot *ExecutePlan(EState *estate, Plan *plan,
ScanDirection direction,
DestReceiver *destfunc);
static void ExecSelect(TupleTableSlot *slot,
DestReceiver *destfunc,
EState *estate);
DestReceiver *destfunc,
EState *estate);
static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
EState *estate);
static void ExecDelete(TupleTableSlot *slot, ItemPointer tupleid,
EState *estate);
static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
EState *estate);
EState *estate);
static TupleTableSlot *EvalPlanQualNext(EState *estate);
static void EndEvalPlanQual(EState *estate);
static void ExecCheckQueryPerms(CmdType operation, Query *parseTree,
@@ -116,9 +116,9 @@ ExecutorStart(QueryDesc *queryDesc, EState *estate)
/*
* Make our own private copy of the current query snapshot data.
*
* This "freezes" our idea of which tuples are good and which are not
* for the life of this query, even if it outlives the current command
* and current snapshot.
* This "freezes" our idea of which tuples are good and which are not for
* the life of this query, even if it outlives the current command and
* current snapshot.
*/
estate->es_snapshot = CopyQuerySnapshot();
@@ -353,12 +353,13 @@ ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation)
Oid userid;
AclResult aclcheck_result;
/*
/*
* Only plain-relation RTEs need to be checked here. Subquery RTEs
* will be checked when ExecCheckPlanPerms finds the SubqueryScan node,
* and function RTEs are checked by init_fcache when the function is
* prepared for execution. Join and special RTEs need no checks.
*/
* will be checked when ExecCheckPlanPerms finds the SubqueryScan
* node, and function RTEs are checked by init_fcache when the
* function is prepared for execution. Join and special RTEs need no
* checks.
*/
if (rte->rtekind != RTE_RELATION)
return;
@@ -1071,7 +1072,8 @@ lnext: ;
slot = ExecStoreTuple(newTuple, /* tuple to store */
junkfilter->jf_resultSlot, /* dest slot */
InvalidBuffer, /* this tuple has no buffer */
InvalidBuffer, /* this tuple has no
* buffer */
true); /* tuple should be pfreed */
}
@@ -1083,8 +1085,9 @@ lnext: ;
switch (operation)
{
case CMD_SELECT:
ExecSelect(slot, /* slot containing tuple */
destfunc, /* destination's tuple-receiver obj */
ExecSelect(slot, /* slot containing tuple */
destfunc, /* destination's tuple-receiver
* obj */
estate);
result = slot;
break;
@@ -1357,8 +1360,8 @@ ldelete:;
*/
static void
ExecUpdate(TupleTableSlot *slot,
ItemPointer tupleid,
EState *estate)
ItemPointer tupleid,
EState *estate)
{
HeapTuple tuple;
ResultRelInfo *resultRelInfo;

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.107 2002/09/02 01:05:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.108 2002/09/04 20:31:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,7 +55,7 @@ static Datum ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull);
static Datum ExecEvalOper(Expr *opClause, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalDistinct(Expr *opClause, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalFunc(Expr *funcClause, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static ExprDoneCond ExecEvalFuncArgs(FunctionCallInfo fcinfo,
@@ -70,8 +70,8 @@ static Datum ExecEvalNullTest(NullTest *ntest, ExprContext *econtext,
static Datum ExecEvalBooleanTest(BooleanTest *btest, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalConstraintTest(ConstraintTest *constraint,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
/*----------
@@ -848,7 +848,7 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
* ExecMakeTableFunctionResult
*
* Evaluate a table function, producing a materialized result in a Tuplestore
* object. (If function returns an empty set, we just return NULL instead.)
* object. (If function returns an empty set, we just return NULL instead.)
*/
Tuplestorestate *
ExecMakeTableFunctionResult(Expr *funcexpr,
@@ -871,13 +871,14 @@ ExecMakeTableFunctionResult(Expr *funcexpr,
bool returnsTuple = false;
/* Extract data from function-call expression node */
if (!funcexpr || !IsA(funcexpr, Expr) || funcexpr->opType != FUNC_EXPR)
if (!funcexpr || !IsA(funcexpr, Expr) ||funcexpr->opType != FUNC_EXPR)
elog(ERROR, "ExecMakeTableFunctionResult: expression is not a function call");
func = (Func *) funcexpr->oper;
argList = funcexpr->args;
/*
* get the fcache from the Func node. If it is NULL, then initialize it
* get the fcache from the Func node. If it is NULL, then initialize
* it
*/
fcache = func->func_fcache;
if (fcache == NULL)
@@ -892,7 +893,7 @@ ExecMakeTableFunctionResult(Expr *funcexpr,
*
* Note: ideally, we'd do this in the per-tuple context, but then the
* argument values would disappear when we reset the context in the
* inner loop. So do it in caller context. Perhaps we should make a
* inner loop. So do it in caller context. Perhaps we should make a
* separate context just to hold the evaluated arguments?
*/
MemSet(&fcinfo, 0, sizeof(fcinfo));
@@ -921,8 +922,9 @@ ExecMakeTableFunctionResult(Expr *funcexpr,
}
/*
* Prepare a resultinfo node for communication. We always do this even
* if not expecting a set result, so that we can pass expectedDesc.
* Prepare a resultinfo node for communication. We always do this
* even if not expecting a set result, so that we can pass
* expectedDesc.
*/
fcinfo.resultinfo = (Node *) &rsinfo;
rsinfo.type = T_ReturnSetInfo;
@@ -948,8 +950,9 @@ ExecMakeTableFunctionResult(Expr *funcexpr,
HeapTuple tuple;
/*
* reset per-tuple memory context before each call of the function.
* This cleans up any local memory the function may leak when called.
* reset per-tuple memory context before each call of the
* function. This cleans up any local memory the function may leak
* when called.
*/
ResetExprContext(econtext);
@@ -964,18 +967,20 @@ ExecMakeTableFunctionResult(Expr *funcexpr,
/*
* Check for end of result set.
*
* Note: if function returns an empty set, we don't build a
* Note: if function returns an empty set, we don't build a
* tupdesc or tuplestore (since we can't get a tupdesc in the
* function-returning-tuple case)
*/
if (rsinfo.isDone == ExprEndResult)
break;
/*
* If first time through, build tupdesc and tuplestore for result
* If first time through, build tupdesc and tuplestore for
* result
*/
if (first_time)
{
Oid funcrettype = funcexpr->typeOid;
Oid funcrettype = funcexpr->typeOid;
oldcontext = MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
if (funcrettype == RECORDOID ||
@@ -1006,7 +1011,7 @@ ExecMakeTableFunctionResult(Expr *funcexpr,
0,
false);
}
tupstore = tuplestore_begin_heap(true, /* randomAccess */
tupstore = tuplestore_begin_heap(true, /* randomAccess */
SortMem);
MemoryContextSwitchTo(oldcontext);
rsinfo.setResult = tupstore;
@@ -1026,7 +1031,7 @@ ExecMakeTableFunctionResult(Expr *funcexpr,
}
else
{
char nullflag;
char nullflag;
nullflag = fcinfo.isnull ? 'n' : ' ';
tuple = heap_formtuple(tupdesc, &result, &nullflag);
@@ -1180,7 +1185,7 @@ ExecEvalDistinct(Expr *opClause,
bool *isNull,
ExprDoneCond *isDone)
{
bool result;
bool result;
FunctionCachePtr fcache;
FunctionCallInfoData fcinfo;
ExprDoneCond argDone;

View File

@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.58 2002/09/02 01:05:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.59 2002/09/04 20:31:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -681,7 +681,7 @@ ExecTypeFromTL(List *targetList, bool hasoid)
TupleTableSlot *
TupleDescGetSlot(TupleDesc tupdesc)
{
TupleTableSlot *slot;
TupleTableSlot *slot;
/* Make a standalone slot */
slot = MakeTupleTableSlot();
@@ -701,19 +701,20 @@ TupleDescGetSlot(TupleDesc tupdesc)
AttInMetadata *
TupleDescGetAttInMetadata(TupleDesc tupdesc)
{
int natts = tupdesc->natts;
int i;
Oid atttypeid;
Oid attinfuncid;
FmgrInfo *attinfuncinfo;
Oid *attelems;
int32 *atttypmods;
AttInMetadata *attinmeta;
int natts = tupdesc->natts;
int i;
Oid atttypeid;
Oid attinfuncid;
FmgrInfo *attinfuncinfo;
Oid *attelems;
int32 *atttypmods;
AttInMetadata *attinmeta;
attinmeta = (AttInMetadata *) palloc(sizeof(AttInMetadata));
/*
* Gather info needed later to call the "in" function for each attribute
* Gather info needed later to call the "in" function for each
* attribute
*/
attinfuncinfo = (FmgrInfo *) palloc(natts * sizeof(FmgrInfo));
attelems = (Oid *) palloc(natts * sizeof(Oid));
@@ -741,14 +742,14 @@ TupleDescGetAttInMetadata(TupleDesc tupdesc)
HeapTuple
BuildTupleFromCStrings(AttInMetadata *attinmeta, char **values)
{
TupleDesc tupdesc = attinmeta->tupdesc;
int natts = tupdesc->natts;
Datum *dvalues;
char *nulls;
int i;
Oid attelem;
int32 atttypmod;
HeapTuple tuple;
TupleDesc tupdesc = attinmeta->tupdesc;
int natts = tupdesc->natts;
Datum *dvalues;
char *nulls;
int i;
Oid attelem;
int32 atttypmod;
HeapTuple tuple;
dvalues = (Datum *) palloc(natts * sizeof(Datum));
nulls = (char *) palloc(natts * sizeof(char));
@@ -843,13 +844,14 @@ do_text_output_multiline(TupOutputState *tstate, char *text)
{
while (*text)
{
char *eol;
char *eol;
eol = strchr(text, '\n');
if (eol)
*eol++ = '\0';
else
eol = text + strlen(text);
eol = text +strlen(text);
do_tup_output(tstate, &text);
text = eol;
}
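The AttInMetadata / BuildTupleFromCStrings pair being reindented above is meant to be used roughly as follows. This is a hedged fragment, not backend code: it assumes the backend declarations are in scope and that a TupleDesc named tupdesc is already at hand; the column values are purely illustrative.

	AttInMetadata *attinmeta;
	char	   *values[2];
	HeapTuple	tuple;

	/* Compute the per-column input-function info once per tuple descriptor... */
	attinmeta = TupleDescGetAttInMetadata(tupdesc);

	/* ...then build any number of tuples from plain C strings. */
	values[0] = "42";			/* e.g. an int4 column */
	values[1] = "hello";		/* e.g. a text column */
	tuple = BuildTupleFromCStrings(attinmeta, values);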

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.89 2002/09/02 01:05:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.90 2002/09/04 20:31:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -297,19 +297,19 @@ ExecAssignResultTypeFromTL(Plan *node, CommonState *commonstate)
/*
* This is pretty grotty: we need to ensure that result tuples have
* space for an OID iff they are going to be stored into a relation
* that has OIDs. We assume that estate->es_result_relation_info
* is already set up to describe the target relation. One reason
* this is ugly is that all plan nodes in the plan tree will emit
* tuples with space for an OID, though we really only need the topmost
* plan to do so.
* that has OIDs. We assume that estate->es_result_relation_info is
* already set up to describe the target relation. One reason this is
* ugly is that all plan nodes in the plan tree will emit tuples with
* space for an OID, though we really only need the topmost plan to do
* so.
*
* It would be better to have InitPlan adjust the topmost plan node's
* output descriptor after plan tree initialization. However, that
* doesn't quite work because in an UPDATE that spans an inheritance
* tree, some of the target relations may have OIDs and some not.
* We have to make the decision on a per-relation basis as we initialize
* each of the child plans of the topmost Append plan. So, this is ugly
* but it works, for now ...
* tree, some of the target relations may have OIDs and some not. We
* have to make the decision on a per-relation basis as we initialize
* each of the child plans of the topmost Append plan. So, this is
* ugly but it works, for now ...
*/
ri = node->state->es_result_relation_info;
if (ri != NULL)
@@ -319,7 +319,7 @@ ExecAssignResultTypeFromTL(Plan *node, CommonState *commonstate)
if (rel != NULL)
hasoid = rel->rd_rel->relhasoids;
}
tupDesc = ExecTypeFromTL(node->targetlist, hasoid);
ExecAssignResultType(commonstate, tupDesc, true);
}
@@ -696,7 +696,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
nullv);
/*
* The index AM does the rest. Note we suppress unique-index
* The index AM does the rest. Note we suppress unique-index
* checks if we are being called from VACUUM, since VACUUM may
* need to move dead tuples that have the same keys as live ones.
*/
@@ -705,7 +705,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
nullv, /* info on nulls */
&(heapTuple->t_self), /* tid of heap tuple */
heapRelation,
relationDescs[i]->rd_index->indisunique && !is_vacuum);
relationDescs[i]->rd_index->indisunique && !is_vacuum);
/*
* keep track of index inserts for debugging
@@ -753,7 +753,7 @@ RegisterExprContextCallback(ExprContext *econtext,
ExprContextCallbackFunction function,
Datum arg)
{
ExprContext_CB *ecxt_callback;
ExprContext_CB *ecxt_callback;
/* Save the info in appropriate memory context */
ecxt_callback = (ExprContext_CB *)
@@ -779,8 +779,8 @@ UnregisterExprContextCallback(ExprContext *econtext,
ExprContextCallbackFunction function,
Datum arg)
{
ExprContext_CB **prev_callback;
ExprContext_CB *ecxt_callback;
ExprContext_CB **prev_callback;
ExprContext_CB *ecxt_callback;
prev_callback = &econtext->ecxt_callbacks;
@@ -792,9 +792,7 @@ UnregisterExprContextCallback(ExprContext *econtext,
pfree(ecxt_callback);
}
else
{
prev_callback = &ecxt_callback->next;
}
}
}
@@ -807,7 +805,7 @@ UnregisterExprContextCallback(ExprContext *econtext,
static void
ShutdownExprContext(ExprContext *econtext)
{
ExprContext_CB *ecxt_callback;
ExprContext_CB *ecxt_callback;
/*
* Call each callback function in reverse registration order.

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.56 2002/08/29 00:17:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.57 2002/09/04 20:31:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -269,7 +269,7 @@ postquel_start(execution_state *es)
static TupleTableSlot *
postquel_getnext(execution_state *es)
{
long count;
long count;
if (es->qd->operation == CMD_UTILITY)
{
@@ -566,8 +566,8 @@ fmgr_sql(PG_FUNCTION_ARGS)
elog(ERROR, "Set-valued function called in context that cannot accept a set");
/*
* Ensure we will get shut down cleanly if the exprcontext is
* not run to completion.
* Ensure we will get shut down cleanly if the exprcontext is not
* run to completion.
*/
if (!fcache->shutdown_reg)
{

View File

@@ -46,7 +46,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.85 2002/06/20 20:29:28 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.86 2002/09/04 20:31:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -877,8 +877,8 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
&peraggstate->transtypeByVal);
/*
* initval is potentially null, so don't try to access it as a struct
* field. Must do it the hard way with SysCacheGetAttr.
* initval is potentially null, so don't try to access it as a
* struct field. Must do it the hard way with SysCacheGetAttr.
*/
textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple,
Anum_pg_aggregate_agginitval,
@@ -907,8 +907,8 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
if (peraggstate->transfn.fn_strict && peraggstate->initValueIsNull)
{
/*
* Note: use the type from the input expression here, not
* from pg_proc.proargtypes, because the latter might be 0.
* Note: use the type from the input expression here, not from
* pg_proc.proargtypes, because the latter might be 0.
* (Consider COUNT(*).)
*/
Oid inputType = exprType(aggref->target);
@@ -921,8 +921,8 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
if (aggref->aggdistinct)
{
/*
* Note: use the type from the input expression here, not
* from pg_proc.proargtypes, because the latter might be 0.
* Note: use the type from the input expression here, not from
* pg_proc.proargtypes, because the latter might be 0.
* (Consider COUNT(*).)
*/
Oid inputType = exprType(aggref->target);

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeFunctionscan.c,v 1.11 2002/09/02 01:05:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeFunctionscan.c,v 1.12 2002/09/04 20:31:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,13 +50,13 @@ static bool tupledesc_mismatch(TupleDesc tupdesc1, TupleDesc tupdesc2);
static TupleTableSlot *
FunctionNext(FunctionScan *node)
{
TupleTableSlot *slot;
EState *estate;
ScanDirection direction;
Tuplestorestate *tuplestorestate;
FunctionScanState *scanstate;
bool should_free;
HeapTuple heapTuple;
TupleTableSlot *slot;
EState *estate;
ScanDirection direction;
Tuplestorestate *tuplestorestate;
FunctionScanState *scanstate;
bool should_free;
HeapTuple heapTuple;
/*
* get information from the estate and scan state
@@ -69,12 +69,13 @@ FunctionNext(FunctionScan *node)
/*
* If first time through, read all tuples from function and put them
* in a tuplestore. Subsequent calls just fetch tuples from tuplestore.
* in a tuplestore. Subsequent calls just fetch tuples from
* tuplestore.
*/
if (tuplestorestate == NULL)
{
ExprContext *econtext = scanstate->csstate.cstate.cs_ExprContext;
TupleDesc funcTupdesc;
ExprContext *econtext = scanstate->csstate.cstate.cs_ExprContext;
TupleDesc funcTupdesc;
scanstate->tuplestorestate = tuplestorestate =
ExecMakeTableFunctionResult((Expr *) scanstate->funcexpr,
@@ -83,9 +84,9 @@ FunctionNext(FunctionScan *node)
&funcTupdesc);
/*
* If function provided a tupdesc, cross-check it. We only really
* need to do this for functions returning RECORD, but might as well
* do it always.
* If function provided a tupdesc, cross-check it. We only really
* need to do this for functions returning RECORD, but might as
* well do it always.
*/
if (funcTupdesc &&
tupledesc_mismatch(scanstate->tupdesc, funcTupdesc))
@@ -98,7 +99,7 @@ FunctionNext(FunctionScan *node)
slot = scanstate->csstate.css_ScanTupleSlot;
if (tuplestorestate)
heapTuple = tuplestore_getheaptuple(tuplestorestate,
ScanDirectionIsForward(direction),
ScanDirectionIsForward(direction),
&should_free);
else
{
@@ -135,11 +136,11 @@ ExecFunctionScan(FunctionScan *node)
bool
ExecInitFunctionScan(FunctionScan *node, EState *estate, Plan *parent)
{
FunctionScanState *scanstate;
RangeTblEntry *rte;
Oid funcrettype;
char functyptype;
TupleDesc tupdesc = NULL;
FunctionScanState *scanstate;
RangeTblEntry *rte;
Oid funcrettype;
char functyptype;
TupleDesc tupdesc = NULL;
/*
* FunctionScan should not have any children.
@@ -266,8 +267,8 @@ ExecCountSlotsFunctionScan(FunctionScan *node)
void
ExecEndFunctionScan(FunctionScan *node)
{
FunctionScanState *scanstate;
EState *estate;
FunctionScanState *scanstate;
EState *estate;
/*
* get information from node
@@ -308,7 +309,7 @@ ExecEndFunctionScan(FunctionScan *node)
void
ExecFunctionMarkPos(FunctionScan *node)
{
FunctionScanState *scanstate;
FunctionScanState *scanstate;
scanstate = (FunctionScanState *) node->scan.scanstate;
@@ -330,7 +331,7 @@ ExecFunctionMarkPos(FunctionScan *node)
void
ExecFunctionRestrPos(FunctionScan *node)
{
FunctionScanState *scanstate;
FunctionScanState *scanstate;
scanstate = (FunctionScanState *) node->scan.scanstate;
@@ -352,7 +353,7 @@ ExecFunctionRestrPos(FunctionScan *node)
void
ExecFunctionReScan(FunctionScan *node, ExprContext *exprCtxt, Plan *parent)
{
FunctionScanState *scanstate;
FunctionScanState *scanstate;
/*
* get information from node

View File

@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* $Id: nodeHash.c,v 1.65 2002/09/02 02:47:02 momjian Exp $
* $Id: nodeHash.c,v 1.66 2002/09/04 20:31:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,7 +31,7 @@
#include "utils/lsyscache.h"
static uint32 hashFunc(Datum key, int typLen, bool byVal);
static uint32 hashFunc(Datum key, int typLen, bool byVal);
/* ----------------------------------------------------------------
* ExecHash
@@ -639,11 +639,11 @@ hashFunc(Datum key, int typLen, bool byVal)
{
/*
* If it's a by-value data type, just hash the whole Datum value.
* This assumes that datatypes narrower than Datum are consistently
* padded (either zero-extended or sign-extended, but not random
* bits) to fill Datum; see the XXXGetDatum macros in postgres.h.
* NOTE: it would not work to do hash_any(&key, len) since this
* would get the wrong bytes on a big-endian machine.
* This assumes that datatypes narrower than Datum are
* consistently padded (either zero-extended or sign-extended, but
* not random bits) to fill Datum; see the XXXGetDatum macros in
* postgres.h. NOTE: it would not work to do hash_any(&key, len)
* since this would get the wrong bytes on a big-endian machine.
*/
k = (unsigned char *) &key;
typLen = sizeof(Datum);
@@ -658,14 +658,14 @@ hashFunc(Datum key, int typLen, bool byVal)
else if (typLen == -1)
{
/*
* It's a varlena type, so 'key' points to a
* "struct varlena". NOTE: VARSIZE returns the
* "real" data length plus the sizeof the "vl_len" attribute of
* varlena (the length information). 'key' points to the beginning
* of the varlena struct, so we have to use "VARDATA" to find the
* beginning of the "real" data. Also, we have to be careful to
* detoast the datum if it's toasted. (We don't worry about
* freeing the detoasted copy; that happens for free when the
* It's a varlena type, so 'key' points to a "struct varlena".
* NOTE: VARSIZE returns the "real" data length plus the
* sizeof the "vl_len" attribute of varlena (the length
* information). 'key' points to the beginning of the varlena
* struct, so we have to use "VARDATA" to find the beginning
* of the "real" data. Also, we have to be careful to detoast
* the datum if it's toasted. (We don't worry about freeing
* the detoasted copy; that happens for free when the
* per-tuple memory context is reset in ExecHashGetBucket.)
*/
struct varlena *vkey = PG_DETOAST_DATUM(key);

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.70 2002/06/23 21:29:32 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.71 2002/09/04 20:31:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -165,22 +165,21 @@ IndexNext(IndexScan *node)
while ((tuple = index_getnext(scandesc, direction)) != NULL)
{
/*
* store the scanned tuple in the scan tuple slot of the
* scan state. Note: we pass 'false' because tuples
* returned by amgetnext are pointers onto disk pages and
* must not be pfree()'d.
* store the scanned tuple in the scan tuple slot of the scan
* state. Note: we pass 'false' because tuples returned by
* amgetnext are pointers onto disk pages and must not be
* pfree()'d.
*/
ExecStoreTuple(tuple, /* tuple to store */
ExecStoreTuple(tuple, /* tuple to store */
slot, /* slot to store in */
scandesc->xs_cbuf, /* buffer containing tuple */
false); /* don't pfree */
scandesc->xs_cbuf, /* buffer containing tuple */
false); /* don't pfree */
/*
* We must check to see if the current tuple was already
* matched by an earlier index, so we don't double-report
* it. We do this by passing the tuple through ExecQual
* and checking for failure with all previous
* qualifications.
* matched by an earlier index, so we don't double-report it.
* We do this by passing the tuple through ExecQual and
* checking for failure with all previous qualifications.
*/
if (indexstate->iss_IndexPtr > 0)
{
@@ -485,8 +484,9 @@ ExecEndIndexScan(IndexScan *node)
* close the heap relation.
*
* Currently, we do not release the AccessShareLock acquired by
* ExecInitIndexScan. This lock should be held till end of transaction.
* (There is a faction that considers this too much locking, however.)
* ExecInitIndexScan. This lock should be held till end of
* transaction. (There is a faction that considers this too much
* locking, however.)
*/
heap_close(relation, NoLock);
@@ -1009,7 +1009,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
elog(ERROR, "indexes of the relation %u was inactivated", reloid);
scanstate->css_currentRelation = currentRelation;
scanstate->css_currentScanDesc = NULL; /* no heap scan here */
scanstate->css_currentScanDesc = NULL; /* no heap scan here */
/*
* get the scan type from the relation descriptor.

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.50 2002/06/20 20:29:28 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.51 2002/09/04 20:31:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,7 +93,7 @@ static bool MergeCompare(List *eqQual, List *compareQual, ExprContext *econtext)
* This takes the mergeclause which is a qualification of the
* form ((= expr expr) (= expr expr) ...) and forms new lists
* of the forms ((< expr expr) (< expr expr) ...) and
* ((> expr expr) (> expr expr) ...). These lists will be used
* ((> expr expr) (> expr expr) ...). These lists will be used
* by ExecMergeJoin() to determine if we should skip tuples.
* (We expect there to be suitable operators because the "=" operators
* were marked mergejoinable; however, there might be a different

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.36 2002/06/20 20:29:28 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.37 2002/09/04 20:31:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -284,8 +284,9 @@ ExecEndSeqScan(SeqScan *node)
* close the heap relation.
*
* Currently, we do not release the AccessShareLock acquired by
* InitScanRelation. This lock should be held till end of transaction.
* (There is a faction that considers this too much locking, however.)
* InitScanRelation. This lock should be held till end of
* transaction. (There is a faction that considers this too much
* locking, however.)
*/
heap_close(relation, NoLock);

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeTidscan.c,v 1.25 2002/06/20 20:29:28 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeTidscan.c,v 1.26 2002/09/04 20:31:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -109,8 +109,8 @@ TidNext(TidScan *node)
return slot; /* return empty slot */
/*
* XXX shouldn't we check here to make sure tuple matches TID list?
* In runtime-key case this is not certain, is it?
* XXX shouldn't we check here to make sure tuple matches TID
* list? In runtime-key case this is not certain, is it?
*/
ExecStoreTuple(estate->es_evTuple[node->scan.scanrelid - 1],
@@ -468,7 +468,7 @@ ExecInitTidScan(TidScan *node, EState *estate, Plan *parent)
currentRelation = heap_open(reloid, AccessShareLock);
scanstate->css_currentRelation = currentRelation;
scanstate->css_currentScanDesc = NULL; /* no heap scan here */
scanstate->css_currentScanDesc = NULL; /* no heap scan here */
/*
* get the scan type from the relation descriptor.

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/spi.c,v 1.73 2002/09/02 01:05:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/spi.c,v 1.74 2002/09/04 20:31:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -433,6 +433,7 @@ SPI_modifytuple(Relation rel, HeapTuple tuple, int natts, int *attnum,
if (i == natts) /* no errors in *attnum */
{
mtuple = heap_formtuple(rel->rd_att, v, n);
/*
* copy the identification info of the old tuple: t_ctid, t_self,
* and OID (if any)
@@ -1098,7 +1099,7 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, char *Nulls, int tcount)
ParamListInfo paramLI;
paramLI = (ParamListInfo) palloc((nargs + 1) *
sizeof(ParamListInfoData));
sizeof(ParamListInfoData));
MemSet(paramLI, 0, (nargs + 1) * sizeof(ParamListInfoData));
state->es_param_list_info = paramLI;
@@ -1266,9 +1267,9 @@ _SPI_cursor_operation(Portal portal, bool forward, int count,
ExecutorRun(querydesc, estate, direction, (long) count);
if (estate->es_processed > 0)
portal->atStart = false; /* OK to back up now */
portal->atStart = false; /* OK to back up now */
if (count <= 0 || (int) estate->es_processed < count)
portal->atEnd = true; /* we retrieved 'em all */
portal->atEnd = true; /* we retrieved 'em all */
}
else
{
@@ -1280,9 +1281,9 @@ _SPI_cursor_operation(Portal portal, bool forward, int count,
ExecutorRun(querydesc, estate, direction, (long) count);
if (estate->es_processed > 0)
portal->atEnd = false; /* OK to go forward now */
portal->atEnd = false; /* OK to go forward now */
if (count <= 0 || (int) estate->es_processed < count)
portal->atStart = true; /* we retrieved 'em all */
portal->atStart = true; /* we retrieved 'em all */
}
_SPI_current->processed = estate->es_processed;

View File

@@ -9,7 +9,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Id: stringinfo.c,v 1.31 2002/06/20 20:29:28 momjian Exp $
* $Id: stringinfo.c,v 1.32 2002/09/04 20:31:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -120,7 +120,7 @@ appendStringInfo(StringInfo str, const char *fmt,...)
* an example of a platform with such a bug.
*/
#ifdef USE_ASSERT_CHECKING
str->data[str->maxlen-1] = '\0';
str->data[str->maxlen - 1] = '\0';
#endif
va_start(args, fmt);
@@ -128,7 +128,7 @@ appendStringInfo(StringInfo str, const char *fmt,...)
fmt, args);
va_end(args);
Assert(str->data[str->maxlen-1] == '\0');
Assert(str->data[str->maxlen - 1] == '\0');
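Taken together, these two hunks form a small canary against vsnprintf implementations that overrun the buffer: plant a NUL in the last byte before the call and assert it survived afterwards. A standalone sketch of the same guard (function name and signature are illustrative):

#include <assert.h>
#include <stdarg.h>
#include <stdio.h>

/* Plant a NUL canary in the last byte, format, then check the canary:
 * a vsnprintf that overruns the buffer will usually clobber it. */
static void
format_checked(char *buf, size_t maxlen, const char *fmt, ...)
{
	va_list		args;

	buf[maxlen - 1] = '\0';
	va_start(args, fmt);
	vsnprintf(buf, maxlen, fmt, args);
	va_end(args);
	assert(buf[maxlen - 1] == '\0');
}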
/*
* Note: some versions of vsnprintf return the number of chars

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.89 2002/09/02 02:47:02 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.90 2002/09/04 20:31:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -130,7 +130,6 @@ pg_krb4_recvauth(Port *port)
elog(LOG, "pg_krb4_recvauth: Kerberos not implemented on this server");
return STATUS_ERROR;
}
#endif /* KRB4 */
@@ -310,7 +309,6 @@ pg_krb5_recvauth(Port *port)
elog(LOG, "pg_krb5_recvauth: Kerberos not implemented on this server");
return STATUS_ERROR;
}
#endif /* KRB5 */
@@ -416,7 +414,7 @@ ClientAuthentication(Port *port)
if (port->raddr.sa.sa_family == AF_INET)
hostinfo = inet_ntoa(port->raddr.in.sin_addr);
elog(FATAL,
"No pg_hba.conf entry for host %s, user %s, database %s",
"No pg_hba.conf entry for host %s, user %s, database %s",
hostinfo, port->user, port->database);
break;
}
@@ -513,8 +511,8 @@ sendAuthRequest(Port *port, AuthRequest areq)
pq_endmessage(&buf);
/*
* Flush message so client will see it, except for AUTH_REQ_OK,
* which need not be sent until we are ready for queries.
* Flush message so client will see it, except for AUTH_REQ_OK, which
* need not be sent until we are ready for queries.
*/
if (areq != AUTH_REQ_OK)
pq_flush();
@@ -688,7 +686,7 @@ CheckPAMAuth(Port *port, char *user, char *password)
pam_strerror(pamh, retval));
}
pam_passwd = NULL; /* Unset pam_passwd */
pam_passwd = NULL; /* Unset pam_passwd */
return (retval == PAM_SUCCESS ? STATUS_OK : STATUS_ERROR);
}
@@ -714,13 +712,14 @@ recv_and_check_password_packet(Port *port)
pfree(buf.data);
return STATUS_EOF;
}
/*
* We don't actually use the password packet length the frontend
* sent us; however, it's a reasonable sanity check to ensure that
* we actually read as much data as we expected to.
* We don't actually use the password packet length the frontend sent
* us; however, it's a reasonable sanity check to ensure that we
* actually read as much data as we expected to.
*
* The password packet size is the length of the buffer, plus the
* size field itself (4 bytes), plus a 1-byte terminator.
* The password packet size is the length of the buffer, plus the size
* field itself (4 bytes), plus a 1-byte terminator.
*/
if (len != (buf.len + 4 + 1))
elog(LOG, "unexpected password packet size: read %d, expected %d",

View File

@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/libpq/be-secure.c,v 1.12 2002/09/02 02:47:02 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/be-secure.c,v 1.13 2002/09/04 20:31:19 momjian Exp $
*
* Since the server static private key ($DataDir/server.key)
* will normally be stored unencrypted so that the database
@@ -30,13 +30,13 @@
* impersonations.
*
* Another benefit of EDH is that it allows the backend and
* clients to use DSA keys. DSA keys can only provide digital
* clients to use DSA keys. DSA keys can only provide digital
* signatures, not encryption, and are often acceptable in
* jurisdictions where RSA keys are unacceptable.
*
* The downside to EDH is that it makes it impossible to
* use ssldump(1) if there's a problem establishing an SSL
* session. In this case you'll need to temporarily disable
* session. In this case you'll need to temporarily disable
* EDH by commenting out the callback.
*
* ...
@@ -49,7 +49,7 @@
* milestone 1: fix basic coding errors
* [*] existing SSL code pulled out of existing files.
* [*] SSL_get_error() after SSL_read() and SSL_write(),
* SSL_shutdown(), default to TLSv1.
* SSL_shutdown(), default to TLSv1.
*
* milestone 2: provide endpoint authentication (server)
* [*] client verifies server cert
@@ -112,22 +112,22 @@
extern void ExitPostmaster(int);
extern void postmaster_error(const char *fmt,...);
int secure_initialize(void);
void secure_destroy(void);
int secure_open_server(Port *);
void secure_close(Port *);
ssize_t secure_read(Port *, void *ptr, size_t len);
ssize_t secure_write(Port *, const void *ptr, size_t len);
int secure_initialize(void);
void secure_destroy(void);
int secure_open_server(Port *);
void secure_close(Port *);
ssize_t secure_read(Port *, void *ptr, size_t len);
ssize_t secure_write(Port *, const void *ptr, size_t len);
#ifdef USE_SSL
static DH *load_dh_file(int keylength);
static DH *load_dh_buffer(const char *, size_t);
static DH *tmp_dh_cb(SSL *s, int is_export, int keylength);
static int verify_cb(int, X509_STORE_CTX *);
static DH *load_dh_file(int keylength);
static DH *load_dh_buffer(const char *, size_t);
static DH *tmp_dh_cb(SSL *s, int is_export, int keylength);
static int verify_cb(int, X509_STORE_CTX *);
static void info_cb(SSL *ssl, int type, int args);
static int initialize_SSL(void);
static int initialize_SSL(void);
static void destroy_SSL(void);
static int open_server_SSL(Port *);
static int open_server_SSL(Port *);
static void close_SSL(Port *);
static const char *SSLerrmessage(void);
#endif
@@ -137,13 +137,13 @@ static const char *SSLerrmessage(void);
* How much data can be sent across a secure connection
* (total in both directions) before we require renegotiation.
*/
#define RENEGOTIATION_LIMIT (64 * 1024)
#define CA_PATH NULL
#define RENEGOTIATION_LIMIT (64 * 1024)
#define CA_PATH NULL
static SSL_CTX *SSL_context = NULL;
#endif
/* ------------------------------------------------------------ */
/* Hardcoded values */
/* Hardcoded values */
/* ------------------------------------------------------------ */
/*
@@ -207,16 +207,16 @@ KWbuHn491xNO25CQWMtem80uKw+pTnisBRF/454n1Jnhub144YRBoN8CAQI=\n\
-----END DH PARAMETERS-----\n";
/* ------------------------------------------------------------ */
/* Procedures common to all secure sessions */
/* Procedures common to all secure sessions */
/* ------------------------------------------------------------ */
/*
* Initialize global context
*/
int
secure_initialize (void)
secure_initialize(void)
{
int r = 0;
int r = 0;
#ifdef USE_SSL
r = initialize_SSL();
@@ -229,7 +229,7 @@ secure_initialize (void)
* Destroy global context
*/
void
secure_destroy (void)
secure_destroy(void)
{
#ifdef USE_SSL
destroy_SSL();
@@ -240,9 +240,9 @@ secure_destroy (void)
* Attempt to negotiate secure session.
*/
int
secure_open_server (Port *port)
secure_open_server(Port *port)
{
int r = 0;
int r = 0;
#ifdef USE_SSL
r = open_server_SSL(port);
@@ -255,7 +255,7 @@ secure_open_server (Port *port)
* Close secure session.
*/
void
secure_close (Port *port)
secure_close(Port *port)
{
#ifdef USE_SSL
if (port->ssl)
@@ -267,9 +267,9 @@ secure_close (Port *port)
* Read data from a secure connection.
*/
ssize_t
secure_read (Port *port, void *ptr, size_t len)
secure_read(Port *port, void *ptr, size_t len)
{
ssize_t n;
ssize_t n;
#ifdef USE_SSL
if (port->ssl)
@@ -283,28 +283,28 @@ secure_read (Port *port, void *ptr, size_t len)
n = SSL_read(port->ssl, ptr, len);
switch (SSL_get_error(port->ssl, n))
{
case SSL_ERROR_NONE:
port->count += n;
break;
case SSL_ERROR_WANT_READ:
break;
case SSL_ERROR_SYSCALL:
errno = get_last_socket_error();
elog(ERROR, "SSL SYSCALL error: %s", strerror(errno));
break;
case SSL_ERROR_SSL:
elog(ERROR, "SSL error: %s", SSLerrmessage());
/* fall through */
case SSL_ERROR_ZERO_RETURN:
secure_close(port);
errno = ECONNRESET;
n = -1;
break;
case SSL_ERROR_NONE:
port->count += n;
break;
case SSL_ERROR_WANT_READ:
break;
case SSL_ERROR_SYSCALL:
errno = get_last_socket_error();
elog(ERROR, "SSL SYSCALL error: %s", strerror(errno));
break;
case SSL_ERROR_SSL:
elog(ERROR, "SSL error: %s", SSLerrmessage());
/* fall through */
case SSL_ERROR_ZERO_RETURN:
secure_close(port);
errno = ECONNRESET;
n = -1;
break;
}
}
else
#endif
n = recv(port->sock, ptr, len, 0);
n = recv(port->sock, ptr, len, 0);
return n;
}
@@ -313,12 +313,12 @@ secure_read (Port *port, void *ptr, size_t len)
* Write data to a secure connection.
*/
ssize_t
secure_write (Port *port, const void *ptr, size_t len)
secure_write(Port *port, const void *ptr, size_t len)
{
ssize_t n;
ssize_t n;
#ifndef WIN32
pqsigfunc oldsighandler = pqsignal(SIGPIPE, SIG_IGN);
pqsigfunc oldsighandler = pqsignal(SIGPIPE, SIG_IGN);
#endif
#ifdef USE_SSL
@@ -333,28 +333,28 @@ secure_write (Port *port, const void *ptr, size_t len)
n = SSL_write(port->ssl, ptr, len);
switch (SSL_get_error(port->ssl, n))
{
case SSL_ERROR_NONE:
port->count += n;
break;
case SSL_ERROR_WANT_WRITE:
break;
case SSL_ERROR_SYSCALL:
errno = get_last_socket_error();
elog(ERROR, "SSL SYSCALL error: %s", strerror(errno));
break;
case SSL_ERROR_SSL:
elog(ERROR, "SSL error: %s", SSLerrmessage());
/* fall through */
case SSL_ERROR_ZERO_RETURN:
secure_close(port);
errno = ECONNRESET;
n = -1;
break;
case SSL_ERROR_NONE:
port->count += n;
break;
case SSL_ERROR_WANT_WRITE:
break;
case SSL_ERROR_SYSCALL:
errno = get_last_socket_error();
elog(ERROR, "SSL SYSCALL error: %s", strerror(errno));
break;
case SSL_ERROR_SSL:
elog(ERROR, "SSL error: %s", SSLerrmessage());
/* fall through */
case SSL_ERROR_ZERO_RETURN:
secure_close(port);
errno = ECONNRESET;
n = -1;
break;
}
}
else
#endif
n = send(port->sock, ptr, len, 0);
n = send(port->sock, ptr, len, 0);
#ifndef WIN32
pqsignal(SIGPIPE, oldsighandler);
@@ -364,7 +364,7 @@ secure_write (Port *port, const void *ptr, size_t len)
}
/* ------------------------------------------------------------ */
/* SSL specific code */
/* SSL specific code */
/* ------------------------------------------------------------ */
#ifdef USE_SSL
/*
@@ -374,13 +374,13 @@ secure_write (Port *port, const void *ptr, size_t len)
* to verify that the DBA-generated DH parameters file contains
* what we expect it to contain.
*/
static DH *
load_dh_file (int keylength)
static DH *
load_dh_file(int keylength)
{
FILE *fp;
char fnbuf[2048];
DH *dh = NULL;
int codes;
FILE *fp;
char fnbuf[2048];
DH *dh = NULL;
int codes;
/* attempt to open file. It's not an error if it doesn't exist. */
snprintf(fnbuf, sizeof fnbuf, "%s/dh%d.pem", DataDir, keylength);
@@ -393,10 +393,10 @@ load_dh_file (int keylength)
fclose(fp);
/* is the prime the correct size? */
if (dh != NULL && 8*DH_size(dh) < keylength)
if (dh != NULL && 8 * DH_size(dh) < keylength)
{
elog(LOG, "DH errors (%s): %d bits expected, %d bits found",
fnbuf, keylength, 8*DH_size(dh));
fnbuf, keylength, 8 * DH_size(dh));
dh = NULL;
}
@@ -417,8 +417,8 @@ load_dh_file (int keylength)
(codes & DH_CHECK_P_NOT_SAFE_PRIME))
{
elog(LOG,
"DH error (%s): neither suitable generator or safe prime",
fnbuf);
"DH error (%s): neither suitable generator or safe prime",
fnbuf);
return NULL;
}
}
@@ -432,11 +432,11 @@ load_dh_file (int keylength)
* To prevent problems if the DH parameters files don't even
* exist, we can load DH parameters hardcoded into this file.
*/
static DH *
load_dh_buffer (const char *buffer, size_t len)
static DH *
load_dh_buffer(const char *buffer, size_t len)
{
BIO *bio;
DH *dh = NULL;
BIO *bio;
DH *dh = NULL;
bio = BIO_new_mem_buf((char *) buffer, len);
if (bio == NULL)
@@ -462,58 +462,58 @@ load_dh_buffer (const char *buffer, size_t len)
* the OpenSSL library can efficiently generate random keys from
* the information provided.
*/
static DH *
tmp_dh_cb (SSL *s, int is_export, int keylength)
static DH *
tmp_dh_cb(SSL *s, int is_export, int keylength)
{
DH *r = NULL;
static DH *dh = NULL;
static DH *dh512 = NULL;
static DH *dh1024 = NULL;
static DH *dh2048 = NULL;
static DH *dh4096 = NULL;
DH *r = NULL;
static DH *dh = NULL;
static DH *dh512 = NULL;
static DH *dh1024 = NULL;
static DH *dh2048 = NULL;
static DH *dh4096 = NULL;
switch (keylength)
{
case 512:
if (dh512 == NULL)
dh512 = load_dh_file(keylength);
if (dh512 == NULL)
dh512 = load_dh_buffer(file_dh512, sizeof file_dh512);
r = dh512;
break;
case 512:
if (dh512 == NULL)
dh512 = load_dh_file(keylength);
if (dh512 == NULL)
dh512 = load_dh_buffer(file_dh512, sizeof file_dh512);
r = dh512;
break;
case 1024:
if (dh1024 == NULL)
dh1024 = load_dh_file(keylength);
if (dh1024 == NULL)
dh1024 = load_dh_buffer(file_dh1024, sizeof file_dh1024);
r = dh1024;
break;
case 1024:
if (dh1024 == NULL)
dh1024 = load_dh_file(keylength);
if (dh1024 == NULL)
dh1024 = load_dh_buffer(file_dh1024, sizeof file_dh1024);
r = dh1024;
break;
case 2048:
if (dh2048 == NULL)
dh2048 = load_dh_file(keylength);
if (dh2048 == NULL)
dh2048 = load_dh_buffer(file_dh2048, sizeof file_dh2048);
r = dh2048;
break;
case 2048:
if (dh2048 == NULL)
dh2048 = load_dh_file(keylength);
if (dh2048 == NULL)
dh2048 = load_dh_buffer(file_dh2048, sizeof file_dh2048);
r = dh2048;
break;
case 4096:
if (dh4096 == NULL)
dh4096 = load_dh_file(keylength);
if (dh4096 == NULL)
dh4096 = load_dh_buffer(file_dh4096, sizeof file_dh4096);
r = dh4096;
break;
case 4096:
if (dh4096 == NULL)
dh4096 = load_dh_file(keylength);
if (dh4096 == NULL)
dh4096 = load_dh_buffer(file_dh4096, sizeof file_dh4096);
r = dh4096;
break;
default:
if (dh == NULL)
dh = load_dh_file(keylength);
r = dh;
default:
if (dh == NULL)
dh = load_dh_file(keylength);
r = dh;
}
/* this may take a long time, but it may be necessary... */
if (r == NULL || 8*DH_size(r) < keylength)
if (r == NULL || 8 * DH_size(r) < keylength)
{
elog(DEBUG1, "DH: generating parameters (%d bits)....", keylength);
r = DH_generate_parameters(keylength, DH_GENERATOR_2, NULL, NULL);
@@ -534,7 +534,7 @@ tmp_dh_cb (SSL *s, int is_export, int keylength)
* for now we accept the default checks.
*/
static int
verify_cb (int ok, X509_STORE_CTX *ctx)
verify_cb(int ok, X509_STORE_CTX *ctx)
{
return ok;
}
@@ -544,7 +544,7 @@ verify_cb (int ok, X509_STORE_CTX *ctx)
* into the PostgreSQL log.
*/
static void
info_cb (SSL *ssl, int type, int args)
info_cb(SSL *ssl, int type, int args)
{
switch (type)
{
@@ -579,9 +579,9 @@ info_cb (SSL *ssl, int type, int args)
* Initialize global SSL context.
*/
static int
initialize_SSL (void)
initialize_SSL(void)
{
char fnbuf[2048];
char fnbuf[2048];
struct stat buf;
if (!SSL_context)
@@ -597,7 +597,7 @@ initialize_SSL (void)
}
/*
* Load and verify certificate and private key
* Load and verify certificate and private key
*/
snprintf(fnbuf, sizeof(fnbuf), "%s/server.crt", DataDir);
if (!SSL_CTX_use_certificate_file(SSL_context, fnbuf, SSL_FILETYPE_PEM))
@@ -647,7 +647,7 @@ initialize_SSL (void)
ExitPostmaster(1);
}
SSL_CTX_set_verify(SSL_context,
SSL_VERIFY_PEER | SSL_VERIFY_CLIENT_ONCE, verify_cb);
SSL_VERIFY_PEER | SSL_VERIFY_CLIENT_ONCE, verify_cb);
return 0;
}
@@ -656,7 +656,7 @@ initialize_SSL (void)
* Destroy global SSL context.
*/
static void
destroy_SSL (void)
destroy_SSL(void)
{
if (SSL_context)
{
@@ -669,7 +669,7 @@ destroy_SSL (void)
* Attempt to negotiate SSL connection.
*/
static int
open_server_SSL (Port *port)
open_server_SSL(Port *port)
{
if (!(port->ssl = SSL_new(SSL_context)) ||
!SSL_set_fd(port->ssl, port->sock) ||
@@ -685,17 +685,17 @@ open_server_SSL (Port *port)
port->peer = SSL_get_peer_certificate(port->ssl);
if (port->peer == NULL)
{
strncpy(port->peer_dn, "(anonymous)", sizeof (port->peer_dn));
strncpy(port->peer_cn, "(anonymous)", sizeof (port->peer_cn));
strncpy(port->peer_dn, "(anonymous)", sizeof(port->peer_dn));
strncpy(port->peer_cn, "(anonymous)", sizeof(port->peer_cn));
}
else
{
X509_NAME_oneline(X509_get_subject_name(port->peer),
port->peer_dn, sizeof (port->peer_dn));
port->peer_dn[sizeof(port->peer_dn)-1] = '\0';
port->peer_dn, sizeof(port->peer_dn));
port->peer_dn[sizeof(port->peer_dn) - 1] = '\0';
X509_NAME_get_text_by_NID(X509_get_subject_name(port->peer),
NID_commonName, port->peer_cn, sizeof (port->peer_cn));
port->peer_cn[sizeof(port->peer_cn)-1] = '\0';
NID_commonName, port->peer_cn, sizeof(port->peer_cn));
port->peer_cn[sizeof(port->peer_cn) - 1] = '\0';
}
elog(DEBUG1, "secure connection from '%s'", port->peer_cn);
@@ -709,7 +709,7 @@ open_server_SSL (Port *port)
* Close SSL connection.
*/
static void
close_SSL (Port *port)
close_SSL(Port *port)
{
if (port->ssl)
{
@@ -729,9 +729,9 @@ close_SSL (Port *port)
static const char *
SSLerrmessage(void)
{
unsigned long errcode;
const char *errreason;
static char errbuf[32];
unsigned long errcode;
const char *errreason;
static char errbuf[32];
errcode = ERR_get_error();
if (errcode == 0)
@@ -743,4 +743,4 @@ SSLerrmessage(void)
return errbuf;
}
#endif /* USE_SSL */
#endif /* USE_SSL */
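For context, the pieces reformatted above (certificate loading, verify_cb, tmp_dh_cb) all hang off a single SSL_CTX. The following is a condensed, hedged sketch of that kind of server-side setup, assuming <openssl/ssl.h>; the file names and error handling are illustrative, and only the certificate-file and SSL_CTX_set_verify calls are taken directly from the hunks above.

	SSL_CTX    *ctx;

	SSL_library_init();
	SSL_load_error_strings();

	ctx = SSL_CTX_new(SSLv23_method());
	if (ctx == NULL)
		return -1;

	/* server certificate plus the matching private key */
	if (!SSL_CTX_use_certificate_file(ctx, "server.crt", SSL_FILETYPE_PEM) ||
		!SSL_CTX_use_PrivateKey_file(ctx, "server.key", SSL_FILETYPE_PEM) ||
		!SSL_CTX_check_private_key(ctx))
		return -1;

	/* ephemeral-DH parameters are supplied on demand by a callback like tmp_dh_cb */
	SSL_CTX_set_tmp_dh_callback(ctx, tmp_dh_cb);

	/* request, but do not require, a client certificate; verify_cb gets the verdict */
	SSL_CTX_set_verify(ctx, SSL_VERIFY_PEER | SSL_VERIFY_CLIENT_ONCE, verify_cb);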

View File

@@ -9,7 +9,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/libpq/crypt.c,v 1.48 2002/06/20 20:29:28 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/crypt.c,v 1.49 2002/09/04 20:31:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -35,9 +35,9 @@ md5_crypt_verify(const Port *port, const char *user, const char *pgpass)
*valuntil = NULL,
*crypt_pwd;
int retval = STATUS_ERROR;
List **line;
List *token;
List **line;
List *token;
if ((line = get_user_line(user)) == NULL)
return STATUS_ERROR;
@@ -50,7 +50,7 @@ md5_crypt_verify(const Port *port, const char *user, const char *pgpass)
if (token)
valuntil = lfirst(token);
}
if (passwd == NULL || *passwd == '\0')
return STATUS_ERROR;

View File

@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/libpq/hba.c,v 1.86 2002/09/02 02:47:02 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/hba.c,v 1.87 2002/09/04 20:31:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,7 +41,7 @@
/* Max size of username ident server can return */
/* This is used to separate values in multi-valued column strings */
#define MULTI_VALUE_SEP "\001"
#define MULTI_VALUE_SEP "\001"
/*
* These variables hold the pre-parsed contents of the hba and ident
@@ -54,14 +54,16 @@
*/
static List *hba_lines = NIL; /* pre-parsed contents of hba file */
static List *ident_lines = NIL; /* pre-parsed contents of ident file */
static List *group_lines = NIL; /* pre-parsed contents of group file */
static List *user_lines = NIL; /* pre-parsed contents of user password file */
static List *group_lines = NIL; /* pre-parsed contents of group file */
static List *user_lines = NIL; /* pre-parsed contents of user password
* file */
/* sorted entries so we can do binary search lookups */
static List **user_sorted = NULL; /* sorted user list, for bsearch() */
static List **group_sorted = NULL; /* sorted group list, for bsearch() */
static int user_length;
static int group_length;
static List **user_sorted = NULL; /* sorted user list, for bsearch() */
static List **group_sorted = NULL; /* sorted group list, for
* bsearch() */
static int user_length;
static int group_length;
static List *tokenize_file(FILE *file);
static char *tokenize_inc_file(const char *inc_filename);
@@ -78,14 +80,14 @@ isblank(const char c)
/*
* Grab one token out of fp. Tokens are strings of non-blank
* characters bounded by blank characters, beginning of line, and
* end of line. Blank means space or tab. Return the token as
* *buf. Leave file positioned to character immediately after the
* token or EOF, whichever comes first. If no more tokens on line,
* return null string as *buf and position file to beginning of
* next line or EOF, whichever comes first. Allow spaces in quoted
* strings. Terminate on unquoted commas. Handle comments.
* Grab one token out of fp. Tokens are strings of non-blank
* characters bounded by blank characters, beginning of line, and
* end of line. Blank means space or tab. Return the token as
* *buf. Leave file positioned to character immediately after the
* token or EOF, whichever comes first. If no more tokens on line,
* return null string as *buf and position file to beginning of
* next line or EOF, whichever comes first. Allow spaces in quoted
* strings. Terminate on unquoted commas. Handle comments.
*/
void
next_token(FILE *fp, char *buf, const int bufsz)
@@ -102,8 +104,8 @@ next_token(FILE *fp, char *buf, const int bufsz)
if (c != EOF && c != '\n')
{
/*
* Build a token in buf of next characters up to EOF, EOL, unquoted
* comma, or unquoted whitespace.
* Build a token in buf of next characters up to EOF, EOL,
* unquoted comma, or unquoted whitespace.
*/
while (c != EOF && c != '\n' &&
(!isblank(c) || in_quote == true))
@@ -156,9 +158,9 @@ next_token(FILE *fp, char *buf, const int bufsz)
}
/*
* Tokenize file and handle file inclusion and comma lists. We have
* to break apart the commas to expand any file names then
* reconstruct with commas.
* Tokenize file and handle file inclusion and comma lists. We have
* to break apart the commas to expand any file names then
* reconstruct with commas.
*/
static char *
next_token_expand(FILE *file)
@@ -174,17 +176,17 @@ next_token_expand(FILE *file)
if (!*buf)
break;
if (buf[strlen(buf)-1] == ',')
if (buf[strlen(buf) - 1] == ',')
{
trailing_comma = true;
buf[strlen(buf)-1] = '\0';
buf[strlen(buf) - 1] = '\0';
}
else
trailing_comma = false;
/* Is this referencing a file? */
if (buf[0] == '@')
incbuf = tokenize_inc_file(buf+1);
incbuf = tokenize_inc_file(buf + 1);
else
incbuf = pstrdup(buf);
@@ -238,7 +240,7 @@ tokenize_inc_file(const char *inc_filename)
{
char *inc_fullname;
FILE *inc_file;
List *inc_lines;
List *inc_lines;
List *line;
char *comma_str = pstrdup("");
@@ -279,7 +281,7 @@ tokenize_inc_file(const char *inc_filename)
strcat(comma_str, MULTI_VALUE_SEP);
}
comma_str = repalloc(comma_str,
strlen(comma_str) + strlen(lfirst(token)) + 1);
strlen(comma_str) + strlen(lfirst(token)) + 1);
strcat(comma_str, lfirst(token));
}
}
@@ -341,9 +343,9 @@ tokenize_file(FILE *file)
static int
user_group_qsort_cmp(const void *list1, const void *list2)
{
/* first node is line number */
char *user1 = lfirst(lnext(*(List **)list1));
char *user2 = lfirst(lnext(*(List **)list2));
/* first node is line number */
char *user1 = lfirst(lnext(*(List **) list1));
char *user2 = lfirst(lnext(*(List **) list2));
return strcmp(user1, user2);
}
@@ -357,8 +359,8 @@ user_group_qsort_cmp(const void *list1, const void *list2)
static int
user_group_bsearch_cmp(const void *user, const void *list)
{
/* first node is line number */
char *user2 = lfirst(lnext(*(List **)list));
/* first node is line number */
char *user2 = lfirst(lnext(*(List **) list));
return strcmp(user, user2);
}
@@ -371,24 +373,24 @@ static List **
get_group_line(const char *group)
{
return (List **) bsearch((void *) group,
(void *) group_sorted,
group_length,
sizeof(List *),
user_group_bsearch_cmp);
(void *) group_sorted,
group_length,
sizeof(List *),
user_group_bsearch_cmp);
}
/*
* Lookup a user name in the pg_shadow file
*/
List **
List **
get_user_line(const char *user)
{
return (List **) bsearch((void *) user,
(void *) user_sorted,
user_length,
sizeof(List *),
user_group_bsearch_cmp);
(void *) user_sorted,
user_length,
sizeof(List *),
user_group_bsearch_cmp);
}
@@ -398,13 +400,14 @@ get_user_line(const char *user)
static int
check_group(char *group, char *user)
{
List **line, *l;
List **line,
*l;
if ((line = get_group_line(group)) != NULL)
{
foreach(l, lnext(lnext(*line)))
if (strcmp(lfirst(l), user) == 0)
return 1;
return 1;
}
return 0;
@@ -416,17 +419,17 @@ check_group(char *group, char *user)
static int
check_user(char *user, char *param_str)
{
char *tok;
char *tok;
for (tok = strtok(param_str, MULTI_VALUE_SEP); tok != NULL; tok = strtok(NULL, MULTI_VALUE_SEP))
{
if (tok[0] == '+')
{
if (check_group(tok+1, user))
if (check_group(tok + 1, user))
return 1;
}
else if (strcmp(tok, user) == 0 ||
strcmp(tok, "all") == 0)
strcmp(tok, "all") == 0)
return 1;
}
@@ -439,7 +442,7 @@ check_user(char *user, char *param_str)
static int
check_db(char *dbname, char *user, char *param_str)
{
char *tok;
char *tok;
for (tok = strtok(param_str, MULTI_VALUE_SEP); tok != NULL; tok = strtok(NULL, MULTI_VALUE_SEP))
{
@@ -744,7 +747,7 @@ void
load_group()
{
FILE *group_file;
List *line;
List *line;
if (group_lines)
free_lines(&group_lines);
@@ -761,7 +764,7 @@ load_group()
group_length = length(group_lines);
if (group_length)
{
int i = 0;
int i = 0;
group_sorted = palloc(group_length * sizeof(List *));
@@ -799,7 +802,7 @@ load_user()
user_length = length(user_lines);
if (user_length)
{
int i = 0;
int i = 0;
user_sorted = palloc(user_length * sizeof(List *));
@@ -825,7 +828,7 @@ load_hba(void)
{
int bufsize;
FILE *file; /* The config file we have to read */
char *conf_file; /* The name of the config file */
char *conf_file; /* The name of the config file */
if (hba_lines)
free_lines(&hba_lines);
@@ -935,8 +938,8 @@ check_ident_usermap(const char *usermap_name,
if (usermap_name[0] == '\0')
{
elog(LOG, "check_ident_usermap: hba configuration file does not "
"have the usermap field filled in in the entry that pertains "
"to this connection. That field is essential for Ident-based "
"have the usermap field filled in in the entry that pertains "
"to this connection. That field is essential for Ident-based "
"authentication.");
found_entry = false;
}
@@ -999,7 +1002,7 @@ load_ident(void)
/*
* Parse the string "*ident_response" as a response from a query to an Ident
* server. If it's a normal response indicating a user name, return true
* and store the user name at *ident_user. If it's anything else,
* and store the user name at *ident_user. If it's anything else,
* return false.
*/
static bool
@@ -1140,7 +1143,7 @@ ident_inet(const struct in_addr remote_ip_addr,
if (rc != 0)
{
/* save_errno is in case inet_ntoa changes errno */
int save_errno = errno;
int save_errno = errno;
elog(LOG, "Unable to connect to Ident server on the host which is "
"trying to connect to Postgres "
@@ -1157,12 +1160,13 @@ ident_inet(const struct in_addr remote_ip_addr,
snprintf(ident_query, 80, "%d,%d\n",
ntohs(remote_port), ntohs(local_port));
/* loop in case send is interrupted */
do {
do
{
rc = send(sock_fd, ident_query, strlen(ident_query), 0);
} while (rc < 0 && errno == EINTR);
if (rc < 0)
{
int save_errno = errno;
int save_errno = errno;
elog(LOG, "Unable to send query to Ident server on the host which is "
"trying to connect to Postgres (Host %s, Port %d), "
@@ -1179,11 +1183,11 @@ ident_inet(const struct in_addr remote_ip_addr,
sizeof(ident_response) - 1, 0);
if (rc < 0)
{
int save_errno = errno;
int save_errno = errno;
elog(LOG, "Unable to receive response from Ident server "
"on the host which is "
"trying to connect to Postgres (Host %s, Port %d), "
"trying to connect to Postgres (Host %s, Port %d), "
"even though we successfully sent our query to it: %s",
inet_ntoa(remote_ip_addr), IDENT_PORT,
strerror(save_errno));
@@ -1369,4 +1373,3 @@ hba_getauthmethod(hbaPort *port)
else
return STATUS_ERROR;
}

View File

@@ -14,7 +14,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/libpq/md5.c,v 1.17 2002/06/20 20:29:28 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/md5.c,v 1.18 2002/09/04 20:31:19 momjian Exp $
*/
@@ -22,10 +22,10 @@
* NOTE:
*
* There are two copies of this file, one in backend/libpq and another
* in interfaces/odbc. They should be identical. This is done so ODBC
* in interfaces/odbc. They should be identical. This is done so ODBC
* can be compiled stand-alone.
*/
#ifndef MD5_ODBC
#include "postgres.h"
#include "libpq/crypt.h"

View File

@@ -29,7 +29,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Id: pqcomm.c,v 1.139 2002/09/03 21:45:42 petere Exp $
* $Id: pqcomm.c,v 1.140 2002/09/04 20:31:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -469,10 +469,9 @@ pq_recvbuf(void)
continue; /* Ok if interrupted */
/*
* Careful: an elog() that tries to write to the client
* would cause recursion to here, leading to stack overflow
* and core dump! This message must go *only* to the postmaster
* log.
* Careful: an elog() that tries to write to the client would
* cause recursion to here, leading to stack overflow and core
* dump! This message must go *only* to the postmaster log.
*/
elog(COMMERROR, "pq_recvbuf: recv() failed: %m");
return EOF;
@@ -574,12 +573,12 @@ pq_getstring(StringInfo s)
s->data[0] = '\0';
/* Read until we get the terminating '\0' */
for(;;)
for (;;)
{
while (PqRecvPointer >= PqRecvLength)
{
if (pq_recvbuf()) /* If nothing in buffer, then recv some */
return EOF; /* Failed to recv data */
if (pq_recvbuf()) /* If nothing in buffer, then recv some */
return EOF; /* Failed to recv data */
}
for (i = PqRecvPointer; i < PqRecvLength; i++)
@@ -589,7 +588,7 @@ pq_getstring(StringInfo s)
/* does not copy the \0 */
appendBinaryStringInfo(s, PqRecvBuffer + PqRecvPointer,
i - PqRecvPointer);
PqRecvPointer = i + 1; /* advance past \0 */
PqRecvPointer = i + 1; /* advance past \0 */
return 0;
}
}
@@ -639,7 +638,7 @@ pq_putbytes(const char *s, size_t len)
int
pq_flush(void)
{
static int last_reported_send_errno = 0;
static int last_reported_send_errno = 0;
unsigned char *bufptr = PqSendBuffer;
unsigned char *bufend = PqSendBuffer + PqSendPointer;
@@ -656,13 +655,12 @@ pq_flush(void)
continue; /* Ok if we were interrupted */
/*
* Careful: an elog() that tries to write to the client
* would cause recursion to here, leading to stack overflow
* and core dump! This message must go *only* to the postmaster
* log.
* Careful: an elog() that tries to write to the client would
* cause recursion to here, leading to stack overflow and core
* dump! This message must go *only* to the postmaster log.
*
* If a client disconnects while we're in the midst of output,
* we might write quite a bit of data before we get to a safe
* If a client disconnects while we're in the midst of output, we
* might write quite a bit of data before we get to a safe
* query abort point. So, suppress duplicate log messages.
*/
if (errno != last_reported_send_errno)
@@ -679,7 +677,7 @@ pq_flush(void)
return EOF;
}
last_reported_send_errno = 0; /* reset after any successful send */
last_reported_send_errno = 0; /* reset after any successful send */
bufptr += r;
}

View File

@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/main/main.c,v 1.53 2002/08/09 22:52:04 petere Exp $
* $Header: /cvsroot/pgsql/src/backend/main/main.c,v 1.54 2002/09/04 20:31:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -121,21 +121,24 @@ main(int argc, char *argv[])
new_argv[argc] = NULL;
/*
* Set up locale information from environment. Note that CTYPE
* and COLLATE will be overridden later from pg_control if we are
* in an already-initialized database. We set them here so that
* they will be available to fill pg_control during initdb. The
* other ones will get reset later in InitializeGUCOptions, but we set
* them here to get already localized behavior during startup
* (e.g., error messages).
* Set up locale information from environment. Note that CTYPE and
* COLLATE will be overridden later from pg_control if we are in an
* already-initialized database. We set them here so that they will
* be available to fill pg_control during initdb. The other ones will
* get reset later in InitializeGUCOptions, but we set them here to
* get already localized behavior during startup (e.g., error
* messages).
*/
setlocale(LC_COLLATE, "");
setlocale(LC_CTYPE, "");
#ifdef LC_MESSAGES
setlocale(LC_MESSAGES, "");
#endif
/* We don't use these during startup. See also pg_locale.c about
* why these are set to "C". */
/*
* We don't use these during startup. See also pg_locale.c about why
* these are set to "C".
*/
setlocale(LC_MONETARY, "C");
setlocale(LC_NUMERIC, "C");
setlocale(LC_TIME, "C");

View File

@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.210 2002/09/02 02:13:01 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.211 2002/09/04 20:31:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1518,7 +1518,7 @@ _copyAExpr(A_Expr *from)
static ColumnRef *
_copyColumnRef(ColumnRef *from)
{
ColumnRef *newnode = makeNode(ColumnRef);
ColumnRef *newnode = makeNode(ColumnRef);
Node_Copy(from, newnode, fields);
Node_Copy(from, newnode, indirection);
@@ -1529,7 +1529,7 @@ _copyColumnRef(ColumnRef *from)
static ParamRef *
_copyParamRef(ParamRef *from)
{
ParamRef *newnode = makeNode(ParamRef);
ParamRef *newnode = makeNode(ParamRef);
newnode->number = from->number;
Node_Copy(from, newnode, fields);
@@ -1595,7 +1595,7 @@ _copyAIndices(A_Indices *from)
static ExprFieldSelect *
_copyExprFieldSelect(ExprFieldSelect *from)
{
ExprFieldSelect *newnode = makeNode(ExprFieldSelect);
ExprFieldSelect *newnode = makeNode(ExprFieldSelect);
Node_Copy(from, newnode, arg);
Node_Copy(from, newnode, fields);
@@ -1688,7 +1688,7 @@ _copyRangeSubselect(RangeSubselect *from)
static RangeFunction *
_copyRangeFunction(RangeFunction *from)
{
RangeFunction *newnode = makeNode(RangeFunction);
RangeFunction *newnode = makeNode(RangeFunction);
Node_Copy(from, newnode, funccallnode);
Node_Copy(from, newnode, alias);
@@ -2223,7 +2223,7 @@ _copyTransactionStmt(TransactionStmt *from)
static CompositeTypeStmt *
_copyCompositeTypeStmt(CompositeTypeStmt *from)
{
CompositeTypeStmt *newnode = makeNode(CompositeTypeStmt);
CompositeTypeStmt *newnode = makeNode(CompositeTypeStmt);
Node_Copy(from, newnode, typevar);
Node_Copy(from, newnode, coldeflist);

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/nodes/makefuncs.c,v 1.33 2002/06/20 20:29:29 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/makefuncs.c,v 1.34 2002/09/04 20:31:19 momjian Exp $
*/
#include "postgres.h"
@@ -233,7 +233,7 @@ makeRelabelType(Node *arg, Oid rtype, int32 rtypmod)
RangeVar *
makeRangeVar(char *schemaname, char *relname)
{
RangeVar *r = makeNode(RangeVar);
RangeVar *r = makeNode(RangeVar);
r->catalogname = NULL;
r->schemaname = schemaname;

View File

@@ -5,7 +5,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/nodes/outfuncs.c,v 1.172 2002/08/31 22:10:43 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/outfuncs.c,v 1.173 2002/09/04 20:31:19 momjian Exp $
*
* NOTES
* Every (plan) node in POSTGRES has an associated "out" routine which
@@ -232,10 +232,10 @@ _outQuery(StringInfo str, Query *node)
/*
* Hack to work around missing outfuncs routines for a lot of the
* utility-statement node types. (The only one we actually *need*
* for rules support is NotifyStmt.) Someday we ought to support
* 'em all, but for the meantime do this to avoid getting lots of
* warnings when running with debug_print_parse on.
* utility-statement node types. (The only one we actually *need* for
* rules support is NotifyStmt.) Someday we ought to support 'em all,
* but for the meantime do this to avoid getting lots of warnings when
* running with debug_print_parse on.
*/
if (node->utilityStmt)
{
@@ -832,13 +832,13 @@ static void
_outArrayRef(StringInfo str, ArrayRef *node)
{
appendStringInfo(str,
" ARRAYREF :refrestype %u :refattrlength %d :refelemlength %d ",
" ARRAYREF :refrestype %u :refattrlength %d :refelemlength %d ",
node->refrestype,
node->refattrlength,
node->refelemlength);
appendStringInfo(str,
":refelembyval %s :refelemalign %c :refupperindexpr ",
":refelembyval %s :refelemalign %c :refupperindexpr ",
booltostr(node->refelembyval),
node->refelemalign);
_outNode(str, node->refupperindexpr);
@@ -860,7 +860,7 @@ static void
_outFunc(StringInfo str, Func *node)
{
appendStringInfo(str,
" FUNC :funcid %u :funcresulttype %u :funcretset %s ",
" FUNC :funcid %u :funcresulttype %u :funcretset %s ",
node->funcid,
node->funcresulttype,
booltostr(node->funcretset));
@@ -873,7 +873,7 @@ static void
_outOper(StringInfo str, Oper *node)
{
appendStringInfo(str,
" OPER :opno %u :opid %u :opresulttype %u :opretset %s ",
" OPER :opno %u :opid %u :opresulttype %u :opretset %s ",
node->opno,
node->opid,
node->opresulttype,
@@ -1324,6 +1324,7 @@ static void
_outRangeVar(StringInfo str, RangeVar *node)
{
appendStringInfo(str, " RANGEVAR :relation ");
/*
* we deliberately ignore catalogname here, since it is presently not
* semantically meaningful
@@ -1332,8 +1333,8 @@ _outRangeVar(StringInfo str, RangeVar *node)
appendStringInfo(str, " . ");
_outToken(str, node->relname);
appendStringInfo(str, " :inhopt %d :istemp %s",
(int) node->inhOpt,
booltostr(node->istemp));
(int) node->inhOpt,
booltostr(node->istemp));
appendStringInfo(str, " :alias ");
_outNode(str, node->alias);
}

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/nodes/print.c,v 1.56 2002/06/20 20:29:29 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/print.c,v 1.57 2002/09/04 20:31:20 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@@ -97,7 +97,7 @@ char *
format_node_dump(const char *dump)
{
#define LINELEN 78
char line[LINELEN+1];
char line[LINELEN + 1];
StringInfoData str;
int i;
int j;
@@ -118,13 +118,13 @@ format_node_dump(const char *dump)
}
else
{
for (k = j-1; k > 0; k--)
for (k = j - 1; k > 0; k--)
if (line[k] == ' ')
break;
if (k > 0)
{
/* back up; will reprint all after space */
i -= (j-k-1);
i -= (j - k - 1);
j = k;
}
}
@@ -153,7 +153,7 @@ pretty_format_node_dump(const char *dump)
#define INDENTSTOP 3
#define MAXINDENT 60
#define LINELEN 78
char line[LINELEN+1];
char line[LINELEN + 1];
StringInfoData str;
int indentLev;
int indentDist;
@@ -182,7 +182,7 @@ pretty_format_node_dump(const char *dump)
}
/* print the } at indentDist */
line[indentDist] = '}';
line[indentDist+1] = '\0';
line[indentDist + 1] = '\0';
appendStringInfo(&str, "%s\n", line);
/* outdent */
if (indentLev > 0)

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/nodes/readfuncs.c,v 1.131 2002/08/31 22:10:43 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/readfuncs.c,v 1.132 2002/09/04 20:31:20 momjian Exp $
*
* NOTES
* Most of the read functions for plan nodes are tested. (In fact, they
@@ -1459,7 +1459,8 @@ _readRangeVar(void)
local_node = makeNode(RangeVar);
local_node->catalogname = NULL; /* not currently saved in output format */
local_node->catalogname = NULL; /* not currently saved in output
* format */
token = pg_strtok(&length); /* eat :relation */
token = pg_strtok(&length); /* get schemaname */
@@ -1468,11 +1469,11 @@ _readRangeVar(void)
token = pg_strtok(&length); /* eat "." */
token = pg_strtok(&length); /* get relname */
local_node->relname = nullable_string(token, length);
token = pg_strtok(&length); /* eat :inhopt */
token = pg_strtok(&length); /* get inhopt */
local_node->inhOpt = (InhOption) atoi(token);
token = pg_strtok(&length); /* eat :istemp */
token = pg_strtok(&length); /* get istemp */
local_node->istemp = strtobool(token);
@@ -1493,10 +1494,10 @@ _readColumnRef(void)
local_node = makeNode(ColumnRef);
token = pg_strtok(&length); /* eat :fields */
local_node->fields = nodeRead(true); /* now read it */
local_node->fields = nodeRead(true); /* now read it */
token = pg_strtok(&length); /* eat :indirection */
local_node->indirection = nodeRead(true); /* now read it */
local_node->indirection = nodeRead(true); /* now read it */
return local_node;
}
@@ -1515,7 +1516,7 @@ _readColumnDef(void)
local_node->colname = nullable_string(token, length);
token = pg_strtok(&length); /* eat :typename */
local_node->typename = nodeRead(true); /* now read it */
local_node->typename = nodeRead(true); /* now read it */
token = pg_strtok(&length); /* eat :is_inherited */
token = pg_strtok(&length); /* get :is_inherited */
@@ -1526,7 +1527,7 @@ _readColumnDef(void)
local_node->is_not_null = strtobool(token);
token = pg_strtok(&length); /* eat :raw_default */
local_node->raw_default = nodeRead(true); /* now read it */
local_node->raw_default = nodeRead(true); /* now read it */
token = pg_strtok(&length); /* eat :cooked_default */
token = pg_strtok(&length); /* now read it */
@@ -1536,7 +1537,7 @@ _readColumnDef(void)
local_node->constraints = nodeRead(true); /* now read it */
token = pg_strtok(&length); /* eat :support */
local_node->support = nodeRead(true); /* now read it */
local_node->support = nodeRead(true); /* now read it */
return local_node;
}
@@ -1544,7 +1545,7 @@ _readColumnDef(void)
static TypeName *
_readTypeName(void)
{
TypeName *local_node;
TypeName *local_node;
char *token;
int length;
@@ -1574,7 +1575,7 @@ _readTypeName(void)
local_node->typmod = atoi(token);
token = pg_strtok(&length); /* eat :arrayBounds */
local_node->arrayBounds = nodeRead(true); /* now read it */
local_node->arrayBounds = nodeRead(true); /* now read it */
return local_node;
}
@@ -1582,20 +1583,20 @@ _readTypeName(void)
static ExprFieldSelect *
_readExprFieldSelect(void)
{
ExprFieldSelect *local_node;
ExprFieldSelect *local_node;
char *token;
int length;
local_node = makeNode(ExprFieldSelect);
token = pg_strtok(&length); /* eat :arg */
local_node->arg = nodeRead(true); /* now read it */
local_node->arg = nodeRead(true); /* now read it */
token = pg_strtok(&length); /* eat :fields */
local_node->fields = nodeRead(true); /* now read it */
local_node->fields = nodeRead(true); /* now read it */
token = pg_strtok(&length); /* eat :indirection */
local_node->indirection = nodeRead(true); /* now read it */
local_node->indirection = nodeRead(true); /* now read it */
return local_node;
}
@@ -1614,7 +1615,7 @@ _readAlias(void)
local_node->aliasname = debackslash(token, length);
token = pg_strtok(&length); /* eat :colnames */
local_node->colnames = nodeRead(true); /* now read it */
local_node->colnames = nodeRead(true); /* now read it */
return local_node;
}
@@ -1671,7 +1672,7 @@ _readRangeTblEntry(void)
local_node->jointype = (JoinType) atoi(token);
token = pg_strtok(&length); /* eat :joinaliasvars */
local_node->joinaliasvars = nodeRead(true); /* now read it */
local_node->joinaliasvars = nodeRead(true); /* now read it */
break;
default:

View File

@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Id: geqo_misc.c,v 1.33 2002/07/20 04:59:10 momjian Exp $
* $Id: geqo_misc.c,v 1.34 2002/09/04 20:31:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -250,4 +250,4 @@ geqo_print_rel(Query *root, RelOptInfo *rel)
geqo_print_path(root, rel->cheapest_total_path, 1);
}
#endif /* GEQO_DEBUG */
#endif /* GEQO_DEBUG */

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.87 2002/08/29 16:03:48 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.88 2002/09/04 20:31:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -43,14 +43,14 @@ static void set_inherited_rel_pathlist(Query *root, RelOptInfo *rel,
static void set_subquery_pathlist(Query *root, RelOptInfo *rel,
Index rti, RangeTblEntry *rte);
static void set_function_pathlist(Query *root, RelOptInfo *rel,
RangeTblEntry *rte);
RangeTblEntry *rte);
static RelOptInfo *make_one_rel_by_joins(Query *root, int levels_needed,
List *initial_rels);
static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery);
static bool recurse_pushdown_safe(Node *setOp, Query *topquery);
static void subquery_push_qual(Query *subquery, Index rti, Node *qual);
static void recurse_push_qual(Node *setOp, Query *topquery,
Index rti, Node *qual);
Index rti, Node *qual);
/*
@@ -304,9 +304,10 @@ set_subquery_pathlist(Query *root, RelOptInfo *rel,
*
* There are several cases where we cannot push down clauses.
* Restrictions involving the subquery are checked by
* subquery_is_pushdown_safe(). Also, we do not push down clauses that
* contain subselects, mainly because I'm not sure it will work correctly
* (the subplan hasn't yet transformed sublinks to subselects).
* subquery_is_pushdown_safe(). Also, we do not push down clauses
* that contain subselects, mainly because I'm not sure it will work
* correctly (the subplan hasn't yet transformed sublinks to
* subselects).
*
* Non-pushed-down clauses will get evaluated as qpquals of the
* SubqueryScan node.
@@ -542,7 +543,7 @@ make_one_rel_by_joins(Query *root, int levels_needed, List *initial_rels)
* quals into it, because that would change the results. For subqueries
* using UNION/UNION ALL/INTERSECT/INTERSECT ALL, we can push the quals
* into each component query, so long as all the component queries share
* identical output types. (That restriction could probably be relaxed,
* identical output types. (That restriction could probably be relaxed,
* but it would take much more code to include type coercion code into
* the quals, and I'm also concerned about possible semantic gotchas.)
*/
@@ -633,14 +634,14 @@ subquery_push_qual(Query *subquery, Index rti, Node *qual)
else
{
/*
* We need to replace Vars in the qual (which must refer
* to outputs of the subquery) with copies of the
* subquery's targetlist expressions. Note that at this
* point, any uplevel Vars in the qual should have been
* replaced with Params, so they need no work.
* We need to replace Vars in the qual (which must refer to
* outputs of the subquery) with copies of the subquery's
* targetlist expressions. Note that at this point, any uplevel
* Vars in the qual should have been replaced with Params, so they
* need no work.
*
* This step also ensures that when we are pushing into a setop
* tree, each component query gets its own copy of the qual.
* This step also ensures that when we are pushing into a setop tree,
* each component query gets its own copy of the qual.
*/
qual = ResolveNew(qual, rti, 0,
subquery->targetList,
@@ -649,10 +650,9 @@ subquery_push_qual(Query *subquery, Index rti, Node *qual)
qual);
/*
* We need not change the subquery's hasAggs or
* hasSublinks flags, since we can't be pushing down any
* aggregates that weren't there before, and we don't push
* down subselects at all.
* We need not change the subquery's hasAggs or hasSublinks flags,
* since we can't be pushing down any aggregates that weren't
* there before, and we don't push down subselects at all.
*/
}
}

View File

@@ -42,7 +42,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.89 2002/07/04 15:23:56 thomas Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.90 2002/09/04 20:31:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -408,8 +408,8 @@ cost_functionscan(Path *path, Query *root, RelOptInfo *baserel)
/*
* For now, estimate function's cost at one operator eval per function
* call. Someday we should revive the function cost estimate columns in
* pg_proc...
* call. Someday we should revive the function cost estimate columns
* in pg_proc...
*/
cpu_per_tuple = cpu_operator_cost;
@@ -607,7 +607,7 @@ cost_mergejoin(Path *path, Query *root,
double outer_rows,
inner_rows;
double ntuples;
Selectivity outerscansel,
Selectivity outerscansel,
innerscansel;
Path sort_path; /* dummy for result of cost_sort */
@@ -617,15 +617,15 @@ cost_mergejoin(Path *path, Query *root,
/*
* A merge join will stop as soon as it exhausts either input stream.
* Estimate fraction of the left and right inputs that will actually
* need to be scanned. We use only the first (most significant)
* merge clause for this purpose.
* need to be scanned. We use only the first (most significant) merge
* clause for this purpose.
*
* Since this calculation is somewhat expensive, and will be the same
* for all mergejoin paths associated with the merge clause, we cache
* the results in the RestrictInfo node.
* Since this calculation is somewhat expensive, and will be the same for
* all mergejoin paths associated with the merge clause, we cache the
* results in the RestrictInfo node.
*/
firstclause = (RestrictInfo *) lfirst(mergeclauses);
if (firstclause->left_mergescansel < 0) /* not computed yet? */
if (firstclause->left_mergescansel < 0) /* not computed yet? */
mergejoinscansel(root, (Node *) firstclause->clause,
&firstclause->left_mergescansel,
&firstclause->right_mergescansel);
@@ -697,10 +697,10 @@ cost_mergejoin(Path *path, Query *root,
/*
* The number of tuple comparisons needed depends drastically on the
* number of equal keys in the two source relations, which we have no
* good way of estimating. (XXX could the MCV statistics help?)
* Somewhat arbitrarily, we charge one tuple
* comparison (one cpu_operator_cost) for each tuple in the two source
* relations. This is probably a lower bound.
* good way of estimating. (XXX could the MCV statistics help?)
* Somewhat arbitrarily, we charge one tuple comparison (one
* cpu_operator_cost) for each tuple in the two source relations.
* This is probably a lower bound.
*/
run_cost += cpu_operator_cost * (outer_rows + inner_rows);

View File

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.121 2002/09/02 06:22:18 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.122 2002/09/04 20:31:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1332,7 +1332,7 @@ pred_test_simple_clause(Expr *predicate, Node *clause)
test_oper = makeOper(test_op, /* opno */
InvalidOid, /* opid */
BOOLOID, /* opresulttype */
false); /* opretset */
false); /* opretset */
replace_opid(test_oper);
test_expr = make_opclause(test_oper,
(Var *) clause_const,
@@ -1712,7 +1712,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_BYTEA_LIKE_OP:
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
&prefix, &rest) != Pattern_Prefix_None;
&prefix, &rest) != Pattern_Prefix_None;
break;
case OID_TEXT_ICLIKE_OP:
@@ -1922,7 +1922,7 @@ expand_indexqual_conditions(List *indexquals)
case OID_CIDR_SUBEQ_OP:
resultquals = nconc(resultquals,
network_prefix_quals(leftop, expr_op,
patt->constvalue));
patt->constvalue));
break;
default:

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinpath.c,v 1.70 2002/09/02 02:47:02 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinpath.c,v 1.71 2002/09/04 20:31:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -171,7 +171,7 @@ sort_inner_and_outer(Query *root,
default:
elog(ERROR, "sort_inner_and_outer: unexpected join type %d",
(int) jointype);
useallclauses = false; /* keep compiler quiet */
useallclauses = false; /* keep compiler quiet */
break;
}

View File

@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/pathkeys.c,v 1.39 2002/06/20 20:29:30 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/pathkeys.c,v 1.40 2002/09/04 20:31:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -519,7 +519,7 @@ build_index_pathkeys(Query *root,
funcnode->funcid = index->indproc;
funcnode->funcresulttype = get_func_rettype(index->indproc);
funcnode->funcretset = false; /* can never be a set */
funcnode->funcretset = false; /* can never be a set */
funcnode->func_fcache = NULL;
while (*indexkeys != 0)
@@ -769,22 +769,22 @@ find_mergeclauses_for_pathkeys(Query *root,
/*
* We can match a pathkey against either left or right side of any
* mergejoin clause. (We examine both sides since we aren't told if
* the given pathkeys are for inner or outer input path; no confusion
* is possible.) Furthermore, if there are multiple matching
* clauses, take them all. In plain inner-join scenarios we expect
* only one match, because redundant-mergeclause elimination will
* have removed any redundant mergeclauses from the input list.
* However, in outer-join scenarios there might be multiple matches.
* An example is
* mergejoin clause. (We examine both sides since we aren't told
* if the given pathkeys are for inner or outer input path; no
* confusion is possible.) Furthermore, if there are multiple
* matching clauses, take them all. In plain inner-join scenarios
* we expect only one match, because redundant-mergeclause
* elimination will have removed any redundant mergeclauses from
* the input list. However, in outer-join scenarios there might be
* multiple matches. An example is
*
* select * from a full join b on
* a.v1 = b.v1 and a.v2 = b.v2 and a.v1 = b.v2;
* select * from a full join b on a.v1 = b.v1 and a.v2 = b.v2 and
* a.v1 = b.v2;
*
* Given the pathkeys ((a.v1), (a.v2)) it is okay to return all
* three clauses (in the order a.v1=b.v1, a.v1=b.v2, a.v2=b.v2)
* and indeed we *must* do so or we will be unable to form a
* valid plan.
* Given the pathkeys ((a.v1), (a.v2)) it is okay to return all three
* clauses (in the order a.v1=b.v1, a.v1=b.v2, a.v2=b.v2) and
* indeed we *must* do so or we will be unable to form a valid
* plan.
*/
foreach(j, restrictinfos)
{
@@ -812,8 +812,8 @@ find_mergeclauses_for_pathkeys(Query *root,
break;
/*
* If we did find usable mergeclause(s) for this sort-key position,
* add them to result list.
* If we did find usable mergeclause(s) for this sort-key
* position, add them to result list.
*/
mergeclauses = nconc(mergeclauses, matched_restrictinfos);
}

View File

@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/createplan.c,v 1.117 2002/09/02 02:47:02 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/createplan.c,v 1.118 2002/09/04 20:31:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,7 +79,7 @@ static IndexScan *make_indexscan(List *qptlist, List *qpqual, Index scanrelid,
static TidScan *make_tidscan(List *qptlist, List *qpqual, Index scanrelid,
List *tideval);
static FunctionScan *make_functionscan(List *qptlist, List *qpqual,
Index scanrelid);
Index scanrelid);
static NestLoop *make_nestloop(List *tlist,
List *joinclauses, List *otherclauses,
Plan *lefttree, Plan *righttree,
@@ -206,8 +206,8 @@ create_scan_plan(Query *root, Path *best_path)
case T_FunctionScan:
plan = (Scan *) create_functionscan_plan(best_path,
tlist,
scan_clauses);
tlist,
scan_clauses);
break;
default:
@@ -1346,8 +1346,8 @@ make_functionscan(List *qptlist,
List *qpqual,
Index scanrelid)
{
FunctionScan *node = makeNode(FunctionScan);
Plan *plan = &node->scan.plan;
FunctionScan *node = makeNode(FunctionScan);
Plan *plan = &node->scan.plan;
/* cost should be inserted by caller */
plan->state = (EState *) NULL;

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/initsplan.c,v 1.74 2002/09/02 02:47:02 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/initsplan.c,v 1.75 2002/09/04 20:31:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -108,12 +108,15 @@ add_base_rels_to_query(Query *root, Node *jtnode)
add_base_rels_to_query(root, j->rarg));
/* the join's own rtindex is NOT added to result */
jrel = build_other_rel(root, j->rtindex);
/*
* Mark the join's otherrel with outerjoinset = list of baserel ids
* included in the join. Note we must copy here because result list
* is destructively modified by nconcs at higher levels.
* Mark the join's otherrel with outerjoinset = list of baserel
* ids included in the join. Note we must copy here because
* result list is destructively modified by nconcs at higher
* levels.
*/
jrel->outerjoinset = listCopy(result);
/*
* Safety check: join RTEs should not be SELECT FOR UPDATE targets
*/
@@ -172,8 +175,8 @@ add_vars_to_targetlist(Query *root, List *vars)
if (rel->reloptkind == RELOPT_OTHER_JOIN_REL)
{
/* Var is an alias */
Node *expansion;
List *varsused;
Node *expansion;
List *varsused;
expansion = flatten_join_alias_vars((Node *) var,
root->rtable, true);
@@ -196,7 +199,7 @@ add_vars_to_targetlist(Query *root, List *vars)
* distribute_quals_to_rels
* Recursively scan the query's join tree for WHERE and JOIN/ON qual
* clauses, and add these to the appropriate RestrictInfo and JoinInfo
* lists belonging to base RelOptInfos. Also, base RelOptInfos are marked
* lists belonging to base RelOptInfos. Also, base RelOptInfos are marked
* with outerjoinset information, to aid in proper positioning of qual
* clauses that appear above outer joins.
*
@@ -400,7 +403,8 @@ distribute_qual_to_rels(Query *root, Node *clause,
restrictinfo->right_sortop = InvalidOid;
restrictinfo->left_pathkey = NIL; /* not computable yet */
restrictinfo->right_pathkey = NIL;
restrictinfo->left_mergescansel = -1; /* not computed until needed */
restrictinfo->left_mergescansel = -1; /* not computed until
* needed */
restrictinfo->right_mergescansel = -1;
restrictinfo->hashjoinoperator = InvalidOid;
restrictinfo->left_bucketsize = -1; /* not computed until needed */
@@ -419,7 +423,7 @@ distribute_qual_to_rels(Query *root, Node *clause,
* earlier by add_base_rels_to_query.
*
* We can combine this step with a cross-check that the clause contains
* no relids not within its scope. If the first crosscheck succeeds,
* no relids not within its scope. If the first crosscheck succeeds,
* the clause contains no aliases and we needn't look more closely.
*/
if (!is_subseti(relids, qualscope))
@@ -763,10 +767,10 @@ process_implied_equality(Query *root, Node *item1, Node *item2,
clause = makeNode(Expr);
clause->typeOid = BOOLOID;
clause->opType = OP_EXPR;
clause->oper = (Node *) makeOper(oprid(eq_operator),/* opno */
clause->oper = (Node *) makeOper(oprid(eq_operator), /* opno */
InvalidOid, /* opid */
BOOLOID, /* opresulttype */
false); /* opretset */
BOOLOID, /* opresulttype */
false); /* opretset */
clause->args = makeList2(item1, item2);
ReleaseSysCache(eq_operator);

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planner.c,v 1.123 2002/08/28 20:46:23 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planner.c,v 1.124 2002/09/04 20:31:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -42,7 +42,7 @@
static Node *pull_up_subqueries(Query *parse, Node *jtnode,
bool below_outer_join);
bool below_outer_join);
static bool is_simple_subquery(Query *subquery);
static bool has_nullable_targetlist(Query *subquery);
static void resolvenew_in_jointree(Node *jtnode, int varno, List *subtlist);
@@ -301,16 +301,16 @@ pull_up_subqueries(Query *parse, Node *jtnode, bool below_outer_join)
*
* If we are inside an outer join, only pull up subqueries whose
* targetlists are nullable --- otherwise substituting their tlist
* entries for upper Var references would do the wrong thing
* (the results wouldn't become NULL when they're supposed to).
* XXX This could be improved by generating pseudo-variables for
* such expressions; we'd have to figure out how to get the pseudo-
* variables evaluated at the right place in the modified plan tree.
* Fix it someday.
* entries for upper Var references would do the wrong thing (the
* results wouldn't become NULL when they're supposed to). XXX
* This could be improved by generating pseudo-variables for such
* expressions; we'd have to figure out how to get the pseudo-
* variables evaluated at the right place in the modified plan
* tree. Fix it someday.
*
* Note: even if the subquery itself is simple enough, we can't pull
* it up if there is a reference to its whole tuple result. Perhaps
* a pseudo-variable is the answer here too.
* it up if there is a reference to its whole tuple result.
* Perhaps a pseudo-variable is the answer here too.
*/
if (rte->rtekind == RTE_SUBQUERY && is_simple_subquery(subquery) &&
(!below_outer_join || has_nullable_targetlist(subquery)) &&
@@ -336,8 +336,8 @@ pull_up_subqueries(Query *parse, Node *jtnode, bool below_outer_join)
below_outer_join);
/*
* Now make a modifiable copy of the subquery that we can
* run OffsetVarNodes on.
* Now make a modifiable copy of the subquery that we can run
* OffsetVarNodes on.
*/
subquery = copyObject(subquery);
@@ -352,7 +352,8 @@ pull_up_subqueries(Query *parse, Node *jtnode, bool below_outer_join)
* Replace all of the top query's references to the subquery's
* outputs with copies of the adjusted subtlist items, being
* careful not to replace any of the jointree structure.
* (This'd be a lot cleaner if we could use query_tree_mutator.)
* (This'd be a lot cleaner if we could use
* query_tree_mutator.)
*/
subtlist = subquery->targetList;
parse->targetList = (List *)
@@ -375,15 +376,16 @@ pull_up_subqueries(Query *parse, Node *jtnode, bool below_outer_join)
}
/*
* Now append the adjusted rtable entries to upper query.
* (We hold off until after fixing the upper rtable entries;
* no point in running that code on the subquery ones too.)
* Now append the adjusted rtable entries to upper query. (We
* hold off until after fixing the upper rtable entries; no
* point in running that code on the subquery ones too.)
*/
parse->rtable = nconc(parse->rtable, subquery->rtable);
/*
* Pull up any FOR UPDATE markers, too. (OffsetVarNodes
* already adjusted the marker values, so just nconc the list.)
* already adjusted the marker values, so just nconc the
* list.)
*/
parse->rowMarks = nconc(parse->rowMarks, subquery->rowMarks);
@@ -500,9 +502,9 @@ is_simple_subquery(Query *subquery)
/*
* Don't pull up a subquery that has any set-returning functions in
* its targetlist. Otherwise we might well wind up inserting
* set-returning functions into places where they mustn't go,
* such as quals of higher queries.
* its targetlist. Otherwise we might well wind up inserting
* set-returning functions into places where they mustn't go, such as
* quals of higher queries.
*/
if (expression_returns_set((Node *) subquery->targetList))
return false;
@@ -724,8 +726,8 @@ preprocess_expression(Query *parse, Node *expr, int kind)
/*
* If the query has any join RTEs, try to replace join alias variables
* with base-relation variables, to allow quals to be pushed down.
* We must do this after sublink processing, since it does not recurse
* with base-relation variables, to allow quals to be pushed down. We
* must do this after sublink processing, since it does not recurse
* into sublinks.
*
* The flattening pass is expensive enough that it seems worthwhile to
Some files were not shown because too many files have changed in this diff.